author		Dave Airlie <airlied@redhat.com>	2013-02-07 20:08:10 -0500
committer	Dave Airlie <airlied@redhat.com>	2013-02-07 20:08:10 -0500
commit		cd17ef4114ad5c514b17e6a0bb02a309ab90b692 (patch)
tree		9c162eaa96931597b83e165702e3483ba5c6bb1e
parent		67c964000236497e00c646472cd6b70b5c5109c8 (diff)
parent		7d37beaaf3dbc6ff16f4d32a4dd6f8c557c6ab50 (diff)
Merge tag 'drm-intel-next-2013-02-01' of git://people.freedesktop.org/~danvet/drm-intel into drm-next
Daniel writes:
"Probably the last feature pull for 3.9, there's some fixes outstanding
thought that I'd like to sneak in. And maybe 3.8 takes a bit longer ...
Anyway, highlights of this pull:
- Kill the horrible IS_DISPLAYREG hack to handle the mmio offset movements
on vlv, big thanks to Ville.
- Dynamic power well support for Haswell, shaves away a bit of power when
only using the eDP port on pipe A (Paulo). Plus unclaimed register fixes
uncovered by this.
- Clarifications of the gpu hang/reset state transitions, hopefully fixing
a few spurious -EIO deaths in userspace.
- Haswell ELD fixes.
- Some more (pp)gtt cleanups from Ben.
- A few smaller things all over.
Plus all the stuff from the previous rather small pull request:
- Broadcast RGB improvements and reduced color range fixes from Ville.
- Ben is on a "kill legacy gtt code for good" spree, first pile of patches
included.
- No-relocs and bo lut improvements for faster execbuf from Chris.
- Some refactorings from Imre."
* tag 'drm-intel-next-2013-02-01' of git://people.freedesktop.org/~danvet/drm-intel: (101 commits)
GPU/i915: Fix acpi_bus_get_device() check in drivers/gpu/drm/i915/intel_opregion.c
drm/i915: Set the SR01 "screen off" bit in i915_redisable_vga() too
drm/i915: Kill IS_DISPLAYREG()
drm/i915: Introduce i915_vgacntrl_reg()
drm/i915: gen6_gmch_remove can be static
drm/i915: dynamic Haswell display power well support
drm/i915: check the power down well on assert_pipe()
drm/i915: don't send DP "idle" pattern before "normal" on HSW PORT_A
drm/i915: don't run hsw power well code on !hsw
drm/i915: kill cargo-culted locking from power well code
drm/i915: Only run idle processing from i915_gem_retire_requests_worker
drm/i915: Fix CAGF for HSW
drm/i915: Reclaim GTT space for failed PPGTT
drm/i915: remove intel_gtt structure
drm/i915: Add probe and remove to the gtt ops
drm/i915: extract hw ppgtt setup/cleanup code
drm/i915: pte_encode is gen6+
drm/i915: vfuncs for ppgtt
drm/i915: vfuncs for gtt_clear_range/insert_entries
drm/i915: Error state should print /sys/kernel/debug
...
34 files changed, 2255 insertions, 1732 deletions
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index c8d9dcb15db0..d8e7e6c9114e 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -60,7 +60,6 @@ struct intel_gtt_driver {
 };
 
 static struct _intel_private {
-	struct intel_gtt base;
 	const struct intel_gtt_driver *driver;
 	struct pci_dev *pcidev;	/* device one */
 	struct pci_dev *bridge_dev;
@@ -75,7 +74,18 @@ static struct _intel_private {
 	struct resource ifp_resource;
 	int resource_valid;
 	struct page *scratch_page;
+	phys_addr_t scratch_page_dma;
 	int refcount;
+	/* Whether i915 needs to use the dmar apis or not. */
+	unsigned int needs_dmar : 1;
+	phys_addr_t gma_bus_addr;
+	/* Size of memory reserved for graphics by the BIOS */
+	unsigned int stolen_size;
+	/* Total number of gtt entries. */
+	unsigned int gtt_total_entries;
+	/* Part of the gtt that is mappable by the cpu, for those chips where
+	 * this is not the full gtt. */
+	unsigned int gtt_mappable_entries;
 } intel_private;
 
 #define INTEL_GTT_GEN	intel_private.driver->gen
@@ -291,15 +301,15 @@ static int intel_gtt_setup_scratch_page(void)
 	get_page(page);
 	set_pages_uc(page, 1);
 
-	if (intel_private.base.needs_dmar) {
+	if (intel_private.needs_dmar) {
 		dma_addr = pci_map_page(intel_private.pcidev, page, 0,
 					PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
 			return -EINVAL;
 
-		intel_private.base.scratch_page_dma = dma_addr;
+		intel_private.scratch_page_dma = dma_addr;
 	} else
-		intel_private.base.scratch_page_dma = page_to_phys(page);
+		intel_private.scratch_page_dma = page_to_phys(page);
 
 	intel_private.scratch_page = page;
 
@@ -506,7 +516,7 @@ static unsigned int intel_gtt_total_entries(void)
 		/* On previous hardware, the GTT size was just what was
 		 * required to map the aperture.
 		 */
-		return intel_private.base.gtt_mappable_entries;
+		return intel_private.gtt_mappable_entries;
 	}
 }
 
@@ -546,7 +556,7 @@ static unsigned int intel_gtt_mappable_entries(void)
 static void intel_gtt_teardown_scratch_page(void)
 {
 	set_pages_wb(intel_private.scratch_page, 1);
-	pci_unmap_page(intel_private.pcidev, intel_private.base.scratch_page_dma,
+	pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
 		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 	put_page(intel_private.scratch_page);
 	__free_page(intel_private.scratch_page);
@@ -572,8 +582,8 @@ static int intel_gtt_init(void)
 	if (ret != 0)
 		return ret;
 
-	intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
-	intel_private.base.gtt_total_entries = intel_gtt_total_entries();
+	intel_private.gtt_mappable_entries = intel_gtt_mappable_entries();
+	intel_private.gtt_total_entries = intel_gtt_total_entries();
 
 	/* save the PGETBL reg for resume */
 	intel_private.PGETBL_save =
@@ -585,10 +595,10 @@ static int intel_gtt_init(void)
 
 	dev_info(&intel_private.bridge_dev->dev,
 		 "detected gtt size: %dK total, %dK mappable\n",
-		 intel_private.base.gtt_total_entries * 4,
-		 intel_private.base.gtt_mappable_entries * 4);
+		 intel_private.gtt_total_entries * 4,
+		 intel_private.gtt_mappable_entries * 4);
 
-	gtt_map_size = intel_private.base.gtt_total_entries * 4;
+	gtt_map_size = intel_private.gtt_total_entries * 4;
 
 	intel_private.gtt = NULL;
 	if (INTEL_GTT_GEN < 6 && INTEL_GTT_GEN > 2)
@@ -605,9 +615,9 @@ static int intel_gtt_init(void)
 
 	global_cache_flush();	/* FIXME: ? */
 
-	intel_private.base.stolen_size = intel_gtt_stolen_size();
+	intel_private.stolen_size = intel_gtt_stolen_size();
 
-	intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
+	intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
 
 	ret = intel_gtt_setup_scratch_page();
 	if (ret != 0) {
@@ -622,7 +632,7 @@ static int intel_gtt_init(void)
 	pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
 			      &gma_addr);
 
-	intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
+	intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
 
 	return 0;
 }
@@ -633,8 +643,7 @@ static int intel_fake_agp_fetch_size(void)
 	unsigned int aper_size;
 	int i;
 
-	aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
-		/ MB(1);
+	aper_size = (intel_private.gtt_mappable_entries << PAGE_SHIFT) / MB(1);
 
 	for (i = 0; i < num_sizes; i++) {
 		if (aper_size == intel_fake_agp_sizes[i].size) {
@@ -778,7 +787,7 @@ static int intel_fake_agp_configure(void)
 		return -EIO;
 
 	intel_private.clear_fake_agp = true;
-	agp_bridge->gart_bus_addr = intel_private.base.gma_bus_addr;
+	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
 
 	return 0;
 }
@@ -840,12 +849,9 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
 {
 	int ret = -EINVAL;
 
-	if (intel_private.base.do_idle_maps)
-		return -ENODEV;
-
 	if (intel_private.clear_fake_agp) {
-		int start = intel_private.base.stolen_size / PAGE_SIZE;
-		int end = intel_private.base.gtt_mappable_entries;
+		int start = intel_private.stolen_size / PAGE_SIZE;
+		int end = intel_private.gtt_mappable_entries;
 		intel_gtt_clear_range(start, end - start);
 		intel_private.clear_fake_agp = false;
 	}
@@ -856,7 +862,7 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
 	if (mem->page_count == 0)
 		goto out;
 
-	if (pg_start + mem->page_count > intel_private.base.gtt_total_entries)
+	if (pg_start + mem->page_count > intel_private.gtt_total_entries)
 		goto out_err;
 
 	if (type != mem->type)
@@ -868,7 +874,7 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
 	if (!mem->is_flushed)
 		global_cache_flush();
 
-	if (intel_private.base.needs_dmar) {
+	if (intel_private.needs_dmar) {
 		struct sg_table st;
 
 		ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
@@ -894,7 +900,7 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
 	unsigned int i;
 
 	for (i = first_entry; i < (first_entry + num_entries); i++) {
-		intel_private.driver->write_entry(intel_private.base.scratch_page_dma,
+		intel_private.driver->write_entry(intel_private.scratch_page_dma,
 						  i, 0);
 	}
 	readl(intel_private.gtt+i-1);
@@ -907,12 +913,9 @@ static int intel_fake_agp_remove_entries(struct agp_memory *mem,
 	if (mem->page_count == 0)
 		return 0;
 
-	if (intel_private.base.do_idle_maps)
-		return -ENODEV;
-
 	intel_gtt_clear_range(pg_start, mem->page_count);
 
-	if (intel_private.base.needs_dmar) {
+	if (intel_private.needs_dmar) {
 		intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
 		mem->sg_list = NULL;
 		mem->num_sg = 0;
@@ -1069,24 +1072,6 @@ static void i965_write_entry(dma_addr_t addr,
 	writel(addr | pte_flags, intel_private.gtt + entry);
 }
 
-/* Certain Gen5 chipsets require require idling the GPU before
- * unmapping anything from the GTT when VT-d is enabled.
- */
-static inline int needs_idle_maps(void)
-{
-#ifdef CONFIG_INTEL_IOMMU
-	const unsigned short gpu_devid = intel_private.pcidev->device;
-
-	/* Query intel_iommu to see if we need the workaround. Presumably that
-	 * was loaded first.
-	 */
-	if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
-	     gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
-	    intel_iommu_gfx_mapped)
-		return 1;
-#endif
-	return 0;
-}
-
 
 static int i9xx_setup(void)
 {
@@ -1115,9 +1100,6 @@ static int i9xx_setup(void)
 		break;
 	}
 
-	if (needs_idle_maps())
-		intel_private.base.do_idle_maps = 1;
-
 	intel_i9xx_setup_flush();
 
 	return 0;
@@ -1389,9 +1371,10 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
 }
 EXPORT_SYMBOL(intel_gmch_probe);
 
-struct intel_gtt *intel_gtt_get(void)
+void intel_gtt_get(size_t *gtt_total, size_t *stolen_size)
 {
-	return &intel_private.base;
+	*gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
+	*stolen_size = intel_private.stolen_size;
 }
 EXPORT_SYMBOL(intel_gtt_get);
 
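For callers, the interface change at the end of this file is mechanical: intel_gtt_get() no longer hands out a pointer to the (now removed) struct intel_gtt, it reports the two sizes i915 still needs through out-parameters. A minimal caller sketch under that assumption (the debug print is illustrative, not part of the patch):

	size_t gtt_total, stolen_size;

	intel_gtt_get(&gtt_total, &stolen_size);
	/* gtt_total is gtt_total_entries << PAGE_SHIFT, i.e. bytes */
	pr_info("GTT maps %zu bytes, BIOS stole %zu bytes\n",
		gtt_total, stolen_size);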
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 5a3770fbd770..a3a3b61059ff 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1483,9 +1483,11 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
 #define VIDEO_BLOCK	0x02
 #define VENDOR_BLOCK	0x03
 #define SPEAKER_BLOCK	0x04
+#define VIDEO_CAPABILITY_BLOCK	0x07
 #define EDID_BASIC_AUDIO	(1 << 6)
 #define EDID_CEA_YCRCB444	(1 << 5)
 #define EDID_CEA_YCRCB422	(1 << 4)
+#define EDID_CEA_VCDB_QS	(1 << 6)
 
 /**
  * Search EDID for CEA extension block.
@@ -1902,6 +1904,37 @@ end:
 EXPORT_SYMBOL(drm_detect_monitor_audio);
 
 /**
+ * drm_rgb_quant_range_selectable - is RGB quantization range selectable?
+ *
+ * Check whether the monitor reports the RGB quantization range selection
+ * as supported. The AVI infoframe can then be used to inform the monitor
+ * which quantization range (full or limited) is used.
+ */
+bool drm_rgb_quant_range_selectable(struct edid *edid)
+{
+	u8 *edid_ext;
+	int i, start, end;
+
+	edid_ext = drm_find_cea_extension(edid);
+	if (!edid_ext)
+		return false;
+
+	if (cea_db_offsets(edid_ext, &start, &end))
+		return false;
+
+	for_each_cea_db(edid_ext, i, start, end) {
+		if (cea_db_tag(&edid_ext[i]) == VIDEO_CAPABILITY_BLOCK &&
+		    cea_db_payload_len(&edid_ext[i]) == 2) {
+			DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", edid_ext[i + 2]);
+			return edid_ext[i + 2] & EDID_CEA_VCDB_QS;
+		}
+	}
+
+	return false;
+}
+EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
+
+/**
  * drm_add_display_info - pull display info out if present
  * @edid: EDID data
  * @info: display info (attached to connector)
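As the kerneldoc above says, drm_rgb_quant_range_selectable() only tells a driver that the sink honours an explicitly signalled quantization range; the actual signalling happens in the AVI infoframe. A hedged sketch of the intended call pattern from an encoder (want_full_range() and the infoframe setter are hypothetical stand-ins for driver code, not part of this patch):

	/* Only announce a range when the EDID's Video Capability Data
	 * Block sets the QS bit; otherwise the sink may ignore it. */
	if (drm_rgb_quant_range_selectable(edid))
		set_avi_infoframe_quant_range(encoder,	/* assumed helper */
					      want_full_range(mode));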
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 0f2c5493242b..91f3ac6cef35 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -16,6 +16,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
 	  i915_gem_tiling.o \
 	  i915_sysfs.o \
 	  i915_trace_points.o \
+	  i915_ums.o \
 	  intel_display.o \
 	  intel_crt.o \
 	  intel_lvds.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 7576e7874698..aa9833babad1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -258,8 +258,9 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
 		   count, size);
 
-	seq_printf(m, "%zu [%zu] gtt total\n",
-		   dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
+	seq_printf(m, "%zu [%lu] gtt total\n",
+		   dev_priv->gtt.total,
+		   dev_priv->gtt.mappable_end - dev_priv->gtt.start);
 
 	mutex_unlock(&dev->struct_mutex);
 
@@ -813,11 +814,11 @@ static int i915_error_state_open(struct inode *inode, struct file *file)
 
 	error_priv->dev = dev;
 
-	spin_lock_irqsave(&dev_priv->error_lock, flags);
-	error_priv->error = dev_priv->first_error;
+	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+	error_priv->error = dev_priv->gpu_error.first_error;
 	if (error_priv->error)
 		kref_get(&error_priv->error->ref);
-	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
 
 	return single_open(file, i915_error_state, error_priv);
 }
@@ -956,7 +957,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
 		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-		u32 rpstat;
+		u32 rpstat, cagf;
 		u32 rpupei, rpcurup, rpprevup;
 		u32 rpdownei, rpcurdown, rpprevdown;
 		int max_freq;
@@ -975,6 +976,11 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
 		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
 		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
+		if (IS_HASWELL(dev))
+			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
+		else
+			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
+		cagf *= GT_FREQUENCY_MULTIPLIER;
 
 		gen6_gt_force_wake_put(dev_priv);
 		mutex_unlock(&dev->struct_mutex);
@@ -987,8 +993,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 			   gt_perf_status & 0xff);
 		seq_printf(m, "Render p-state limit: %d\n",
 			   rp_state_limits & 0xff);
-		seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
-						GEN6_CAGF_SHIFT) * GT_FREQUENCY_MULTIPLIER);
+		seq_printf(m, "CAGF: %dMHz\n", cagf);
 		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
 			   GEN6_CURICONT_MASK);
 		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
@@ -1674,7 +1679,7 @@ i915_wedged_read(struct file *filp,
 
 	len = snprintf(buf, sizeof(buf),
 		       "wedged : %d\n",
-		       atomic_read(&dev_priv->mm.wedged));
+		       atomic_read(&dev_priv->gpu_error.reset_counter));
 
 	if (len > sizeof(buf))
 		len = sizeof(buf);
@@ -1729,7 +1734,7 @@ i915_ring_stop_read(struct file *filp,
 	int len;
 
 	len = snprintf(buf, sizeof(buf),
-		       "0x%08x\n", dev_priv->stop_rings);
+		       "0x%08x\n", dev_priv->gpu_error.stop_rings);
 
 	if (len > sizeof(buf))
 		len = sizeof(buf);
@@ -1765,7 +1770,7 @@ i915_ring_stop_write(struct file *filp,
 	if (ret)
 		return ret;
 
-	dev_priv->stop_rings = val;
+	dev_priv->gpu_error.stop_rings = val;
 	mutex_unlock(&dev->struct_mutex);
 
 	return cnt;
@@ -1779,6 +1784,102 @@ static const struct file_operations i915_ring_stop_fops = {
 	.llseek = default_llseek,
 };
 
+#define DROP_UNBOUND 0x1
+#define DROP_BOUND 0x2
+#define DROP_RETIRE 0x4
+#define DROP_ACTIVE 0x8
+#define DROP_ALL (DROP_UNBOUND | \
+		  DROP_BOUND | \
+		  DROP_RETIRE | \
+		  DROP_ACTIVE)
+static ssize_t
+i915_drop_caches_read(struct file *filp,
+		      char __user *ubuf,
+		      size_t max,
+		      loff_t *ppos)
+{
+	char buf[20];
+	int len;
+
+	len = snprintf(buf, sizeof(buf), "0x%08x\n", DROP_ALL);
+	if (len > sizeof(buf))
+		len = sizeof(buf);
+
+	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_drop_caches_write(struct file *filp,
+		       const char __user *ubuf,
+		       size_t cnt,
+		       loff_t *ppos)
+{
+	struct drm_device *dev = filp->private_data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj, *next;
+	char buf[20];
+	int val = 0, ret;
+
+	if (cnt > 0) {
+		if (cnt > sizeof(buf) - 1)
+			return -EINVAL;
+
+		if (copy_from_user(buf, ubuf, cnt))
+			return -EFAULT;
+		buf[cnt] = 0;
+
+		val = simple_strtoul(buf, NULL, 0);
+	}
+
+	DRM_DEBUG_DRIVER("Dropping caches: 0x%08x\n", val);
+
+	/* No need to check and wait for gpu resets, only libdrm auto-restarts
+	 * on ioctls on -EAGAIN. */
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	if (val & DROP_ACTIVE) {
+		ret = i915_gpu_idle(dev);
+		if (ret)
+			goto unlock;
+	}
+
+	if (val & (DROP_RETIRE | DROP_ACTIVE))
+		i915_gem_retire_requests(dev);
+
+	if (val & DROP_BOUND) {
+		list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list)
+			if (obj->pin_count == 0) {
+				ret = i915_gem_object_unbind(obj);
+				if (ret)
+					goto unlock;
+			}
+	}
+
+	if (val & DROP_UNBOUND) {
+		list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
+			if (obj->pages_pin_count == 0) {
+				ret = i915_gem_object_put_pages(obj);
+				if (ret)
+					goto unlock;
+			}
+	}
+
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret ?: cnt;
+}
+
+static const struct file_operations i915_drop_caches_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = i915_drop_caches_read,
+	.write = i915_drop_caches_write,
+	.llseek = default_llseek,
+};
+
 static ssize_t
 i915_max_freq_read(struct file *filp,
 		   char __user *ubuf,
@@ -2176,6 +2277,12 @@ int i915_debugfs_init(struct drm_minor *minor)
 		return ret;
 
 	ret = i915_debugfs_create(minor->debugfs_root, minor,
+				  "i915_gem_drop_caches",
+				  &i915_drop_caches_fops);
+	if (ret)
+		return ret;
+
+	ret = i915_debugfs_create(minor->debugfs_root, minor,
 				  "i915_error_state",
 				  &i915_error_state_fops);
 	if (ret)
@@ -2206,6 +2313,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
 				 1, minor);
 	drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
 				 1, minor);
+	drm_debugfs_remove_files((struct drm_info_list *) &i915_drop_caches_fops,
+				 1, minor);
 	drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
 				 1, minor);
 	drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
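The new i915_gem_drop_caches file is a plain debugfs knob: reading it reports the full mask, writing a mask drops the corresponding caches. A userspace sketch, assuming DRM minor 0 and the usual debugfs mount point (the helper is hypothetical test-tool code, not part of this patch):

	#include <fcntl.h>
	#include <unistd.h>

	/* Ask i915 to drop everything it can: DROP_ALL is 0xf per the
	 * #defines above (unbound | bound | retire | active). */
	static int i915_drop_all_caches(void)
	{
		int fd = open("/sys/kernel/debug/dri/0/i915_gem_drop_caches",
			      O_WRONLY);
		ssize_t ret;

		if (fd < 0)
			return -1;
		/* the write handler parses this with simple_strtoul() */
		ret = write(fd, "0xf", 3);
		close(fd);
		return ret == 3 ? 0 : -1;
	}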
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 6d8a1dc74934..cf0610330135 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -992,6 +992,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_HAS_PINNED_BATCHES:
 		value = 1;
 		break;
+	case I915_PARAM_HAS_EXEC_NO_RELOC:
+		value = 1;
+		break;
+	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
+		value = 1;
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 				 param->param);
@@ -1070,7 +1076,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
 
 	dev_priv->dri1.gfx_hws_cpu_addr =
-		ioremap_wc(dev_priv->mm.gtt_base_addr + hws->addr, 4096);
+		ioremap_wc(dev_priv->gtt.mappable_base + hws->addr, 4096);
 	if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
 		i915_dma_cleanup(dev);
 		ring->status_page.gfx_addr = 0;
@@ -1420,9 +1426,9 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 	if (!ap)
 		return;
 
-	ap->ranges[0].base = dev_priv->mm.gtt->gma_bus_addr;
-	ap->ranges[0].size =
-		dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+	ap->ranges[0].base = dev_priv->gtt.mappable_base;
+	ap->ranges[0].size = dev_priv->gtt.mappable_end - dev_priv->gtt.start;
+
 	primary =
 		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
 
@@ -1536,18 +1542,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto put_gmch;
 	}
 
-	aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
-	dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr;
+	aperture_size = dev_priv->gtt.mappable_end;
 
-	dev_priv->mm.gtt_mapping =
-		io_mapping_create_wc(dev_priv->mm.gtt_base_addr,
+	dev_priv->gtt.mappable =
+		io_mapping_create_wc(dev_priv->gtt.mappable_base,
 				     aperture_size);
-	if (dev_priv->mm.gtt_mapping == NULL) {
+	if (dev_priv->gtt.mappable == NULL) {
 		ret = -EIO;
 		goto out_rmmap;
 	}
 
-	i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr,
+	i915_mtrr_setup(dev_priv, dev_priv->gtt.mappable_base,
 			aperture_size);
 
 	/* The i915 workqueue is primarily used for batched retirement of
@@ -1600,7 +1605,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		pci_enable_msi(dev->pdev);
 
 	spin_lock_init(&dev_priv->irq_lock);
-	spin_lock_init(&dev_priv->error_lock);
+	spin_lock_init(&dev_priv->gpu_error.lock);
 	spin_lock_init(&dev_priv->rps.lock);
 	mutex_init(&dev_priv->dpio_lock);
 
@@ -1652,15 +1657,15 @@ out_gem_unload:
 out_mtrrfree:
 	if (dev_priv->mm.gtt_mtrr >= 0) {
 		mtrr_del(dev_priv->mm.gtt_mtrr,
-			 dev_priv->mm.gtt_base_addr,
+			 dev_priv->gtt.mappable_base,
 			 aperture_size);
 		dev_priv->mm.gtt_mtrr = -1;
 	}
-	io_mapping_free(dev_priv->mm.gtt_mapping);
+	io_mapping_free(dev_priv->gtt.mappable);
 out_rmmap:
 	pci_iounmap(dev->pdev, dev_priv->regs);
 put_gmch:
-	i915_gem_gtt_fini(dev);
+	dev_priv->gtt.gtt_remove(dev);
 put_bridge:
 	pci_dev_put(dev_priv->bridge_dev);
 free_priv:
@@ -1690,11 +1695,11 @@ int i915_driver_unload(struct drm_device *dev)
 	/* Cancel the retire work handler, which should be idle now. */
 	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
 
-	io_mapping_free(dev_priv->mm.gtt_mapping);
+	io_mapping_free(dev_priv->gtt.mappable);
 	if (dev_priv->mm.gtt_mtrr >= 0) {
 		mtrr_del(dev_priv->mm.gtt_mtrr,
-			 dev_priv->mm.gtt_base_addr,
-			 dev_priv->mm.gtt->gtt_mappable_entries * PAGE_SIZE);
+			 dev_priv->gtt.mappable_base,
+			 dev_priv->gtt.mappable_end);
 		dev_priv->mm.gtt_mtrr = -1;
 	}
 
@@ -1720,8 +1725,8 @@ int i915_driver_unload(struct drm_device *dev)
 	}
 
 	/* Free error state after interrupts are fully disabled. */
-	del_timer_sync(&dev_priv->hangcheck_timer);
-	cancel_work_sync(&dev_priv->error_work);
+	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
+	cancel_work_sync(&dev_priv->gpu_error.work);
 	i915_destroy_error_state(dev);
 
 	if (dev->pdev->msi_enabled)
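Userspace discovers the two new execbuf features added above through the existing GETPARAM ioctl; old kernels simply fail the ioctl for unknown parameters. A sketch of the usual libdrm-style probe (the wrapper function is illustrative; drm_i915_getparam_t and DRM_IOCTL_I915_GETPARAM come from i915_drm.h via xf86drm.h):

	static int i915_has_param(int fd, int param)
	{
		drm_i915_getparam_t gp;
		int value = 0;

		gp.param = param;
		gp.value = &value;
		if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return 0;	/* older kernel: parameter unknown */
		return value;
	}

	/* e.g. i915_has_param(fd, I915_PARAM_HAS_EXEC_NO_RELOC) */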
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index c8cbc32fe8db..d159d7a402e9 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -276,6 +276,7 @@ static const struct intel_device_info intel_valleyview_m_info = {
 	.has_bsd_ring = 1,
 	.has_blt_ring = 1,
 	.is_valleyview = 1,
+	.display_mmio_offset = VLV_DISPLAY_BASE,
 };
 
 static const struct intel_device_info intel_valleyview_d_info = {
@@ -285,6 +286,7 @@ static const struct intel_device_info intel_valleyview_d_info = {
 	.has_bsd_ring = 1,
 	.has_blt_ring = 1,
 	.is_valleyview = 1,
+	.display_mmio_offset = VLV_DISPLAY_BASE,
 };
 
 static const struct intel_device_info intel_haswell_d_info = {
@@ -468,6 +470,8 @@ static int i915_drm_freeze(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	intel_set_power_well(dev, true);
+
 	drm_kms_helper_poll_disable(dev);
 
 	pci_save_state(dev->pdev);
@@ -779,9 +783,9 @@ int intel_gpu_reset(struct drm_device *dev)
 	}
 
 	/* Also reset the gpu hangman. */
-	if (dev_priv->stop_rings) {
+	if (dev_priv->gpu_error.stop_rings) {
 		DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
-		dev_priv->stop_rings = 0;
+		dev_priv->gpu_error.stop_rings = 0;
 		if (ret == -ENODEV) {
 			DRM_ERROR("Reset not implemented, but ignoring "
 				  "error for simulated gpu hangs\n");
@@ -820,12 +824,12 @@ int i915_reset(struct drm_device *dev)
 	i915_gem_reset(dev);
 
 	ret = -ENODEV;
-	if (get_seconds() - dev_priv->last_gpu_reset < 5)
+	if (get_seconds() - dev_priv->gpu_error.last_reset < 5)
 		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
 	else
 		ret = intel_gpu_reset(dev);
 
-	dev_priv->last_gpu_reset = get_seconds();
+	dev_priv->gpu_error.last_reset = get_seconds();
 	if (ret) {
 		DRM_ERROR("Failed to reset chip.\n");
 		mutex_unlock(&dev->struct_mutex);
@@ -1115,102 +1119,6 @@ MODULE_LICENSE("GPL and additional rights");
 	((HAS_FORCE_WAKE((dev_priv)->dev)) && \
 	 ((reg) < 0x40000) && \
 	 ((reg) != FORCEWAKE))
-
-static bool IS_DISPLAYREG(u32 reg)
-{
-	/*
-	 * This should make it easier to transition modules over to the
-	 * new register block scheme, since we can do it incrementally.
-	 */
-	if (reg >= VLV_DISPLAY_BASE)
-		return false;
-
-	if (reg >= RENDER_RING_BASE &&
-	    reg < RENDER_RING_BASE + 0xff)
-		return false;
-	if (reg >= GEN6_BSD_RING_BASE &&
-	    reg < GEN6_BSD_RING_BASE + 0xff)
-		return false;
-	if (reg >= BLT_RING_BASE &&
-	    reg < BLT_RING_BASE + 0xff)
-		return false;
-
-	if (reg == PGTBL_ER)
-		return false;
-
-	if (reg >= IPEIR_I965 &&
-	    reg < HWSTAM)
-		return false;
-
-	if (reg == MI_MODE)
-		return false;
-
-	if (reg == GFX_MODE_GEN7)
-		return false;
-
-	if (reg == RENDER_HWS_PGA_GEN7 ||
-	    reg == BSD_HWS_PGA_GEN7 ||
-	    reg == BLT_HWS_PGA_GEN7)
-		return false;
-
-	if (reg == GEN6_BSD_SLEEP_PSMI_CONTROL ||
-	    reg == GEN6_BSD_RNCID)
-		return false;
-
-	if (reg == GEN6_BLITTER_ECOSKPD)
-		return false;
-
-	if (reg >= 0x4000c &&
-	    reg <= 0x4002c)
-		return false;
-
-	if (reg >= 0x4f000 &&
-	    reg <= 0x4f08f)
-		return false;
-
-	if (reg >= 0x4f100 &&
-	    reg <= 0x4f11f)
-		return false;
-
-	if (reg >= VLV_MASTER_IER &&
-	    reg <= GEN6_PMIER)
-		return false;
-
-	if (reg >= FENCE_REG_SANDYBRIDGE_0 &&
-	    reg < (FENCE_REG_SANDYBRIDGE_0 + (16*8)))
-		return false;
-
-	if (reg >= VLV_IIR_RW &&
-	    reg <= VLV_ISR)
-		return false;
-
-	if (reg == FORCEWAKE_VLV ||
-	    reg == FORCEWAKE_ACK_VLV)
-		return false;
-
-	if (reg == GEN6_GDRST)
-		return false;
-
-	switch (reg) {
-	case _3D_CHICKEN3:
-	case IVB_CHICKEN3:
-	case GEN7_COMMON_SLICE_CHICKEN1:
-	case GEN7_L3CNTLREG1:
-	case GEN7_L3_CHICKEN_MODE_REGISTER:
-	case GEN7_ROW_CHICKEN2:
-	case GEN7_L3SQCREG4:
-	case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG:
-	case GEN7_HALF_SLICE_CHICKEN1:
-	case GEN6_MBCTL:
-	case GEN6_UCGCTL2:
-		return false;
-	default:
-		break;
-	}
-
-	return true;
-}
-
 static void
 ilk_dummy_write(struct drm_i915_private *dev_priv)
 {
@@ -1234,8 +1142,6 @@ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
 		if (dev_priv->forcewake_count == 0) \
 			dev_priv->gt.force_wake_put(dev_priv); \
 		spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
-	} else if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
-		val = read##y(dev_priv->regs + reg + 0x180000); \
 	} else { \
 		val = read##y(dev_priv->regs + reg); \
 	} \
@@ -1262,11 +1168,7 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
 		DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
 		I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
 	} \
-	if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
-		write##y(val, dev_priv->regs + reg + 0x180000); \
-	} else { \
-		write##y(val, dev_priv->regs + reg); \
-	} \
+	write##y(val, dev_priv->regs + reg); \
 	if (unlikely(__fifo_ret)) { \
 		gen6_gt_check_fifodbg(dev_priv); \
 	} \
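With IS_DISPLAYREG() and its +0x180000 fixup gone from the accessors, the Valleyview displacement is stated once, in the device info (.display_mmio_offset = VLV_DISPLAY_BASE above), and folded into the display register definitions instead of being patched on every read and write. A hedged sketch of that pattern; the macro name and register offset here are illustrative, not lifted from this diff:

	/* display registers pick up the per-device base at definition
	 * time rather than in the mmio accessors */
	#define _DISPLAY_REG(offset) \
		(dev_priv->info->display_mmio_offset + (offset))

	#define EXAMPLE_PIPECONF	_DISPLAY_REG(0x70008)	/* illustrative */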
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b1b1b7350ca4..984523d809a8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -337,6 +337,7 @@ struct drm_i915_gt_funcs {
 	DEV_INFO_FLAG(has_llc)
 
 struct intel_device_info {
+	u32 display_mmio_offset;
 	u8 gen;
 	u8 is_mobile:1;
 	u8 is_i85x:1;
@@ -364,6 +365,49 @@ struct intel_device_info {
 	u8 has_llc:1;
 };
 
+enum i915_cache_level {
+	I915_CACHE_NONE = 0,
+	I915_CACHE_LLC,
+	I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
+};
+
+/* The Graphics Translation Table is the way in which GEN hardware translates a
+ * Graphics Virtual Address into a Physical Address. In addition to the normal
+ * collateral associated with any va->pa translations GEN hardware also has a
+ * portion of the GTT which can be mapped by the CPU and remain both coherent
+ * and correct (in cases like swizzling). That region is referred to as GMADR in
+ * the spec.
+ */
+struct i915_gtt {
+	unsigned long start;		/* Start offset of used GTT */
+	size_t total;			/* Total size GTT can map */
+	size_t stolen_size;		/* Total size of stolen memory */
+
+	unsigned long mappable_end;	/* End offset that we can CPU map */
+	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
+	phys_addr_t mappable_base;	/* PA of our GMADR */
+
+	/** "Graphics Stolen Memory" holds the global PTEs */
+	void __iomem *gsm;
+
+	bool do_idle_maps;
+	dma_addr_t scratch_page_dma;
+	struct page *scratch_page;
+
+	/* global gtt ops */
+	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
+			 size_t *stolen);
+	void (*gtt_remove)(struct drm_device *dev);
+	void (*gtt_clear_range)(struct drm_device *dev,
+				unsigned int first_entry,
+				unsigned int num_entries);
+	void (*gtt_insert_entries)(struct drm_device *dev,
+				   struct sg_table *st,
+				   unsigned int pg_start,
+				   enum i915_cache_level cache_level);
+};
+#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
+
 #define I915_PPGTT_PD_ENTRIES 512
 #define I915_PPGTT_PT_ENTRIES 1024
 struct i915_hw_ppgtt {
@@ -373,6 +417,16 @@ struct i915_hw_ppgtt {
 	uint32_t pd_offset;
 	dma_addr_t *pt_dma_addr;
 	dma_addr_t scratch_page_dma_addr;
+
+	/* pte functions, mirroring the interface of the global gtt. */
+	void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
+			    unsigned int first_entry,
+			    unsigned int num_entries);
+	void (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
+			       struct sg_table *st,
+			       unsigned int pg_start,
+			       enum i915_cache_level cache_level);
+	void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
 };
 
 
@@ -642,6 +696,153 @@ struct intel_l3_parity {
 	struct work_struct error_work;
 };
 
+struct i915_gem_mm {
+	/** Memory allocator for GTT stolen memory */
+	struct drm_mm stolen;
+	/** Memory allocator for GTT */
+	struct drm_mm gtt_space;
+	/** List of all objects in gtt_space. Used to restore gtt
+	 *  mappings on resume */
+	struct list_head bound_list;
+	/**
+	 * List of objects which are not bound to the GTT (thus
+	 * are idle and not used by the GPU) but still have
+	 * (presumably uncached) pages still attached.
+	 */
+	struct list_head unbound_list;
+
+	/** Usable portion of the GTT for GEM */
+	unsigned long stolen_base; /* limited to low memory (32-bit) */
+
+	int gtt_mtrr;
+
+	/** PPGTT used for aliasing the PPGTT with the GTT */
+	struct i915_hw_ppgtt *aliasing_ppgtt;
+
+	struct shrinker inactive_shrinker;
+	bool shrinker_no_lock_stealing;
+
+	/**
+	 * List of objects currently involved in rendering.
+	 *
+	 * Includes buffers having the contents of their GPU caches
+	 * flushed, not necessarily primitives. last_rendering_seqno
+	 * represents when the rendering involved will be completed.
+	 *
+	 * A reference is held on the buffer while on this list.
+	 */
+	struct list_head active_list;
+
+	/**
+	 * LRU list of objects which are not in the ringbuffer and
+	 * are ready to unbind, but are still in the GTT.
+	 *
+	 * last_rendering_seqno is 0 while an object is in this list.
+	 *
+	 * A reference is not held on the buffer while on this list,
+	 * as merely being GTT-bound shouldn't prevent its being
+	 * freed, and we'll pull it off the list in the free path.
+	 */
+	struct list_head inactive_list;
+
+	/** LRU list of objects with fence regs on them. */
+	struct list_head fence_list;
+
+	/**
+	 * We leave the user IRQ off as much as possible,
+	 * but this means that requests will finish and never
+	 * be retired once the system goes idle. Set a timer to
+	 * fire periodically while the ring is running. When it
+	 * fires, go retire requests.
+	 */
+	struct delayed_work retire_work;
+
+	/**
+	 * Are we in a non-interruptible section of code like
+	 * modesetting?
+	 */
+	bool interruptible;
+
+	/**
+	 * Flag if the X Server, and thus DRM, is not currently in
+	 * control of the device.
+	 *
+	 * This is set between LeaveVT and EnterVT. It needs to be
+	 * replaced with a semaphore. It also needs to be
+	 * transitioned away from for kernel modesetting.
+	 */
+	int suspended;
+
+	/** Bit 6 swizzling required for X tiling */
+	uint32_t bit_6_swizzle_x;
+	/** Bit 6 swizzling required for Y tiling */
+	uint32_t bit_6_swizzle_y;
+
+	/* storage for physical objects */
+	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
+
+	/* accounting, useful for userland debugging */
+	size_t object_memory;
+	u32 object_count;
+};
+
+struct i915_gpu_error {
+	/* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
+#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+	struct timer_list hangcheck_timer;
+	int hangcheck_count;
+	uint32_t last_acthd[I915_NUM_RINGS];
+	uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
+
+	/* For reset and error_state handling. */
+	spinlock_t lock;
+	/* Protected by the above dev->gpu_error.lock. */
+	struct drm_i915_error_state *first_error;
+	struct work_struct work;
+
+	unsigned long last_reset;
+
+	/**
+	 * State variable and reset counter controlling the reset flow
+	 *
+	 * Upper bits are for the reset counter. This counter is used by the
+	 * wait_seqno code to race-free noticed that a reset event happened and
+	 * that it needs to restart the entire ioctl (since most likely the
+	 * seqno it waited for won't ever signal anytime soon).
+	 *
+	 * This is important for lock-free wait paths, where no contended lock
+	 * naturally enforces the correct ordering between the bail-out of the
+	 * waiter and the gpu reset work code.
+	 *
+	 * Lowest bit controls the reset state machine: Set means a reset is in
+	 * progress. This state will (presuming we don't have any bugs) decay
+	 * into either unset (successful reset) or the special WEDGED value (hw
+	 * terminally sour). All waiters on the reset_queue will be woken when
+	 * that happens.
+	 */
+	atomic_t reset_counter;
+
+	/**
+	 * Special values/flags for reset_counter
+	 *
+	 * Note that the code relies on
+	 * I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
+	 * being true.
+	 */
+#define I915_RESET_IN_PROGRESS_FLAG	1
+#define I915_WEDGED			0xffffffff
+
+	/**
+	 * Waitqueue to signal when the reset has completed. Used by clients
+	 * that wait for dev_priv->mm.wedged to settle.
+	 */
+	wait_queue_head_t reset_queue;
+
+	/* For gpu hang simulation. */
+	unsigned int stop_rings;
+};
+
 typedef struct drm_i915_private {
 	struct drm_device *dev;
 	struct kmem_cache *slab;
@@ -697,7 +898,6 @@ typedef struct drm_i915_private {
 	u32 pipestat[2];
 	u32 irq_mask;
 	u32 gt_irq_mask;
-	u32 pch_irq_mask;
 
 	u32 hotplug_supported_mask;
 	struct work_struct hotplug_work;
@@ -706,16 +906,6 @@ typedef struct drm_i915_private {
 	int num_pipe;
 	int num_pch_pll;
 
-	/* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
-#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
-	struct timer_list hangcheck_timer;
-	int hangcheck_count;
-	uint32_t last_acthd[I915_NUM_RINGS];
-	uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
-
-	unsigned int stop_rings;
-
 	unsigned long cfb_size;
 	unsigned int cfb_fb;
 	enum plane cfb_plane;
@@ -763,11 +953,6 @@ typedef struct drm_i915_private {
 
 	unsigned int fsb_freq, mem_freq, is_ddr3;
 
-	spinlock_t error_lock;
-	/* Protected by dev->error_lock. */
-	struct drm_i915_error_state *first_error;
-	struct work_struct error_work;
-	struct completion error_completion;
 	struct workqueue_struct *wq;
 
 	/* Display functions */
@@ -782,116 +967,9 @@ typedef struct drm_i915_private { | |||
782 | /* Register state */ | 967 | /* Register state */ |
783 | bool modeset_on_lid; | 968 | bool modeset_on_lid; |
784 | 969 | ||
785 | struct { | 970 | struct i915_gtt gtt; |
786 | /** Bridge to intel-gtt-ko */ | 971 | |
787 | struct intel_gtt *gtt; | 972 | struct i915_gem_mm mm; |
788 | /** Memory allocator for GTT stolen memory */ | ||
789 | struct drm_mm stolen; | ||
790 | /** Memory allocator for GTT */ | ||
791 | struct drm_mm gtt_space; | ||
792 | /** List of all objects in gtt_space. Used to restore gtt | ||
793 | * mappings on resume */ | ||
794 | struct list_head bound_list; | ||
795 | /** | ||
796 | * List of objects which are not bound to the GTT (thus | ||
797 | * are idle and not used by the GPU) but still have | ||
798 | * (presumably uncached) pages still attached. | ||
799 | */ | ||
800 | struct list_head unbound_list; | ||
801 | |||
802 | /** Usable portion of the GTT for GEM */ | ||
803 | unsigned long gtt_start; | ||
804 | unsigned long gtt_mappable_end; | ||
805 | unsigned long gtt_end; | ||
806 | unsigned long stolen_base; /* limited to low memory (32-bit) */ | ||
807 | |||
808 | /** "Graphics Stolen Memory" holds the global PTEs */ | ||
809 | void __iomem *gsm; | ||
810 | |||
811 | struct io_mapping *gtt_mapping; | ||
812 | phys_addr_t gtt_base_addr; | ||
813 | int gtt_mtrr; | ||
814 | |||
815 | /** PPGTT used for aliasing the PPGTT with the GTT */ | ||
816 | struct i915_hw_ppgtt *aliasing_ppgtt; | ||
817 | |||
818 | struct shrinker inactive_shrinker; | ||
819 | bool shrinker_no_lock_stealing; | ||
820 | |||
821 | /** | ||
822 | * List of objects currently involved in rendering. | ||
823 | * | ||
824 | * Includes buffers having the contents of their GPU caches | ||
825 | * flushed, not necessarily primitives. last_rendering_seqno | ||
826 | * represents when the rendering involved will be completed. | ||
827 | * | ||
828 | * A reference is held on the buffer while on this list. | ||
829 | */ | ||
830 | struct list_head active_list; | ||
831 | |||
832 | /** | ||
833 | * LRU list of objects which are not in the ringbuffer and | ||
834 | * are ready to unbind, but are still in the GTT. | ||
835 | * | ||
836 | * last_rendering_seqno is 0 while an object is in this list. | ||
837 | * | ||
838 | * A reference is not held on the buffer while on this list, | ||
839 | * as merely being GTT-bound shouldn't prevent its being | ||
840 | * freed, and we'll pull it off the list in the free path. | ||
841 | */ | ||
842 | struct list_head inactive_list; | ||
843 | |||
844 | /** LRU list of objects with fence regs on them. */ | ||
845 | struct list_head fence_list; | ||
846 | |||
847 | /** | ||
848 | * We leave the user IRQ off as much as possible, | ||
849 | * but this means that requests will finish and never | ||
850 | * be retired once the system goes idle. Set a timer to | ||
851 | * fire periodically while the ring is running. When it | ||
852 | * fires, go retire requests. | ||
853 | */ | ||
854 | struct delayed_work retire_work; | ||
855 | |||
856 | /** | ||
857 | * Are we in a non-interruptible section of code like | ||
858 | * modesetting? | ||
859 | */ | ||
860 | bool interruptible; | ||
861 | |||
862 | /** | ||
863 | * Flag if the X Server, and thus DRM, is not currently in | ||
864 | * control of the device. | ||
865 | * | ||
866 | * This is set between LeaveVT and EnterVT. It needs to be | ||
867 | * replaced with a semaphore. It also needs to be | ||
868 | * transitioned away from for kernel modesetting. | ||
869 | */ | ||
870 | int suspended; | ||
871 | |||
872 | /** | ||
873 | * Flag if the hardware appears to be wedged. | ||
874 | * | ||
875 | * This is set when attempts to idle the device timeout. | ||
876 | * It prevents command submission from occurring and makes | ||
877 | * every pending request fail | ||
878 | */ | ||
879 | atomic_t wedged; | ||
880 | |||
881 | /** Bit 6 swizzling required for X tiling */ | ||
882 | uint32_t bit_6_swizzle_x; | ||
883 | /** Bit 6 swizzling required for Y tiling */ | ||
884 | uint32_t bit_6_swizzle_y; | ||
885 | |||
886 | /* storage for physical objects */ | ||
887 | struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; | ||
888 | |||
889 | /* accounting, useful for userland debugging */ | ||
890 | size_t gtt_total; | ||
891 | size_t mappable_gtt_total; | ||
892 | size_t object_memory; | ||
893 | u32 object_count; | ||
894 | } mm; | ||
895 | 973 | ||
896 | /* Kernel Modesetting */ | 974 | /* Kernel Modesetting */ |
897 | 975 | ||
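Annotation: the large anonymous dev_priv->mm aggregate deleted above is split into the new i915_gtt and i915_gem_mm structures. The rename map below is reconstructed from the substitutions visible in the hunks that follow — a reading aid, not a verbatim copy of the new header:

	/* old field (dev_priv->mm.*)       new home (dev_priv->*)
	 * gtt_total                   ->   gtt.total
	 * gtt_mappable_end            ->   gtt.mappable_end
	 * gtt_mapping                 ->   gtt.mappable
	 * gtt_base_addr               ->   gtt.mappable_base
	 * gtt_space, the object lists,
	 * retire_work, shrinker, the
	 * swizzle bits, accounting    ->   mm.* (struct i915_gem_mm, names kept)
	 */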
@@ -933,7 +1011,7 @@ typedef struct drm_i915_private { | |||
933 | struct drm_mm_node *compressed_fb; | 1011 | struct drm_mm_node *compressed_fb; |
934 | struct drm_mm_node *compressed_llb; | 1012 | struct drm_mm_node *compressed_llb; |
935 | 1013 | ||
936 | unsigned long last_gpu_reset; | 1014 | struct i915_gpu_error gpu_error; |
937 | 1015 | ||
938 | /* list of fbdev register on this device */ | 1016 | /* list of fbdev register on this device */ |
939 | struct intel_fbdev *fbdev; | 1017 | struct intel_fbdev *fbdev; |
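Annotation: the hang/reset fields deleted from drm_i915_private in the earlier hunks reappear consolidated behind the single gpu_error member added here. A reconstruction of struct i915_gpu_error, inferred from the removals above and the accessors used below (not the header text itself):

	struct i915_gpu_error {
		/* hangcheck state, previously loose in drm_i915_private */
		struct timer_list hangcheck_timer;
		/* error-state capture, was first_error/error_lock/error_work */
		spinlock_t lock;
		struct drm_i915_error_state *first_error;
		struct work_struct work;
		/* replaces mm.wedged and last_gpu_reset */
		atomic_t reset_counter;
		/* replaces the error_completion */
		wait_queue_head_t reset_queue;
	};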
@@ -973,12 +1051,6 @@ enum hdmi_force_audio { | |||
973 | HDMI_AUDIO_ON, /* force turn on HDMI audio */ | 1051 | HDMI_AUDIO_ON, /* force turn on HDMI audio */ |
974 | }; | 1052 | }; |
975 | 1053 | ||
976 | enum i915_cache_level { | ||
977 | I915_CACHE_NONE = 0, | ||
978 | I915_CACHE_LLC, | ||
979 | I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */ | ||
980 | }; | ||
981 | |||
982 | #define I915_GTT_RESERVED ((struct drm_mm_node *)0x1) | 1054 | #define I915_GTT_RESERVED ((struct drm_mm_node *)0x1) |
983 | 1055 | ||
984 | struct drm_i915_gem_object_ops { | 1056 | struct drm_i915_gem_object_ops { |
@@ -1446,6 +1518,7 @@ int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, | |||
1446 | bool nonblocking); | 1518 | bool nonblocking); |
1447 | void i915_gem_object_unpin(struct drm_i915_gem_object *obj); | 1519 | void i915_gem_object_unpin(struct drm_i915_gem_object *obj); |
1448 | int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); | 1520 | int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); |
1521 | int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); | ||
1449 | void i915_gem_release_mmap(struct drm_i915_gem_object *obj); | 1522 | void i915_gem_release_mmap(struct drm_i915_gem_object *obj); |
1450 | void i915_gem_lastclose(struct drm_device *dev); | 1523 | void i915_gem_lastclose(struct drm_device *dev); |
1451 | 1524 | ||
@@ -1524,8 +1597,18 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj) | |||
1524 | 1597 | ||
1525 | void i915_gem_retire_requests(struct drm_device *dev); | 1598 | void i915_gem_retire_requests(struct drm_device *dev); |
1526 | void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring); | 1599 | void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring); |
1527 | int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv, | 1600 | int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, |
1528 | bool interruptible); | 1601 | bool interruptible); |
1602 | static inline bool i915_reset_in_progress(struct i915_gpu_error *error) | ||
1603 | { | ||
1604 | return unlikely(atomic_read(&error->reset_counter) | ||
1605 | & I915_RESET_IN_PROGRESS_FLAG); | ||
1606 | } | ||
1607 | |||
1608 | static inline bool i915_terminally_wedged(struct i915_gpu_error *error) | ||
1609 | { | ||
1610 | return atomic_read(&error->reset_counter) == I915_WEDGED; | ||
1611 | } | ||
1529 | 1612 | ||
1530 | void i915_gem_reset(struct drm_device *dev); | 1613 | void i915_gem_reset(struct drm_device *dev); |
1531 | void i915_gem_clflush_object(struct drm_i915_gem_object *obj); | 1614 | void i915_gem_clflush_object(struct drm_i915_gem_object *obj); |
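Annotation: these two helpers encode the whole hang/reset state machine in one atomic_t. A minimal user-space model of that encoding, assuming I915_RESET_IN_PROGRESS_FLAG is the low bit and I915_WEDGED is the saturated all-ones value — neither define is shown in this hunk:

	#include <stdbool.h>
	#include <stdint.h>

	#define RESET_IN_PROGRESS_FLAG 0x1         /* assumed: odd counter = reset pending */
	#define WEDGED                 0xffffffffu /* assumed: saturated = terminally dead */

	static bool reset_in_progress(uint32_t c) { return c & RESET_IN_PROGRESS_FLAG; }
	static bool terminally_wedged(uint32_t c) { return c == WEDGED; }

	/* Lifecycle: the counter starts even (idle); the error handler sets the
	 * low bit when it declares a hang, increments past it again on a
	 * successful reset, or saturates to WEDGED when the reset fails. */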
@@ -1566,9 +1649,10 @@ void i915_gem_free_all_phys_object(struct drm_device *dev); | |||
1566 | void i915_gem_release(struct drm_device *dev, struct drm_file *file); | 1649 | void i915_gem_release(struct drm_device *dev, struct drm_file *file); |
1567 | 1650 | ||
1568 | uint32_t | 1651 | uint32_t |
1569 | i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, | 1652 | i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode); |
1570 | uint32_t size, | 1653 | uint32_t |
1571 | int tiling_mode); | 1654 | i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size, |
1655 | int tiling_mode, bool fenced); | ||
1572 | 1656 | ||
1573 | int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, | 1657 | int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, |
1574 | enum i915_cache_level cache_level); | 1658 | enum i915_cache_level cache_level); |
@@ -1591,7 +1675,6 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, | |||
1591 | struct drm_file *file); | 1675 | struct drm_file *file); |
1592 | 1676 | ||
1593 | /* i915_gem_gtt.c */ | 1677 | /* i915_gem_gtt.c */ |
1594 | int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev); | ||
1595 | void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev); | 1678 | void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev); |
1596 | void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, | 1679 | void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, |
1597 | struct drm_i915_gem_object *obj, | 1680 | struct drm_i915_gem_object *obj, |
@@ -1609,7 +1692,6 @@ void i915_gem_init_global_gtt(struct drm_device *dev); | |||
1609 | void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start, | 1692 | void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start, |
1610 | unsigned long mappable_end, unsigned long end); | 1693 | unsigned long mappable_end, unsigned long end); |
1611 | int i915_gem_gtt_init(struct drm_device *dev); | 1694 | int i915_gem_gtt_init(struct drm_device *dev); |
1612 | void i915_gem_gtt_fini(struct drm_device *dev); | ||
1613 | static inline void i915_gem_chipset_flush(struct drm_device *dev) | 1695 | static inline void i915_gem_chipset_flush(struct drm_device *dev) |
1614 | { | 1696 | { |
1615 | if (INTEL_INFO(dev)->gen < 6) | 1697 | if (INTEL_INFO(dev)->gen < 6) |
@@ -1668,9 +1750,9 @@ void i915_debugfs_cleanup(struct drm_minor *minor); | |||
1668 | extern int i915_save_state(struct drm_device *dev); | 1750 | extern int i915_save_state(struct drm_device *dev); |
1669 | extern int i915_restore_state(struct drm_device *dev); | 1751 | extern int i915_restore_state(struct drm_device *dev); |
1670 | 1752 | ||
1671 | /* i915_suspend.c */ | 1753 | /* i915_ums.c */ |
1672 | extern int i915_save_state(struct drm_device *dev); | 1754 | void i915_save_display_reg(struct drm_device *dev); |
1673 | extern int i915_restore_state(struct drm_device *dev); | 1755 | void i915_restore_display_reg(struct drm_device *dev); |
1674 | 1756 | ||
1675 | /* i915_sysfs.c */ | 1757 | /* i915_sysfs.c */ |
1676 | void i915_setup_sysfs(struct drm_device *dev_priv); | 1758 | void i915_setup_sysfs(struct drm_device *dev_priv); |
@@ -1727,6 +1809,7 @@ extern void intel_modeset_cleanup(struct drm_device *dev); | |||
1727 | extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); | 1809 | extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); |
1728 | extern void intel_modeset_setup_hw_state(struct drm_device *dev, | 1810 | extern void intel_modeset_setup_hw_state(struct drm_device *dev, |
1729 | bool force_restore); | 1811 | bool force_restore); |
1812 | extern void i915_redisable_vga(struct drm_device *dev); | ||
1730 | extern bool intel_fbc_enabled(struct drm_device *dev); | 1813 | extern bool intel_fbc_enabled(struct drm_device *dev); |
1731 | extern void intel_disable_fbc(struct drm_device *dev); | 1814 | extern void intel_disable_fbc(struct drm_device *dev); |
1732 | extern bool ironlake_set_drps(struct drm_device *dev, u8 val); | 1815 | extern bool ironlake_set_drps(struct drm_device *dev, u8 val); |
@@ -1799,5 +1882,19 @@ __i915_write(64, q) | |||
1799 | #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) | 1882 | #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) |
1800 | #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) | 1883 | #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) |
1801 | 1884 | ||
1885 | /* "Broadcast RGB" property */ | ||
1886 | #define INTEL_BROADCAST_RGB_AUTO 0 | ||
1887 | #define INTEL_BROADCAST_RGB_FULL 1 | ||
1888 | #define INTEL_BROADCAST_RGB_LIMITED 2 | ||
1889 | |||
1890 | static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev) | ||
1891 | { | ||
1892 | if (HAS_PCH_SPLIT(dev)) | ||
1893 | return CPU_VGACNTRL; | ||
1894 | else if (IS_VALLEYVIEW(dev)) | ||
1895 | return VLV_VGACNTRL; | ||
1896 | else | ||
1897 | return VGACNTRL; | ||
1898 | } | ||
1802 | 1899 | ||
1803 | #endif | 1900 | #endif |
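Annotation: a hedged usage sketch for the new i915_vgacntrl_reg() helper — VGA_DISP_DISABLE and the I915_READ/I915_WRITE accessors are the driver's existing names, assumed unchanged by this series:

	/* Disable the legacy VGA plane without open-coding the
	 * per-platform register offset. */
	u32 vgacntrl = I915_READ(i915_vgacntrl_reg(dev));
	if ((vgacntrl & VGA_DISP_DISABLE) == 0)
		I915_WRITE(i915_vgacntrl_reg(dev), vgacntrl | VGA_DISP_DISABLE);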
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 4418e14e3d69..62be74899c2b 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -87,47 +87,43 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, | |||
87 | } | 87 | } |
88 | 88 | ||
89 | static int | 89 | static int |
90 | i915_gem_wait_for_error(struct drm_device *dev) | 90 | i915_gem_wait_for_error(struct i915_gpu_error *error) |
91 | { | 91 | { |
92 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
93 | struct completion *x = &dev_priv->error_completion; | ||
94 | unsigned long flags; | ||
95 | int ret; | 92 | int ret; |
96 | 93 | ||
97 | if (!atomic_read(&dev_priv->mm.wedged)) | 94 | #define EXIT_COND (!i915_reset_in_progress(error)) |
95 | if (EXIT_COND) | ||
98 | return 0; | 96 | return 0; |
99 | 97 | ||
98 | /* GPU is already declared terminally dead, give up. */ | ||
99 | if (i915_terminally_wedged(error)) | ||
100 | return -EIO; | ||
101 | |||
100 | /* | 102 | /* |
101 | * Only wait 10 seconds for the gpu reset to complete to avoid hanging | 103 | * Only wait 10 seconds for the gpu reset to complete to avoid hanging |
102 | * userspace. If it takes that long something really bad is going on and | 104 | * userspace. If it takes that long something really bad is going on and |
103 | * we should simply try to bail out and fail as gracefully as possible. | 105 | * we should simply try to bail out and fail as gracefully as possible. |
104 | */ | 106 | */ |
105 | ret = wait_for_completion_interruptible_timeout(x, 10*HZ); | 107 | ret = wait_event_interruptible_timeout(error->reset_queue, |
108 | EXIT_COND, | ||
109 | 10*HZ); | ||
106 | if (ret == 0) { | 110 | if (ret == 0) { |
107 | DRM_ERROR("Timed out waiting for the gpu reset to complete\n"); | 111 | DRM_ERROR("Timed out waiting for the gpu reset to complete\n"); |
108 | return -EIO; | 112 | return -EIO; |
109 | } else if (ret < 0) { | 113 | } else if (ret < 0) { |
110 | return ret; | 114 | return ret; |
111 | } | 115 | } |
116 | #undef EXIT_COND | ||
112 | 117 | ||
113 | if (atomic_read(&dev_priv->mm.wedged)) { | ||
114 | /* GPU is hung, bump the completion count to account for | ||
115 | * the token we just consumed so that we never hit zero and | ||
116 | * end up waiting upon a subsequent completion event that | ||
117 | * will never happen. | ||
118 | */ | ||
119 | spin_lock_irqsave(&x->wait.lock, flags); | ||
120 | x->done++; | ||
121 | spin_unlock_irqrestore(&x->wait.lock, flags); | ||
122 | } | ||
123 | return 0; | 118 | return 0; |
124 | } | 119 | } |
125 | 120 | ||
126 | int i915_mutex_lock_interruptible(struct drm_device *dev) | 121 | int i915_mutex_lock_interruptible(struct drm_device *dev) |
127 | { | 122 | { |
123 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
128 | int ret; | 124 | int ret; |
129 | 125 | ||
130 | ret = i915_gem_wait_for_error(dev); | 126 | ret = i915_gem_wait_for_error(&dev_priv->gpu_error); |
131 | if (ret) | 127 | if (ret) |
132 | return ret; | 128 | return ret; |
133 | 129 | ||
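Annotation: the rewrite above swaps a struct completion for a wait queue plus a predicate. The difference matters: a completion counts tokens, which is why the old code had to bump x->done back up after consuming one on a wedged GPU; a predicate is simply re-evaluated on every wakeup, so nothing can leak. The producer side reduces to one call — assumed to live in the irq/reset code outside this diff:

	/* on reset completion, or when declaring the GPU terminally wedged: */
	wake_up_all(&dev_priv->gpu_error.reset_queue);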
@@ -149,6 +145,7 @@ int | |||
149 | i915_gem_init_ioctl(struct drm_device *dev, void *data, | 145 | i915_gem_init_ioctl(struct drm_device *dev, void *data, |
150 | struct drm_file *file) | 146 | struct drm_file *file) |
151 | { | 147 | { |
148 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
152 | struct drm_i915_gem_init *args = data; | 149 | struct drm_i915_gem_init *args = data; |
153 | 150 | ||
154 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 151 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
@@ -165,6 +162,7 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data, | |||
165 | mutex_lock(&dev->struct_mutex); | 162 | mutex_lock(&dev->struct_mutex); |
166 | i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end, | 163 | i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end, |
167 | args->gtt_end); | 164 | args->gtt_end); |
165 | dev_priv->gtt.mappable_end = args->gtt_end; | ||
168 | mutex_unlock(&dev->struct_mutex); | 166 | mutex_unlock(&dev->struct_mutex); |
169 | 167 | ||
170 | return 0; | 168 | return 0; |
@@ -186,7 +184,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, | |||
186 | pinned += obj->gtt_space->size; | 184 | pinned += obj->gtt_space->size; |
187 | mutex_unlock(&dev->struct_mutex); | 185 | mutex_unlock(&dev->struct_mutex); |
188 | 186 | ||
189 | args->aper_size = dev_priv->mm.gtt_total; | 187 | args->aper_size = dev_priv->gtt.total; |
190 | args->aper_available_size = args->aper_size - pinned; | 188 | args->aper_available_size = args->aper_size - pinned; |
191 | 189 | ||
192 | return 0; | 190 | return 0; |
@@ -637,7 +635,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, | |||
637 | * source page isn't available. Return the error and we'll | 635 | * source page isn't available. Return the error and we'll |
638 | * retry in the slow path. | 636 | * retry in the slow path. |
639 | */ | 637 | */ |
640 | if (fast_user_write(dev_priv->mm.gtt_mapping, page_base, | 638 | if (fast_user_write(dev_priv->gtt.mappable, page_base, |
641 | page_offset, user_data, page_length)) { | 639 | page_offset, user_data, page_length)) { |
642 | ret = -EFAULT; | 640 | ret = -EFAULT; |
643 | goto out_unpin; | 641 | goto out_unpin; |
@@ -937,26 +935,17 @@ unlock: | |||
937 | } | 935 | } |
938 | 936 | ||
939 | int | 937 | int |
940 | i915_gem_check_wedge(struct drm_i915_private *dev_priv, | 938 | i915_gem_check_wedge(struct i915_gpu_error *error, |
941 | bool interruptible) | 939 | bool interruptible) |
942 | { | 940 | { |
943 | if (atomic_read(&dev_priv->mm.wedged)) { | 941 | if (i915_reset_in_progress(error)) { |
944 | struct completion *x = &dev_priv->error_completion; | ||
945 | bool recovery_complete; | ||
946 | unsigned long flags; | ||
947 | |||
948 | /* Give the error handler a chance to run. */ | ||
949 | spin_lock_irqsave(&x->wait.lock, flags); | ||
950 | recovery_complete = x->done > 0; | ||
951 | spin_unlock_irqrestore(&x->wait.lock, flags); | ||
952 | |||
953 | /* Non-interruptible callers can't handle -EAGAIN, hence return | 942 | /* Non-interruptible callers can't handle -EAGAIN, hence return |
954 | * -EIO unconditionally for these. */ | 943 | * -EIO unconditionally for these. */ |
955 | if (!interruptible) | 944 | if (!interruptible) |
956 | return -EIO; | 945 | return -EIO; |
957 | 946 | ||
958 | /* Recovery complete, but still wedged means reset failure. */ | 947 | /* Recovery complete, but the reset failed ... */ |
959 | if (recovery_complete) | 948 | if (i915_terminally_wedged(error)) |
960 | return -EIO; | 949 | return -EIO; |
961 | 950 | ||
962 | return -EAGAIN; | 951 | return -EAGAIN; |
@@ -987,13 +976,22 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno) | |||
987 | * __wait_seqno - wait until execution of seqno has finished | 976 | * __wait_seqno - wait until execution of seqno has finished |
988 | * @ring: the ring expected to report seqno | 977 | * @ring: the ring expected to report seqno |
989 | * @seqno: duh! | 978 | * @seqno: duh! |
979 | * @reset_counter: reset sequence associated with the given seqno | ||
990 | * @interruptible: do an interruptible wait (normally yes) | 980 | * @interruptible: do an interruptible wait (normally yes) |
991 | * @timeout: in - how long to wait (NULL forever); out - how much time remaining | 981 | * @timeout: in - how long to wait (NULL forever); out - how much time remaining |
992 | * | 982 | * |
983 | * Note: It is of utmost importance that the passed in seqno and reset_counter | ||
984 | * values have been read by the caller in an smp safe manner. Where read-side | ||
985 | * locks are involved, it is sufficient to read the reset_counter before | ||
986 | * unlocking the lock that protects the seqno. For lockless tricks, the | ||
987 | * reset_counter _must_ be read before, and an appropriate smp_rmb must be | ||
988 | * inserted. | ||
989 | * | ||
993 | * Returns 0 if the seqno was found within the allotted time. Else returns the | 990 | * Returns 0 if the seqno was found within the allotted time. Else returns the |
994 | * errno with remaining time filled in timeout argument. | 991 | * errno with remaining time filled in timeout argument. |
995 | */ | 992 | */ |
996 | static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, | 993 | static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, |
994 | unsigned reset_counter, | ||
997 | bool interruptible, struct timespec *timeout) | 995 | bool interruptible, struct timespec *timeout) |
998 | { | 996 | { |
999 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | 997 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
@@ -1023,7 +1021,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, | |||
1023 | 1021 | ||
1024 | #define EXIT_COND \ | 1022 | #define EXIT_COND \ |
1025 | (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \ | 1023 | (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \ |
1026 | atomic_read(&dev_priv->mm.wedged)) | 1024 | i915_reset_in_progress(&dev_priv->gpu_error) || \ |
1025 | reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) | ||
1027 | do { | 1026 | do { |
1028 | if (interruptible) | 1027 | if (interruptible) |
1029 | end = wait_event_interruptible_timeout(ring->irq_queue, | 1028 | end = wait_event_interruptible_timeout(ring->irq_queue, |
@@ -1033,7 +1032,14 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, | |||
1033 | end = wait_event_timeout(ring->irq_queue, EXIT_COND, | 1032 | end = wait_event_timeout(ring->irq_queue, EXIT_COND, |
1034 | timeout_jiffies); | 1033 | timeout_jiffies); |
1035 | 1034 | ||
1036 | ret = i915_gem_check_wedge(dev_priv, interruptible); | 1035 | /* We need to check whether any gpu reset happened in between |
1036 | * the caller grabbing the seqno and now ... */ | ||
1037 | if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) | ||
1038 | end = -EAGAIN; | ||
1039 | |||
1040 | /* ... but upgrade the -EAGAIN to an -EIO if the gpu is truly | ||
1041 | * gone. */ | ||
1042 | ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible); | ||
1037 | if (ret) | 1043 | if (ret) |
1038 | end = ret; | 1044 | end = ret; |
1039 | } while (end == 0 && wait_forever); | 1045 | } while (end == 0 && wait_forever); |
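Annotation: the kerneldoc added above insists that seqno and reset_counter be sampled together, under the same lock. Every caller converted later in this file follows the same shape; a condensed sketch of that contract (field names as used in those callers):

	mutex_lock(&dev->struct_mutex);
	seqno = obj->last_read_seqno;   /* state protected by struct_mutex */
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	mutex_unlock(&dev->struct_mutex);

	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
	/* -EAGAIN: a reset completed meanwhile, the seqno is meaningless; retry.
	 * -EIO:    the GPU is terminally wedged; give up. */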
@@ -1079,7 +1085,7 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno) | |||
1079 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | 1085 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); |
1080 | BUG_ON(seqno == 0); | 1086 | BUG_ON(seqno == 0); |
1081 | 1087 | ||
1082 | ret = i915_gem_check_wedge(dev_priv, interruptible); | 1088 | ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible); |
1083 | if (ret) | 1089 | if (ret) |
1084 | return ret; | 1090 | return ret; |
1085 | 1091 | ||
@@ -1087,7 +1093,9 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno) | |||
1087 | if (ret) | 1093 | if (ret) |
1088 | return ret; | 1094 | return ret; |
1089 | 1095 | ||
1090 | return __wait_seqno(ring, seqno, interruptible, NULL); | 1096 | return __wait_seqno(ring, seqno, |
1097 | atomic_read(&dev_priv->gpu_error.reset_counter), | ||
1098 | interruptible, NULL); | ||
1091 | } | 1099 | } |
1092 | 1100 | ||
1093 | /** | 1101 | /** |
@@ -1134,6 +1142,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, | |||
1134 | struct drm_device *dev = obj->base.dev; | 1142 | struct drm_device *dev = obj->base.dev; |
1135 | struct drm_i915_private *dev_priv = dev->dev_private; | 1143 | struct drm_i915_private *dev_priv = dev->dev_private; |
1136 | struct intel_ring_buffer *ring = obj->ring; | 1144 | struct intel_ring_buffer *ring = obj->ring; |
1145 | unsigned reset_counter; | ||
1137 | u32 seqno; | 1146 | u32 seqno; |
1138 | int ret; | 1147 | int ret; |
1139 | 1148 | ||
@@ -1144,7 +1153,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, | |||
1144 | if (seqno == 0) | 1153 | if (seqno == 0) |
1145 | return 0; | 1154 | return 0; |
1146 | 1155 | ||
1147 | ret = i915_gem_check_wedge(dev_priv, true); | 1156 | ret = i915_gem_check_wedge(&dev_priv->gpu_error, true); |
1148 | if (ret) | 1157 | if (ret) |
1149 | return ret; | 1158 | return ret; |
1150 | 1159 | ||
@@ -1152,8 +1161,9 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, | |||
1152 | if (ret) | 1161 | if (ret) |
1153 | return ret; | 1162 | return ret; |
1154 | 1163 | ||
1164 | reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); | ||
1155 | mutex_unlock(&dev->struct_mutex); | 1165 | mutex_unlock(&dev->struct_mutex); |
1156 | ret = __wait_seqno(ring, seqno, true, NULL); | 1166 | ret = __wait_seqno(ring, seqno, reset_counter, true, NULL); |
1157 | mutex_lock(&dev->struct_mutex); | 1167 | mutex_lock(&dev->struct_mutex); |
1158 | 1168 | ||
1159 | i915_gem_retire_requests_ring(ring); | 1169 | i915_gem_retire_requests_ring(ring); |
@@ -1362,7 +1372,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1362 | 1372 | ||
1363 | obj->fault_mappable = true; | 1373 | obj->fault_mappable = true; |
1364 | 1374 | ||
1365 | pfn = ((dev_priv->mm.gtt_base_addr + obj->gtt_offset) >> PAGE_SHIFT) + | 1375 | pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) + |
1366 | page_offset; | 1376 | page_offset; |
1367 | 1377 | ||
1368 | /* Finally, remap it using the new GTT offset */ | 1378 | /* Finally, remap it using the new GTT offset */ |
@@ -1377,7 +1387,7 @@ out: | |||
1377 | /* If this -EIO is due to a gpu hang, give the reset code a | 1387 | /* If this -EIO is due to a gpu hang, give the reset code a |
1378 | * chance to clean up the mess. Otherwise return the proper | 1388 | * chance to clean up the mess. Otherwise return the proper |
1379 | * SIGBUS. */ | 1389 | * SIGBUS. */ |
1380 | if (!atomic_read(&dev_priv->mm.wedged)) | 1390 | if (i915_terminally_wedged(&dev_priv->gpu_error)) |
1381 | return VM_FAULT_SIGBUS; | 1391 | return VM_FAULT_SIGBUS; |
1382 | case -EAGAIN: | 1392 | case -EAGAIN: |
1383 | /* Give the error handler a chance to run and move the | 1393 | /* Give the error handler a chance to run and move the |
@@ -1435,7 +1445,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj) | |||
1435 | obj->fault_mappable = false; | 1445 | obj->fault_mappable = false; |
1436 | } | 1446 | } |
1437 | 1447 | ||
1438 | static uint32_t | 1448 | uint32_t |
1439 | i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) | 1449 | i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) |
1440 | { | 1450 | { |
1441 | uint32_t gtt_size; | 1451 | uint32_t gtt_size; |
@@ -1463,16 +1473,15 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) | |||
1463 | * Return the required GTT alignment for an object, taking into account | 1473 | * Return the required GTT alignment for an object, taking into account |
1464 | * potential fence register mapping. | 1474 | * potential fence register mapping. |
1465 | */ | 1475 | */ |
1466 | static uint32_t | 1476 | uint32_t |
1467 | i915_gem_get_gtt_alignment(struct drm_device *dev, | 1477 | i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size, |
1468 | uint32_t size, | 1478 | int tiling_mode, bool fenced) |
1469 | int tiling_mode) | ||
1470 | { | 1479 | { |
1471 | /* | 1480 | /* |
1472 | * Minimum alignment is 4k (GTT page size), but might be greater | 1481 | * Minimum alignment is 4k (GTT page size), but might be greater |
1473 | * if a fence register is needed for the object. | 1482 | * if a fence register is needed for the object. |
1474 | */ | 1483 | */ |
1475 | if (INTEL_INFO(dev)->gen >= 4 || | 1484 | if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) || |
1476 | tiling_mode == I915_TILING_NONE) | 1485 | tiling_mode == I915_TILING_NONE) |
1477 | return 4096; | 1486 | return 4096; |
1478 | 1487 | ||
@@ -1483,35 +1492,6 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, | |||
1483 | return i915_gem_get_gtt_size(dev, size, tiling_mode); | 1492 | return i915_gem_get_gtt_size(dev, size, tiling_mode); |
1484 | } | 1493 | } |
1485 | 1494 | ||
1486 | /** | ||
1487 | * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an | ||
1488 | * unfenced object | ||
1489 | * @dev: the device | ||
1490 | * @size: size of the object | ||
1491 | * @tiling_mode: tiling mode of the object | ||
1492 | * | ||
1493 | * Return the required GTT alignment for an object, only taking into account | ||
1494 | * unfenced tiled surface requirements. | ||
1495 | */ | ||
1496 | uint32_t | ||
1497 | i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, | ||
1498 | uint32_t size, | ||
1499 | int tiling_mode) | ||
1500 | { | ||
1501 | /* | ||
1502 | * Minimum alignment is 4k (GTT page size) for sane hw. | ||
1503 | */ | ||
1504 | if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) || | ||
1505 | tiling_mode == I915_TILING_NONE) | ||
1506 | return 4096; | ||
1507 | |||
1508 | /* Previous hardware however needs to be aligned to a power-of-two | ||
1509 | * tile height. The simplest method for determining this is to reuse | ||
1510 | * the power-of-tile object size. | ||
1511 | */ | ||
1512 | return i915_gem_get_gtt_size(dev, size, tiling_mode); | ||
1513 | } | ||
1514 | |||
1515 | static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) | 1495 | static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) |
1516 | { | 1496 | { |
1517 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; | 1497 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
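Annotation: the merged helper turns the old fenced/unfenced split into a parameter. The behaviour implied by the new condition, spelled out for pre-gen4 tiled objects (everything newer, or untiled, gets 4096 either way):

	/*   fenced == true           -> power-of-two tile-area alignment
	 *   fenced == false, IS_G33  -> 4096 (G33 was the lone unfenced special case)
	 *   fenced == false, others  -> power-of-two tile-area alignment
	 */
	fence_alignment    = i915_gem_get_gtt_alignment(dev, size, tiling_mode, true);
	unfenced_alignment = i915_gem_get_gtt_alignment(dev, size, tiling_mode, false);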
@@ -1574,7 +1554,7 @@ i915_gem_mmap_gtt(struct drm_file *file, | |||
1574 | goto unlock; | 1554 | goto unlock; |
1575 | } | 1555 | } |
1576 | 1556 | ||
1577 | if (obj->base.size > dev_priv->mm.gtt_mappable_end) { | 1557 | if (obj->base.size > dev_priv->gtt.mappable_end) { |
1578 | ret = -E2BIG; | 1558 | ret = -E2BIG; |
1579 | goto out; | 1559 | goto out; |
1580 | } | 1560 | } |
@@ -1692,7 +1672,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) | |||
1692 | kfree(obj->pages); | 1672 | kfree(obj->pages); |
1693 | } | 1673 | } |
1694 | 1674 | ||
1695 | static int | 1675 | int |
1696 | i915_gem_object_put_pages(struct drm_i915_gem_object *obj) | 1676 | i915_gem_object_put_pages(struct drm_i915_gem_object *obj) |
1697 | { | 1677 | { |
1698 | const struct drm_i915_gem_object_ops *ops = obj->ops; | 1678 | const struct drm_i915_gem_object_ops *ops = obj->ops; |
@@ -1865,6 +1845,11 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj) | |||
1865 | if (obj->pages) | 1845 | if (obj->pages) |
1866 | return 0; | 1846 | return 0; |
1867 | 1847 | ||
1848 | if (obj->madv != I915_MADV_WILLNEED) { | ||
1849 | DRM_ERROR("Attempting to obtain a purgeable object\n"); | ||
1850 | return -EINVAL; | ||
1851 | } | ||
1852 | |||
1868 | BUG_ON(obj->pages_pin_count); | 1853 | BUG_ON(obj->pages_pin_count); |
1869 | 1854 | ||
1870 | ret = ops->get_pages(obj); | 1855 | ret = ops->get_pages(obj); |
@@ -1921,9 +1906,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) | |||
1921 | BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); | 1906 | BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); |
1922 | BUG_ON(!obj->active); | 1907 | BUG_ON(!obj->active); |
1923 | 1908 | ||
1924 | if (obj->pin_count) /* are we a framebuffer? */ | ||
1925 | intel_mark_fb_idle(obj); | ||
1926 | |||
1927 | list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); | 1909 | list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); |
1928 | 1910 | ||
1929 | list_del_init(&obj->ring_list); | 1911 | list_del_init(&obj->ring_list); |
@@ -2075,7 +2057,7 @@ i915_add_request(struct intel_ring_buffer *ring, | |||
2075 | 2057 | ||
2076 | if (!dev_priv->mm.suspended) { | 2058 | if (!dev_priv->mm.suspended) { |
2077 | if (i915_enable_hangcheck) { | 2059 | if (i915_enable_hangcheck) { |
2078 | mod_timer(&dev_priv->hangcheck_timer, | 2060 | mod_timer(&dev_priv->gpu_error.hangcheck_timer, |
2079 | round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); | 2061 | round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); |
2080 | } | 2062 | } |
2081 | if (was_empty) { | 2063 | if (was_empty) { |
@@ -2340,10 +2322,12 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj) | |||
2340 | int | 2322 | int |
2341 | i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | 2323 | i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) |
2342 | { | 2324 | { |
2325 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
2343 | struct drm_i915_gem_wait *args = data; | 2326 | struct drm_i915_gem_wait *args = data; |
2344 | struct drm_i915_gem_object *obj; | 2327 | struct drm_i915_gem_object *obj; |
2345 | struct intel_ring_buffer *ring = NULL; | 2328 | struct intel_ring_buffer *ring = NULL; |
2346 | struct timespec timeout_stack, *timeout = NULL; | 2329 | struct timespec timeout_stack, *timeout = NULL; |
2330 | unsigned reset_counter; | ||
2347 | u32 seqno = 0; | 2331 | u32 seqno = 0; |
2348 | int ret = 0; | 2332 | int ret = 0; |
2349 | 2333 | ||
@@ -2384,9 +2368,10 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | |||
2384 | } | 2368 | } |
2385 | 2369 | ||
2386 | drm_gem_object_unreference(&obj->base); | 2370 | drm_gem_object_unreference(&obj->base); |
2371 | reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); | ||
2387 | mutex_unlock(&dev->struct_mutex); | 2372 | mutex_unlock(&dev->struct_mutex); |
2388 | 2373 | ||
2389 | ret = __wait_seqno(ring, seqno, true, timeout); | 2374 | ret = __wait_seqno(ring, seqno, reset_counter, true, timeout); |
2390 | if (timeout) { | 2375 | if (timeout) { |
2391 | WARN_ON(!timespec_valid(timeout)); | 2376 | WARN_ON(!timespec_valid(timeout)); |
2392 | args->timeout_ns = timespec_to_ns(timeout); | 2377 | args->timeout_ns = timespec_to_ns(timeout); |
@@ -2450,15 +2435,15 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj) | |||
2450 | { | 2435 | { |
2451 | u32 old_write_domain, old_read_domains; | 2436 | u32 old_write_domain, old_read_domains; |
2452 | 2437 | ||
2453 | /* Act as a barrier for all accesses through the GTT */ | ||
2454 | mb(); | ||
2455 | |||
2456 | /* Force a pagefault for domain tracking on next user access */ | 2438 | /* Force a pagefault for domain tracking on next user access */ |
2457 | i915_gem_release_mmap(obj); | 2439 | i915_gem_release_mmap(obj); |
2458 | 2440 | ||
2459 | if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) | 2441 | if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) |
2460 | return; | 2442 | return; |
2461 | 2443 | ||
2444 | /* Wait for any direct GTT access to complete */ | ||
2445 | mb(); | ||
2446 | |||
2462 | old_read_domains = obj->base.read_domains; | 2447 | old_read_domains = obj->base.read_domains; |
2463 | old_write_domain = obj->base.write_domain; | 2448 | old_write_domain = obj->base.write_domain; |
2464 | 2449 | ||
@@ -2477,7 +2462,7 @@ int | |||
2477 | i915_gem_object_unbind(struct drm_i915_gem_object *obj) | 2462 | i915_gem_object_unbind(struct drm_i915_gem_object *obj) |
2478 | { | 2463 | { |
2479 | drm_i915_private_t *dev_priv = obj->base.dev->dev_private; | 2464 | drm_i915_private_t *dev_priv = obj->base.dev->dev_private; |
2480 | int ret = 0; | 2465 | int ret; |
2481 | 2466 | ||
2482 | if (obj->gtt_space == NULL) | 2467 | if (obj->gtt_space == NULL) |
2483 | return 0; | 2468 | return 0; |
@@ -2544,52 +2529,38 @@ int i915_gpu_idle(struct drm_device *dev) | |||
2544 | return 0; | 2529 | return 0; |
2545 | } | 2530 | } |
2546 | 2531 | ||
2547 | static void sandybridge_write_fence_reg(struct drm_device *dev, int reg, | ||
2548 | struct drm_i915_gem_object *obj) | ||
2549 | { | ||
2550 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
2551 | uint64_t val; | ||
2552 | |||
2553 | if (obj) { | ||
2554 | u32 size = obj->gtt_space->size; | ||
2555 | |||
2556 | val = (uint64_t)((obj->gtt_offset + size - 4096) & | ||
2557 | 0xfffff000) << 32; | ||
2558 | val |= obj->gtt_offset & 0xfffff000; | ||
2559 | val |= (uint64_t)((obj->stride / 128) - 1) << | ||
2560 | SANDYBRIDGE_FENCE_PITCH_SHIFT; | ||
2561 | |||
2562 | if (obj->tiling_mode == I915_TILING_Y) | ||
2563 | val |= 1 << I965_FENCE_TILING_Y_SHIFT; | ||
2564 | val |= I965_FENCE_REG_VALID; | ||
2565 | } else | ||
2566 | val = 0; | ||
2567 | |||
2568 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val); | ||
2569 | POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8); | ||
2570 | } | ||
2571 | |||
2572 | static void i965_write_fence_reg(struct drm_device *dev, int reg, | 2532 | static void i965_write_fence_reg(struct drm_device *dev, int reg, |
2573 | struct drm_i915_gem_object *obj) | 2533 | struct drm_i915_gem_object *obj) |
2574 | { | 2534 | { |
2575 | drm_i915_private_t *dev_priv = dev->dev_private; | 2535 | drm_i915_private_t *dev_priv = dev->dev_private; |
2536 | int fence_reg; | ||
2537 | int fence_pitch_shift; | ||
2576 | uint64_t val; | 2538 | uint64_t val; |
2577 | 2539 | ||
2540 | if (INTEL_INFO(dev)->gen >= 6) { | ||
2541 | fence_reg = FENCE_REG_SANDYBRIDGE_0; | ||
2542 | fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT; | ||
2543 | } else { | ||
2544 | fence_reg = FENCE_REG_965_0; | ||
2545 | fence_pitch_shift = I965_FENCE_PITCH_SHIFT; | ||
2546 | } | ||
2547 | |||
2578 | if (obj) { | 2548 | if (obj) { |
2579 | u32 size = obj->gtt_space->size; | 2549 | u32 size = obj->gtt_space->size; |
2580 | 2550 | ||
2581 | val = (uint64_t)((obj->gtt_offset + size - 4096) & | 2551 | val = (uint64_t)((obj->gtt_offset + size - 4096) & |
2582 | 0xfffff000) << 32; | 2552 | 0xfffff000) << 32; |
2583 | val |= obj->gtt_offset & 0xfffff000; | 2553 | val |= obj->gtt_offset & 0xfffff000; |
2584 | val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; | 2554 | val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift; |
2585 | if (obj->tiling_mode == I915_TILING_Y) | 2555 | if (obj->tiling_mode == I915_TILING_Y) |
2586 | val |= 1 << I965_FENCE_TILING_Y_SHIFT; | 2556 | val |= 1 << I965_FENCE_TILING_Y_SHIFT; |
2587 | val |= I965_FENCE_REG_VALID; | 2557 | val |= I965_FENCE_REG_VALID; |
2588 | } else | 2558 | } else |
2589 | val = 0; | 2559 | val = 0; |
2590 | 2560 | ||
2591 | I915_WRITE64(FENCE_REG_965_0 + reg * 8, val); | 2561 | fence_reg += reg * 8; |
2592 | POSTING_READ(FENCE_REG_965_0 + reg * 8); | 2562 | I915_WRITE64(fence_reg, val); |
2563 | POSTING_READ(fence_reg); | ||
2593 | } | 2564 | } |
2594 | 2565 | ||
2595 | static void i915_write_fence_reg(struct drm_device *dev, int reg, | 2566 | static void i915_write_fence_reg(struct drm_device *dev, int reg, |
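Annotation: with sandybridge_write_fence_reg() folded in, only the register base and the pitch shift differ per generation. A worked example of the 64-bit fence value built above, for a hypothetical Y-tiled object at GTT offset 0x100000, size 0x40000, stride 512 bytes:

	uint64_t val;
	val  = (uint64_t)((0x100000 + 0x40000 - 4096) & 0xfffff000) << 32; /* fence end   */
	val |= 0x100000 & 0xfffff000;                                      /* fence start */
	val |= (uint64_t)((512 / 128) - 1) << fence_pitch_shift;           /* pitch = 3   */
	val |= 1 << I965_FENCE_TILING_Y_SHIFT;                             /* Y tiling    */
	val |= I965_FENCE_REG_VALID;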
@@ -2668,18 +2639,37 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg, | |||
2668 | POSTING_READ(FENCE_REG_830_0 + reg * 4); | 2639 | POSTING_READ(FENCE_REG_830_0 + reg * 4); |
2669 | } | 2640 | } |
2670 | 2641 | ||
2642 | inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj) | ||
2643 | { | ||
2644 | return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT; | ||
2645 | } | ||
2646 | |||
2671 | static void i915_gem_write_fence(struct drm_device *dev, int reg, | 2647 | static void i915_gem_write_fence(struct drm_device *dev, int reg, |
2672 | struct drm_i915_gem_object *obj) | 2648 | struct drm_i915_gem_object *obj) |
2673 | { | 2649 | { |
2650 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2651 | |||
2652 | /* Ensure that all CPU reads are completed before installing a fence | ||
2653 | * and all writes before removing the fence. | ||
2654 | */ | ||
2655 | if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj)) | ||
2656 | mb(); | ||
2657 | |||
2674 | switch (INTEL_INFO(dev)->gen) { | 2658 | switch (INTEL_INFO(dev)->gen) { |
2675 | case 7: | 2659 | case 7: |
2676 | case 6: sandybridge_write_fence_reg(dev, reg, obj); break; | 2660 | case 6: |
2677 | case 5: | 2661 | case 5: |
2678 | case 4: i965_write_fence_reg(dev, reg, obj); break; | 2662 | case 4: i965_write_fence_reg(dev, reg, obj); break; |
2679 | case 3: i915_write_fence_reg(dev, reg, obj); break; | 2663 | case 3: i915_write_fence_reg(dev, reg, obj); break; |
2680 | case 2: i830_write_fence_reg(dev, reg, obj); break; | 2664 | case 2: i830_write_fence_reg(dev, reg, obj); break; |
2681 | default: BUG(); | 2665 | default: BUG(); |
2682 | } | 2666 | } |
2667 | |||
2668 | /* And similarly be paranoid that no direct access to this region | ||
2669 | * is reordered to before the fence is installed. | ||
2670 | */ | ||
2671 | if (i915_gem_object_needs_mb(obj)) | ||
2672 | mb(); | ||
2683 | } | 2673 | } |
2684 | 2674 | ||
2685 | static inline int fence_number(struct drm_i915_private *dev_priv, | 2675 | static inline int fence_number(struct drm_i915_private *dev_priv, |
@@ -2709,7 +2699,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, | |||
2709 | } | 2699 | } |
2710 | 2700 | ||
2711 | static int | 2701 | static int |
2712 | i915_gem_object_flush_fence(struct drm_i915_gem_object *obj) | 2702 | i915_gem_object_wait_fence(struct drm_i915_gem_object *obj) |
2713 | { | 2703 | { |
2714 | if (obj->last_fenced_seqno) { | 2704 | if (obj->last_fenced_seqno) { |
2715 | int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno); | 2705 | int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno); |
@@ -2719,12 +2709,6 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj) | |||
2719 | obj->last_fenced_seqno = 0; | 2709 | obj->last_fenced_seqno = 0; |
2720 | } | 2710 | } |
2721 | 2711 | ||
2722 | /* Ensure that all CPU reads are completed before installing a fence | ||
2723 | * and all writes before removing the fence. | ||
2724 | */ | ||
2725 | if (obj->base.read_domains & I915_GEM_DOMAIN_GTT) | ||
2726 | mb(); | ||
2727 | |||
2728 | obj->fenced_gpu_access = false; | 2712 | obj->fenced_gpu_access = false; |
2729 | return 0; | 2713 | return 0; |
2730 | } | 2714 | } |
@@ -2735,7 +2719,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj) | |||
2735 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; | 2719 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
2736 | int ret; | 2720 | int ret; |
2737 | 2721 | ||
2738 | ret = i915_gem_object_flush_fence(obj); | 2722 | ret = i915_gem_object_wait_fence(obj); |
2739 | if (ret) | 2723 | if (ret) |
2740 | return ret; | 2724 | return ret; |
2741 | 2725 | ||
@@ -2809,7 +2793,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj) | |||
2809 | * will need to serialise the write to the associated fence register? | 2793 | * will need to serialise the write to the associated fence register? |
2810 | */ | 2794 | */ |
2811 | if (obj->fence_dirty) { | 2795 | if (obj->fence_dirty) { |
2812 | ret = i915_gem_object_flush_fence(obj); | 2796 | ret = i915_gem_object_wait_fence(obj); |
2813 | if (ret) | 2797 | if (ret) |
2814 | return ret; | 2798 | return ret; |
2815 | } | 2799 | } |
@@ -2830,7 +2814,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj) | |||
2830 | if (reg->obj) { | 2814 | if (reg->obj) { |
2831 | struct drm_i915_gem_object *old = reg->obj; | 2815 | struct drm_i915_gem_object *old = reg->obj; |
2832 | 2816 | ||
2833 | ret = i915_gem_object_flush_fence(old); | 2817 | ret = i915_gem_object_wait_fence(old); |
2834 | if (ret) | 2818 | if (ret) |
2835 | return ret; | 2819 | return ret; |
2836 | 2820 | ||
@@ -2931,21 +2915,16 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, | |||
2931 | bool mappable, fenceable; | 2915 | bool mappable, fenceable; |
2932 | int ret; | 2916 | int ret; |
2933 | 2917 | ||
2934 | if (obj->madv != I915_MADV_WILLNEED) { | ||
2935 | DRM_ERROR("Attempting to bind a purgeable object\n"); | ||
2936 | return -EINVAL; | ||
2937 | } | ||
2938 | |||
2939 | fence_size = i915_gem_get_gtt_size(dev, | 2918 | fence_size = i915_gem_get_gtt_size(dev, |
2940 | obj->base.size, | 2919 | obj->base.size, |
2941 | obj->tiling_mode); | 2920 | obj->tiling_mode); |
2942 | fence_alignment = i915_gem_get_gtt_alignment(dev, | 2921 | fence_alignment = i915_gem_get_gtt_alignment(dev, |
2943 | obj->base.size, | 2922 | obj->base.size, |
2944 | obj->tiling_mode); | 2923 | obj->tiling_mode, true); |
2945 | unfenced_alignment = | 2924 | unfenced_alignment = |
2946 | i915_gem_get_unfenced_gtt_alignment(dev, | 2925 | i915_gem_get_gtt_alignment(dev, |
2947 | obj->base.size, | 2926 | obj->base.size, |
2948 | obj->tiling_mode); | 2927 | obj->tiling_mode, false); |
2949 | 2928 | ||
2950 | if (alignment == 0) | 2929 | if (alignment == 0) |
2951 | alignment = map_and_fenceable ? fence_alignment : | 2930 | alignment = map_and_fenceable ? fence_alignment : |
@@ -2961,7 +2940,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, | |||
2961 | * before evicting everything in a vain attempt to find space. | 2940 | * before evicting everything in a vain attempt to find space. |
2962 | */ | 2941 | */ |
2963 | if (obj->base.size > | 2942 | if (obj->base.size > |
2964 | (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) { | 2943 | (map_and_fenceable ? dev_priv->gtt.mappable_end : dev_priv->gtt.total)) { |
2965 | DRM_ERROR("Attempting to bind an object larger than the aperture\n"); | 2944 | DRM_ERROR("Attempting to bind an object larger than the aperture\n"); |
2966 | return -E2BIG; | 2945 | return -E2BIG; |
2967 | } | 2946 | } |
@@ -2982,7 +2961,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, | |||
2982 | if (map_and_fenceable) | 2961 | if (map_and_fenceable) |
2983 | ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node, | 2962 | ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node, |
2984 | size, alignment, obj->cache_level, | 2963 | size, alignment, obj->cache_level, |
2985 | 0, dev_priv->mm.gtt_mappable_end); | 2964 | 0, dev_priv->gtt.mappable_end); |
2986 | else | 2965 | else |
2987 | ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node, | 2966 | ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node, |
2988 | size, alignment, obj->cache_level); | 2967 | size, alignment, obj->cache_level); |
@@ -3022,7 +3001,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, | |||
3022 | (node->start & (fence_alignment - 1)) == 0; | 3001 | (node->start & (fence_alignment - 1)) == 0; |
3023 | 3002 | ||
3024 | mappable = | 3003 | mappable = |
3025 | obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end; | 3004 | obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end; |
3026 | 3005 | ||
3027 | obj->map_and_fenceable = mappable && fenceable; | 3006 | obj->map_and_fenceable = mappable && fenceable; |
3028 | 3007 | ||
@@ -3130,6 +3109,13 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) | |||
3130 | 3109 | ||
3131 | i915_gem_object_flush_cpu_write_domain(obj); | 3110 | i915_gem_object_flush_cpu_write_domain(obj); |
3132 | 3111 | ||
3112 | /* Serialise direct access to this object with the barriers for | ||
3113 | * coherent writes from the GPU, by effectively invalidating the | ||
3114 | * GTT domain upon first access. | ||
3115 | */ | ||
3116 | if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) | ||
3117 | mb(); | ||
3118 | |||
3133 | old_write_domain = obj->base.write_domain; | 3119 | old_write_domain = obj->base.write_domain; |
3134 | old_read_domains = obj->base.read_domains; | 3120 | old_read_domains = obj->base.read_domains; |
3135 | 3121 | ||
@@ -3436,11 +3422,17 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) | |||
3436 | unsigned long recent_enough = jiffies - msecs_to_jiffies(20); | 3422 | unsigned long recent_enough = jiffies - msecs_to_jiffies(20); |
3437 | struct drm_i915_gem_request *request; | 3423 | struct drm_i915_gem_request *request; |
3438 | struct intel_ring_buffer *ring = NULL; | 3424 | struct intel_ring_buffer *ring = NULL; |
3425 | unsigned reset_counter; | ||
3439 | u32 seqno = 0; | 3426 | u32 seqno = 0; |
3440 | int ret; | 3427 | int ret; |
3441 | 3428 | ||
3442 | if (atomic_read(&dev_priv->mm.wedged)) | 3429 | ret = i915_gem_wait_for_error(&dev_priv->gpu_error); |
3443 | return -EIO; | 3430 | if (ret) |
3431 | return ret; | ||
3432 | |||
3433 | ret = i915_gem_check_wedge(&dev_priv->gpu_error, false); | ||
3434 | if (ret) | ||
3435 | return ret; | ||
3444 | 3436 | ||
3445 | spin_lock(&file_priv->mm.lock); | 3437 | spin_lock(&file_priv->mm.lock); |
3446 | list_for_each_entry(request, &file_priv->mm.request_list, client_list) { | 3438 | list_for_each_entry(request, &file_priv->mm.request_list, client_list) { |
@@ -3450,12 +3442,13 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) | |||
3450 | ring = request->ring; | 3442 | ring = request->ring; |
3451 | seqno = request->seqno; | 3443 | seqno = request->seqno; |
3452 | } | 3444 | } |
3445 | reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); | ||
3453 | spin_unlock(&file_priv->mm.lock); | 3446 | spin_unlock(&file_priv->mm.lock); |
3454 | 3447 | ||
3455 | if (seqno == 0) | 3448 | if (seqno == 0) |
3456 | return 0; | 3449 | return 0; |
3457 | 3450 | ||
3458 | ret = __wait_seqno(ring, seqno, true, NULL); | 3451 | ret = __wait_seqno(ring, seqno, reset_counter, true, NULL); |
3459 | if (ret == 0) | 3452 | if (ret == 0) |
3460 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); | 3453 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); |
3461 | 3454 | ||
@@ -3853,7 +3846,7 @@ i915_gem_idle(struct drm_device *dev) | |||
3853 | * And not confound mm.suspended! | 3846 | * And not confound mm.suspended! |
3854 | */ | 3847 | */ |
3855 | dev_priv->mm.suspended = 1; | 3848 | dev_priv->mm.suspended = 1; |
3856 | del_timer_sync(&dev_priv->hangcheck_timer); | 3849 | del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); |
3857 | 3850 | ||
3858 | i915_kernel_lost_context(dev); | 3851 | i915_kernel_lost_context(dev); |
3859 | i915_gem_cleanup_ringbuffer(dev); | 3852 | i915_gem_cleanup_ringbuffer(dev); |
@@ -3953,8 +3946,6 @@ i915_gem_init_hw(struct drm_device *dev) | |||
3953 | 3946 | ||
3954 | i915_gem_init_swizzling(dev); | 3947 | i915_gem_init_swizzling(dev); |
3955 | 3948 | ||
3956 | dev_priv->next_seqno = dev_priv->last_seqno = (u32)~0 - 0x1000; | ||
3957 | |||
3958 | ret = intel_init_render_ring_buffer(dev); | 3949 | ret = intel_init_render_ring_buffer(dev); |
3959 | if (ret) | 3950 | if (ret) |
3960 | return ret; | 3951 | return ret; |
@@ -3971,6 +3962,10 @@ i915_gem_init_hw(struct drm_device *dev) | |||
3971 | goto cleanup_bsd_ring; | 3962 | goto cleanup_bsd_ring; |
3972 | } | 3963 | } |
3973 | 3964 | ||
3965 | ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000)); | ||
3966 | if (ret) | ||
3967 | return ret; | ||
3968 | |||
3974 | /* | 3969 | /* |
3975 | * XXX: There was some w/a described somewhere suggesting loading | 3970 | * XXX: There was some w/a described somewhere suggesting loading |
3976 | * contexts before PPGTT. | 3971 | * contexts before PPGTT. |
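Annotation: seeding the seqno at (u32)~0 - 0x1000 means every boot crosses the 32-bit wraparound within a few thousand requests, so wrap bugs surface immediately instead of after weeks of uptime. Wrap safety rests on signed-difference comparison; a minimal model (the driver's real helper is i915_seqno_passed, assumed to keep its usual definition):

	#include <stdbool.h>
	#include <stdint.h>

	static bool seqno_passed(uint32_t seq1, uint32_t seq2)
	{
		return (int32_t)(seq1 - seq2) >= 0; /* correct across wraparound */
	}

	/* e.g. seqno_passed(0x00000ff0, 0xfffffff0) is true:
	 * 0x00000ff0 - 0xfffffff0 == 0x1000, a small positive signed value. */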
@@ -4028,9 +4023,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, | |||
4028 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 4023 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
4029 | return 0; | 4024 | return 0; |
4030 | 4025 | ||
4031 | if (atomic_read(&dev_priv->mm.wedged)) { | 4026 | if (i915_reset_in_progress(&dev_priv->gpu_error)) { |
4032 | DRM_ERROR("Reenabling wedged hardware, good luck\n"); | 4027 | DRM_ERROR("Reenabling wedged hardware, good luck\n"); |
4033 | atomic_set(&dev_priv->mm.wedged, 0); | 4028 | atomic_set(&dev_priv->gpu_error.reset_counter, 0); |
4034 | } | 4029 | } |
4035 | 4030 | ||
4036 | mutex_lock(&dev->struct_mutex); | 4031 | mutex_lock(&dev->struct_mutex); |
@@ -4114,7 +4109,7 @@ i915_gem_load(struct drm_device *dev) | |||
4114 | INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); | 4109 | INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); |
4115 | INIT_DELAYED_WORK(&dev_priv->mm.retire_work, | 4110 | INIT_DELAYED_WORK(&dev_priv->mm.retire_work, |
4116 | i915_gem_retire_work_handler); | 4111 | i915_gem_retire_work_handler); |
4117 | init_completion(&dev_priv->error_completion); | 4112 | init_waitqueue_head(&dev_priv->gpu_error.reset_queue); |
4118 | 4113 | ||
4119 | /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ | 4114 | /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ |
4120 | if (IS_GEN3(dev)) { | 4115 | if (IS_GEN3(dev)) { |
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 776a3225184c..c86d5d9356fd 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c | |||
@@ -80,7 +80,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, | |||
80 | if (mappable) | 80 | if (mappable) |
81 | drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, | 81 | drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, |
82 | min_size, alignment, cache_level, | 82 | min_size, alignment, cache_level, |
83 | 0, dev_priv->mm.gtt_mappable_end); | 83 | 0, dev_priv->gtt.mappable_end); |
84 | else | 84 | else |
85 | drm_mm_init_scan(&dev_priv->mm.gtt_space, | 85 | drm_mm_init_scan(&dev_priv->mm.gtt_space, |
86 | min_size, alignment, cache_level); | 86 | min_size, alignment, cache_level); |
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 163bb52bd3b3..27269103b621 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -34,61 +34,133 @@ | |||
34 | #include <linux/dma_remapping.h> | 34 | #include <linux/dma_remapping.h> |
35 | 35 | ||
36 | struct eb_objects { | 36 | struct eb_objects { |
37 | struct list_head objects; | ||
37 | int and; | 38 | int and; |
38 | struct hlist_head buckets[0]; | 39 | union { |
40 | struct drm_i915_gem_object *lut[0]; | ||
41 | struct hlist_head buckets[0]; | ||
42 | }; | ||
39 | }; | 43 | }; |
40 | 44 | ||
41 | static struct eb_objects * | 45 | static struct eb_objects * |
42 | eb_create(int size) | 46 | eb_create(struct drm_i915_gem_execbuffer2 *args) |
43 | { | 47 | { |
44 | struct eb_objects *eb; | 48 | struct eb_objects *eb = NULL; |
45 | int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; | 49 | |
46 | BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head))); | 50 | if (args->flags & I915_EXEC_HANDLE_LUT) { |
47 | while (count > size) | 51 | int size = args->buffer_count; |
48 | count >>= 1; | 52 | size *= sizeof(struct drm_i915_gem_object *); |
49 | eb = kzalloc(count*sizeof(struct hlist_head) + | 53 | size += sizeof(struct eb_objects); |
50 | sizeof(struct eb_objects), | 54 | eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); |
51 | GFP_KERNEL); | 55 | } |
52 | if (eb == NULL) | 56 | |
53 | return eb; | 57 | if (eb == NULL) { |
54 | 58 | int size = args->buffer_count; | |
55 | eb->and = count - 1; | 59 | int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; |
60 | BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head))); | ||
61 | while (count > 2*size) | ||
62 | count >>= 1; | ||
63 | eb = kzalloc(count*sizeof(struct hlist_head) + | ||
64 | sizeof(struct eb_objects), | ||
65 | GFP_TEMPORARY); | ||
66 | if (eb == NULL) | ||
67 | return eb; | ||
68 | |||
69 | eb->and = count - 1; | ||
70 | } else | ||
71 | eb->and = -args->buffer_count; | ||
72 | |||
73 | INIT_LIST_HEAD(&eb->objects); | ||
56 | return eb; | 74 | return eb; |
57 | } | 75 | } |
58 | 76 | ||
59 | static void | 77 | static void |
60 | eb_reset(struct eb_objects *eb) | 78 | eb_reset(struct eb_objects *eb) |
61 | { | 79 | { |
62 | memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head)); | 80 | if (eb->and >= 0) |
81 | memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head)); | ||
63 | } | 82 | } |
64 | 83 | ||
65 | static void | 84 | static int |
66 | eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj) | 85 | eb_lookup_objects(struct eb_objects *eb, |
86 | struct drm_i915_gem_exec_object2 *exec, | ||
87 | const struct drm_i915_gem_execbuffer2 *args, | ||
88 | struct drm_file *file) | ||
67 | { | 89 | { |
68 | hlist_add_head(&obj->exec_node, | 90 | int i; |
69 | &eb->buckets[obj->exec_handle & eb->and]); | 91 | |
92 | spin_lock(&file->table_lock); | ||
93 | for (i = 0; i < args->buffer_count; i++) { | ||
94 | struct drm_i915_gem_object *obj; | ||
95 | |||
96 | obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle)); | ||
97 | if (obj == NULL) { | ||
98 | spin_unlock(&file->table_lock); | ||
99 | DRM_DEBUG("Invalid object handle %d at index %d\n", | ||
100 | exec[i].handle, i); | ||
101 | return -ENOENT; | ||
102 | } | ||
103 | |||
104 | if (!list_empty(&obj->exec_list)) { | ||
105 | spin_unlock(&file->table_lock); | ||
106 | DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n", | ||
107 | obj, exec[i].handle, i); | ||
108 | return -EINVAL; | ||
109 | } | ||
110 | |||
111 | drm_gem_object_reference(&obj->base); | ||
112 | list_add_tail(&obj->exec_list, &eb->objects); | ||
113 | |||
114 | obj->exec_entry = &exec[i]; | ||
115 | if (eb->and < 0) { | ||
116 | eb->lut[i] = obj; | ||
117 | } else { | ||
118 | uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle; | ||
119 | obj->exec_handle = handle; | ||
120 | hlist_add_head(&obj->exec_node, | ||
121 | &eb->buckets[handle & eb->and]); | ||
122 | } | ||
123 | } | ||
124 | spin_unlock(&file->table_lock); | ||
125 | |||
126 | return 0; | ||
70 | } | 127 | } |
71 | 128 | ||
72 | static struct drm_i915_gem_object * | 129 | static struct drm_i915_gem_object * |
73 | eb_get_object(struct eb_objects *eb, unsigned long handle) | 130 | eb_get_object(struct eb_objects *eb, unsigned long handle) |
74 | { | 131 | { |
75 | struct hlist_head *head; | 132 | if (eb->and < 0) { |
76 | struct hlist_node *node; | 133 | if (handle >= -eb->and) |
77 | struct drm_i915_gem_object *obj; | 134 | return NULL; |
135 | return eb->lut[handle]; | ||
136 | } else { | ||
137 | struct hlist_head *head; | ||
138 | struct hlist_node *node; | ||
78 | 139 | ||
79 | head = &eb->buckets[handle & eb->and]; | 140 | head = &eb->buckets[handle & eb->and]; |
80 | hlist_for_each(node, head) { | 141 | hlist_for_each(node, head) { |
81 | obj = hlist_entry(node, struct drm_i915_gem_object, exec_node); | 142 | struct drm_i915_gem_object *obj; |
82 | if (obj->exec_handle == handle) | ||
83 | return obj; | ||
84 | } | ||
85 | 143 | ||
86 | return NULL; | 144 | obj = hlist_entry(node, struct drm_i915_gem_object, exec_node); |
145 | if (obj->exec_handle == handle) | ||
146 | return obj; | ||
147 | } | ||
148 | return NULL; | ||
149 | } | ||
87 | } | 150 | } |
88 | 151 | ||
89 | static void | 152 | static void |
90 | eb_destroy(struct eb_objects *eb) | 153 | eb_destroy(struct eb_objects *eb) |
91 | { | 154 | { |
155 | while (!list_empty(&eb->objects)) { | ||
156 | struct drm_i915_gem_object *obj; | ||
157 | |||
158 | obj = list_first_entry(&eb->objects, | ||
159 | struct drm_i915_gem_object, | ||
160 | exec_list); | ||
161 | list_del_init(&obj->exec_list); | ||
162 | drm_gem_object_unreference(&obj->base); | ||
163 | } | ||
92 | kfree(eb); | 164 | kfree(eb); |
93 | } | 165 | } |
94 | 166 | ||
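The sign of eb->and selects between the two lookup schemes: a negative value stores the negated table size and handles index the LUT directly, while a non-negative value is the mask for a chained power-of-two hash table. A self-contained sketch of the same shape (struct names and the helper are illustrative, not the kernel's):

#include <stdint.h>
#include <stddef.h>

struct obj {
	uint32_t handle;
	struct obj *next;	/* hash-chain link */
};

struct eb {
	int and;	/* >= 0: hash mask; < 0: negated LUT size */
	union {
		struct obj **lut;	/* direct index, I915_EXEC_HANDLE_LUT */
		struct obj **buckets;	/* power-of-two chained hash table */
	};
};

static struct obj *eb_lookup(const struct eb *eb, uint32_t handle)
{
	if (eb->and < 0) {
		/* Userspace promised handles are the 0..n-1 exec indices. */
		if (handle >= (uint32_t)-eb->and)
			return NULL;
		return eb->lut[handle];
	}
	for (struct obj *o = eb->buckets[handle & eb->and]; o; o = o->next)
		if (o->handle == handle)
			return o;
	return NULL;
}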
@@ -209,7 +281,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, | |||
209 | 281 | ||
210 | /* Map the page containing the relocation we're going to perform. */ | 282 | /* Map the page containing the relocation we're going to perform. */ |
211 | reloc->offset += obj->gtt_offset; | 283 | reloc->offset += obj->gtt_offset; |
212 | reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, | 284 | reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, |
213 | reloc->offset & PAGE_MASK); | 285 | reloc->offset & PAGE_MASK); |
214 | reloc_entry = (uint32_t __iomem *) | 286 | reloc_entry = (uint32_t __iomem *) |
215 | (reloc_page + (reloc->offset & ~PAGE_MASK)); | 287 | (reloc_page + (reloc->offset & ~PAGE_MASK)); |
@@ -288,8 +360,7 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, | |||
288 | 360 | ||
289 | static int | 361 | static int |
290 | i915_gem_execbuffer_relocate(struct drm_device *dev, | 362 | i915_gem_execbuffer_relocate(struct drm_device *dev, |
291 | struct eb_objects *eb, | 363 | struct eb_objects *eb) |
292 | struct list_head *objects) | ||
293 | { | 364 | { |
294 | struct drm_i915_gem_object *obj; | 365 | struct drm_i915_gem_object *obj; |
295 | int ret = 0; | 366 | int ret = 0; |
@@ -302,7 +373,7 @@ i915_gem_execbuffer_relocate(struct drm_device *dev, | |||
302 | * lockdep complains vehemently. | 373 | * lockdep complains vehemently. |
303 | */ | 374 | */ |
304 | pagefault_disable(); | 375 | pagefault_disable(); |
305 | list_for_each_entry(obj, objects, exec_list) { | 376 | list_for_each_entry(obj, &eb->objects, exec_list) { |
306 | ret = i915_gem_execbuffer_relocate_object(obj, eb); | 377 | ret = i915_gem_execbuffer_relocate_object(obj, eb); |
307 | if (ret) | 378 | if (ret) |
308 | break; | 379 | break; |
@@ -324,7 +395,8 @@ need_reloc_mappable(struct drm_i915_gem_object *obj) | |||
324 | 395 | ||
325 | static int | 396 | static int |
326 | i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, | 397 | i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, |
327 | struct intel_ring_buffer *ring) | 398 | struct intel_ring_buffer *ring, |
399 | bool *need_reloc) | ||
328 | { | 400 | { |
329 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; | 401 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
330 | struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; | 402 | struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; |
@@ -365,7 +437,20 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, | |||
365 | obj->has_aliasing_ppgtt_mapping = 1; | 437 | obj->has_aliasing_ppgtt_mapping = 1; |
366 | } | 438 | } |
367 | 439 | ||
368 | entry->offset = obj->gtt_offset; | 440 | if (entry->offset != obj->gtt_offset) { |
441 | entry->offset = obj->gtt_offset; | ||
442 | *need_reloc = true; | ||
443 | } | ||
444 | |||
445 | if (entry->flags & EXEC_OBJECT_WRITE) { | ||
446 | obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER; | ||
447 | obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER; | ||
448 | } | ||
449 | |||
450 | if (entry->flags & EXEC_OBJECT_NEEDS_GTT && | ||
451 | !obj->has_global_gtt_mapping) | ||
452 | i915_gem_gtt_bind_object(obj, obj->cache_level); | ||
453 | |||
369 | return 0; | 454 | return 0; |
370 | } | 455 | } |
371 | 456 | ||
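This is the heart of the I915_EXEC_NO_RELOC fast path: userspace supplies the offsets it last observed, and the relocation pass only runs if the flag is absent or some object failed to land at its presumed offset. A simplified model of that decision (field and function names are ours, not the driver's):

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

struct entry { uint64_t presumed_offset; };

/* Update presumed offsets; report whether any object moved. */
static bool reserve_all(struct entry *e, const uint64_t *actual, size_t n)
{
	bool moved = false;

	for (size_t i = 0; i < n; i++) {
		if (e[i].presumed_offset != actual[i]) {
			e[i].presumed_offset = actual[i];
			moved = true;
		}
	}
	return moved;
}

/* Relocate iff userspace did not opt out, or something moved anyway. */
static bool need_relocations(bool no_reloc_flag, bool moved)
{
	return !no_reloc_flag || moved;
}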
@@ -391,7 +476,8 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj) | |||
391 | static int | 476 | static int |
392 | i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, | 477 | i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, |
393 | struct drm_file *file, | 478 | struct drm_file *file, |
394 | struct list_head *objects) | 479 | struct list_head *objects, |
480 | bool *need_relocs) | ||
395 | { | 481 | { |
396 | struct drm_i915_gem_object *obj; | 482 | struct drm_i915_gem_object *obj; |
397 | struct list_head ordered_objects; | 483 | struct list_head ordered_objects; |
@@ -419,7 +505,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, | |||
419 | else | 505 | else |
420 | list_move_tail(&obj->exec_list, &ordered_objects); | 506 | list_move_tail(&obj->exec_list, &ordered_objects); |
421 | 507 | ||
422 | obj->base.pending_read_domains = 0; | 508 | obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND; |
423 | obj->base.pending_write_domain = 0; | 509 | obj->base.pending_write_domain = 0; |
424 | obj->pending_fenced_gpu_access = false; | 510 | obj->pending_fenced_gpu_access = false; |
425 | } | 511 | } |
@@ -459,7 +545,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, | |||
459 | (need_mappable && !obj->map_and_fenceable)) | 545 | (need_mappable && !obj->map_and_fenceable)) |
460 | ret = i915_gem_object_unbind(obj); | 546 | ret = i915_gem_object_unbind(obj); |
461 | else | 547 | else |
462 | ret = i915_gem_execbuffer_reserve_object(obj, ring); | 548 | ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs); |
463 | if (ret) | 549 | if (ret) |
464 | goto err; | 550 | goto err; |
465 | } | 551 | } |
@@ -469,7 +555,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, | |||
469 | if (obj->gtt_space) | 555 | if (obj->gtt_space) |
470 | continue; | 556 | continue; |
471 | 557 | ||
472 | ret = i915_gem_execbuffer_reserve_object(obj, ring); | 558 | ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs); |
473 | if (ret) | 559 | if (ret) |
474 | goto err; | 560 | goto err; |
475 | } | 561 | } |
@@ -489,21 +575,22 @@ err: /* Decrement pin count for bound objects */ | |||
489 | 575 | ||
490 | static int | 576 | static int |
491 | i915_gem_execbuffer_relocate_slow(struct drm_device *dev, | 577 | i915_gem_execbuffer_relocate_slow(struct drm_device *dev, |
578 | struct drm_i915_gem_execbuffer2 *args, | ||
492 | struct drm_file *file, | 579 | struct drm_file *file, |
493 | struct intel_ring_buffer *ring, | 580 | struct intel_ring_buffer *ring, |
494 | struct list_head *objects, | ||
495 | struct eb_objects *eb, | 581 | struct eb_objects *eb, |
496 | struct drm_i915_gem_exec_object2 *exec, | 582 | struct drm_i915_gem_exec_object2 *exec) |
497 | int count) | ||
498 | { | 583 | { |
499 | struct drm_i915_gem_relocation_entry *reloc; | 584 | struct drm_i915_gem_relocation_entry *reloc; |
500 | struct drm_i915_gem_object *obj; | 585 | struct drm_i915_gem_object *obj; |
586 | bool need_relocs; | ||
501 | int *reloc_offset; | 587 | int *reloc_offset; |
502 | int i, total, ret; | 588 | int i, total, ret; |
589 | int count = args->buffer_count; | ||
503 | 590 | ||
504 | /* We may process another execbuffer during the unlock... */ | 591 | /* We may process another execbuffer during the unlock... */ |
505 | while (!list_empty(objects)) { | 592 | while (!list_empty(&eb->objects)) { |
506 | obj = list_first_entry(objects, | 593 | obj = list_first_entry(&eb->objects, |
507 | struct drm_i915_gem_object, | 594 | struct drm_i915_gem_object, |
508 | exec_list); | 595 | exec_list); |
509 | list_del_init(&obj->exec_list); | 596 | list_del_init(&obj->exec_list); |
@@ -550,27 +637,16 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, | |||
550 | 637 | ||
551 | /* reacquire the objects */ | 638 | /* reacquire the objects */ |
552 | eb_reset(eb); | 639 | eb_reset(eb); |
553 | for (i = 0; i < count; i++) { | 640 | ret = eb_lookup_objects(eb, exec, args, file); |
554 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, | 641 | if (ret) |
555 | exec[i].handle)); | 642 | goto err; |
556 | if (&obj->base == NULL) { | ||
557 | DRM_DEBUG("Invalid object handle %d at index %d\n", | ||
558 | exec[i].handle, i); | ||
559 | ret = -ENOENT; | ||
560 | goto err; | ||
561 | } | ||
562 | |||
563 | list_add_tail(&obj->exec_list, objects); | ||
564 | obj->exec_handle = exec[i].handle; | ||
565 | obj->exec_entry = &exec[i]; | ||
566 | eb_add_object(eb, obj); | ||
567 | } | ||
568 | 643 | ||
569 | ret = i915_gem_execbuffer_reserve(ring, file, objects); | 644 | need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; |
645 | ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs); | ||
570 | if (ret) | 646 | if (ret) |
571 | goto err; | 647 | goto err; |
572 | 648 | ||
573 | list_for_each_entry(obj, objects, exec_list) { | 649 | list_for_each_entry(obj, &eb->objects, exec_list) { |
574 | int offset = obj->exec_entry - exec; | 650 | int offset = obj->exec_entry - exec; |
575 | ret = i915_gem_execbuffer_relocate_object_slow(obj, eb, | 651 | ret = i915_gem_execbuffer_relocate_object_slow(obj, eb, |
576 | reloc + reloc_offset[offset]); | 652 | reloc + reloc_offset[offset]); |
@@ -624,6 +700,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, | |||
624 | static bool | 700 | static bool |
625 | i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) | 701 | i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) |
626 | { | 702 | { |
703 | if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS) | ||
704 | return false; | ||
705 | |||
627 | return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0; | 706 | return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0; |
628 | } | 707 | } |
629 | 708 | ||
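The added check rejects any flag bit the driver does not define before the existing alignment test, which ORs batch_start_offset and batch_len and masks with 0x7 so a single operation verifies both are 8-byte aligned. Sketched below; the unknown-flags mask value is purely illustrative:

#include <stdbool.h>
#include <stdint.h>

#define EXEC_UNKNOWN_FLAGS (~0u << 12)	/* assumed: defined flag bits stop at 12 */

static bool check_execbuffer(uint32_t flags, uint32_t start, uint32_t len)
{
	if (flags & EXEC_UNKNOWN_FLAGS)
		return false;
	/* (start | len) has a low bit set iff either value is misaligned. */
	return ((start | len) & 0x7) == 0;
}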
@@ -637,6 +716,9 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, | |||
637 | char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; | 716 | char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; |
638 | int length; /* limited by fault_in_pages_readable() */ | 717 | int length; /* limited by fault_in_pages_readable() */ |
639 | 718 | ||
719 | if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS) | ||
720 | return -EINVAL; | ||
721 | |||
640 | /* First check for malicious input causing overflow */ | 722 | /* First check for malicious input causing overflow */ |
641 | if (exec[i].relocation_count > | 723 | if (exec[i].relocation_count > |
642 | INT_MAX / sizeof(struct drm_i915_gem_relocation_entry)) | 724 | INT_MAX / sizeof(struct drm_i915_gem_relocation_entry)) |
@@ -644,9 +726,6 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, | |||
644 | 726 | ||
645 | length = exec[i].relocation_count * | 727 | length = exec[i].relocation_count * |
646 | sizeof(struct drm_i915_gem_relocation_entry); | 728 | sizeof(struct drm_i915_gem_relocation_entry); |
647 | if (!access_ok(VERIFY_READ, ptr, length)) | ||
648 | return -EFAULT; | ||
649 | |||
650 | /* we may also need to update the presumed offsets */ | 729 | /* we may also need to update the presumed offsets */ |
651 | if (!access_ok(VERIFY_WRITE, ptr, length)) | 730 | if (!access_ok(VERIFY_WRITE, ptr, length)) |
652 | return -EFAULT; | 731 | return -EFAULT; |
@@ -668,8 +747,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects, | |||
668 | u32 old_read = obj->base.read_domains; | 747 | u32 old_read = obj->base.read_domains; |
669 | u32 old_write = obj->base.write_domain; | 748 | u32 old_write = obj->base.write_domain; |
670 | 749 | ||
671 | obj->base.read_domains = obj->base.pending_read_domains; | ||
672 | obj->base.write_domain = obj->base.pending_write_domain; | 750 | obj->base.write_domain = obj->base.pending_write_domain; |
751 | if (obj->base.write_domain == 0) | ||
752 | obj->base.pending_read_domains |= obj->base.read_domains; | ||
753 | obj->base.read_domains = obj->base.pending_read_domains; | ||
673 | obj->fenced_gpu_access = obj->pending_fenced_gpu_access; | 754 | obj->fenced_gpu_access = obj->pending_fenced_gpu_access; |
674 | 755 | ||
675 | i915_gem_object_move_to_active(obj, ring); | 756 | i915_gem_object_move_to_active(obj, ring); |
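The reordered domain bookkeeping changes one case: an object that is not written by this batch now accumulates its previous read domains on top of the pending ones, while a written object's domains are replaced outright. In miniature, with shortened field names:

#include <stdint.h>

struct domains {
	uint32_t read, write;
	uint32_t pending_read, pending_write;
};

static void move_to_active(struct domains *d)
{
	d->write = d->pending_write;
	if (d->write == 0)
		d->pending_read |= d->read;	/* not written: keep old readers */
	d->read = d->pending_read;
}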
@@ -728,21 +809,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
728 | struct drm_i915_gem_exec_object2 *exec) | 809 | struct drm_i915_gem_exec_object2 *exec) |
729 | { | 810 | { |
730 | drm_i915_private_t *dev_priv = dev->dev_private; | 811 | drm_i915_private_t *dev_priv = dev->dev_private; |
731 | struct list_head objects; | ||
732 | struct eb_objects *eb; | 812 | struct eb_objects *eb; |
733 | struct drm_i915_gem_object *batch_obj; | 813 | struct drm_i915_gem_object *batch_obj; |
734 | struct drm_clip_rect *cliprects = NULL; | 814 | struct drm_clip_rect *cliprects = NULL; |
735 | struct intel_ring_buffer *ring; | 815 | struct intel_ring_buffer *ring; |
736 | u32 ctx_id = i915_execbuffer2_get_context_id(*args); | 816 | u32 ctx_id = i915_execbuffer2_get_context_id(*args); |
737 | u32 exec_start, exec_len; | 817 | u32 exec_start, exec_len; |
738 | u32 mask; | 818 | u32 mask, flags; |
739 | u32 flags; | ||
740 | int ret, mode, i; | 819 | int ret, mode, i; |
820 | bool need_relocs; | ||
741 | 821 | ||
742 | if (!i915_gem_check_execbuffer(args)) { | 822 | if (!i915_gem_check_execbuffer(args)) |
743 | DRM_DEBUG("execbuf with invalid offset/length\n"); | ||
744 | return -EINVAL; | 823 | return -EINVAL; |
745 | } | ||
746 | 824 | ||
747 | ret = validate_exec_list(exec, args->buffer_count); | 825 | ret = validate_exec_list(exec, args->buffer_count); |
748 | if (ret) | 826 | if (ret) |
@@ -863,7 +941,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
863 | goto pre_mutex_err; | 941 | goto pre_mutex_err; |
864 | } | 942 | } |
865 | 943 | ||
866 | eb = eb_create(args->buffer_count); | 944 | eb = eb_create(args); |
867 | if (eb == NULL) { | 945 | if (eb == NULL) { |
868 | mutex_unlock(&dev->struct_mutex); | 946 | mutex_unlock(&dev->struct_mutex); |
869 | ret = -ENOMEM; | 947 | ret = -ENOMEM; |
@@ -871,51 +949,28 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
871 | } | 949 | } |
872 | 950 | ||
873 | /* Look up object handles */ | 951 | /* Look up object handles */ |
874 | INIT_LIST_HEAD(&objects); | 952 | ret = eb_lookup_objects(eb, exec, args, file); |
875 | for (i = 0; i < args->buffer_count; i++) { | 953 | if (ret) |
876 | struct drm_i915_gem_object *obj; | 954 | goto err; |
877 | |||
878 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, | ||
879 | exec[i].handle)); | ||
880 | if (&obj->base == NULL) { | ||
881 | DRM_DEBUG("Invalid object handle %d at index %d\n", | ||
882 | exec[i].handle, i); | ||
883 | /* prevent error path from reading uninitialized data */ | ||
884 | ret = -ENOENT; | ||
885 | goto err; | ||
886 | } | ||
887 | |||
888 | if (!list_empty(&obj->exec_list)) { | ||
889 | DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n", | ||
890 | obj, exec[i].handle, i); | ||
891 | ret = -EINVAL; | ||
892 | goto err; | ||
893 | } | ||
894 | |||
895 | list_add_tail(&obj->exec_list, &objects); | ||
896 | obj->exec_handle = exec[i].handle; | ||
897 | obj->exec_entry = &exec[i]; | ||
898 | eb_add_object(eb, obj); | ||
899 | } | ||
900 | 955 | ||
901 | /* take note of the batch buffer before we might reorder the lists */ | 956 | /* take note of the batch buffer before we might reorder the lists */ |
902 | batch_obj = list_entry(objects.prev, | 957 | batch_obj = list_entry(eb->objects.prev, |
903 | struct drm_i915_gem_object, | 958 | struct drm_i915_gem_object, |
904 | exec_list); | 959 | exec_list); |
905 | 960 | ||
906 | /* Move the objects en-masse into the GTT, evicting if necessary. */ | 961 | /* Move the objects en-masse into the GTT, evicting if necessary. */ |
907 | ret = i915_gem_execbuffer_reserve(ring, file, &objects); | 962 | need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; |
963 | ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs); | ||
908 | if (ret) | 964 | if (ret) |
909 | goto err; | 965 | goto err; |
910 | 966 | ||
911 | /* The objects are in their final locations, apply the relocations. */ | 967 | /* The objects are in their final locations, apply the relocations. */ |
912 | ret = i915_gem_execbuffer_relocate(dev, eb, &objects); | 968 | if (need_relocs) |
969 | ret = i915_gem_execbuffer_relocate(dev, eb); | ||
913 | if (ret) { | 970 | if (ret) { |
914 | if (ret == -EFAULT) { | 971 | if (ret == -EFAULT) { |
915 | ret = i915_gem_execbuffer_relocate_slow(dev, file, ring, | 972 | ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring, |
916 | &objects, eb, | 973 | eb, exec); |
917 | exec, | ||
918 | args->buffer_count); | ||
919 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | 974 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); |
920 | } | 975 | } |
921 | if (ret) | 976 | if (ret) |
@@ -937,7 +992,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
937 | if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping) | 992 | if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping) |
938 | i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level); | 993 | i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level); |
939 | 994 | ||
940 | ret = i915_gem_execbuffer_move_to_gpu(ring, &objects); | 995 | ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects); |
941 | if (ret) | 996 | if (ret) |
942 | goto err; | 997 | goto err; |
943 | 998 | ||
@@ -991,20 +1046,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
991 | 1046 | ||
992 | trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); | 1047 | trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); |
993 | 1048 | ||
994 | i915_gem_execbuffer_move_to_active(&objects, ring); | 1049 | i915_gem_execbuffer_move_to_active(&eb->objects, ring); |
995 | i915_gem_execbuffer_retire_commands(dev, file, ring); | 1050 | i915_gem_execbuffer_retire_commands(dev, file, ring); |
996 | 1051 | ||
997 | err: | 1052 | err: |
998 | eb_destroy(eb); | 1053 | eb_destroy(eb); |
999 | while (!list_empty(&objects)) { | ||
1000 | struct drm_i915_gem_object *obj; | ||
1001 | |||
1002 | obj = list_first_entry(&objects, | ||
1003 | struct drm_i915_gem_object, | ||
1004 | exec_list); | ||
1005 | list_del_init(&obj->exec_list); | ||
1006 | drm_gem_object_unreference(&obj->base); | ||
1007 | } | ||
1008 | 1054 | ||
1009 | mutex_unlock(&dev->struct_mutex); | 1055 | mutex_unlock(&dev->struct_mutex); |
1010 | 1056 | ||
@@ -1113,7 +1159,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, | |||
1113 | } | 1159 | } |
1114 | 1160 | ||
1115 | exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count, | 1161 | exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count, |
1116 | GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); | 1162 | GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); |
1117 | if (exec2_list == NULL) | 1163 | if (exec2_list == NULL) |
1118 | exec2_list = drm_malloc_ab(sizeof(*exec2_list), | 1164 | exec2_list = drm_malloc_ab(sizeof(*exec2_list), |
1119 | args->buffer_count); | 1165 | args->buffer_count); |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index a4af0f79e972..bdaca3f47988 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -44,9 +44,9 @@ typedef uint32_t gtt_pte_t; | |||
44 | #define GEN6_PTE_CACHE_LLC_MLC (3 << 1) | 44 | #define GEN6_PTE_CACHE_LLC_MLC (3 << 1) |
45 | #define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) | 45 | #define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) |
46 | 46 | ||
47 | static inline gtt_pte_t pte_encode(struct drm_device *dev, | 47 | static inline gtt_pte_t gen6_pte_encode(struct drm_device *dev, |
48 | dma_addr_t addr, | 48 | dma_addr_t addr, |
49 | enum i915_cache_level level) | 49 | enum i915_cache_level level) |
50 | { | 50 | { |
51 | gtt_pte_t pte = GEN6_PTE_VALID; | 51 | gtt_pte_t pte = GEN6_PTE_VALID; |
52 | pte |= GEN6_PTE_ADDR_ENCODE(addr); | 52 | pte |= GEN6_PTE_ADDR_ENCODE(addr); |
@@ -77,7 +77,7 @@ static inline gtt_pte_t pte_encode(struct drm_device *dev, | |||
77 | } | 77 | } |
78 | 78 | ||
79 | /* PPGTT support for Sandybridge/Gen6 and later */ | 79 | /* PPGTT support for Sandybridge/Gen6 and later */ |
80 | static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, | 80 | static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, |
81 | unsigned first_entry, | 81 | unsigned first_entry, |
82 | unsigned num_entries) | 82 | unsigned num_entries) |
83 | { | 83 | { |
@@ -87,8 +87,9 @@ static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, | |||
87 | unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; | 87 | unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; |
88 | unsigned last_pte, i; | 88 | unsigned last_pte, i; |
89 | 89 | ||
90 | scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr, | 90 | scratch_pte = gen6_pte_encode(ppgtt->dev, |
91 | I915_CACHE_LLC); | 91 | ppgtt->scratch_page_dma_addr, |
92 | I915_CACHE_LLC); | ||
92 | 93 | ||
93 | while (num_entries) { | 94 | while (num_entries) { |
94 | last_pte = first_pte + num_entries; | 95 | last_pte = first_pte + num_entries; |
@@ -108,10 +109,72 @@ static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, | |||
108 | } | 109 | } |
109 | } | 110 | } |
110 | 111 | ||
111 | int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) | 112 | static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt, |
113 | struct sg_table *pages, | ||
114 | unsigned first_entry, | ||
115 | enum i915_cache_level cache_level) | ||
112 | { | 116 | { |
117 | gtt_pte_t *pt_vaddr; | ||
118 | unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; | ||
119 | unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; | ||
120 | unsigned i, j, m, segment_len; | ||
121 | dma_addr_t page_addr; | ||
122 | struct scatterlist *sg; | ||
123 | |||
124 | /* init sg walking */ | ||
125 | sg = pages->sgl; | ||
126 | i = 0; | ||
127 | segment_len = sg_dma_len(sg) >> PAGE_SHIFT; | ||
128 | m = 0; | ||
129 | |||
130 | while (i < pages->nents) { | ||
131 | pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]); | ||
132 | |||
133 | for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) { | ||
134 | page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT); | ||
135 | pt_vaddr[j] = gen6_pte_encode(ppgtt->dev, page_addr, | ||
136 | cache_level); | ||
137 | |||
138 | /* grab the next page */ | ||
139 | if (++m == segment_len) { | ||
140 | if (++i == pages->nents) | ||
141 | break; | ||
142 | |||
143 | sg = sg_next(sg); | ||
144 | segment_len = sg_dma_len(sg) >> PAGE_SHIFT; | ||
145 | m = 0; | ||
146 | } | ||
147 | } | ||
148 | |||
149 | kunmap_atomic(pt_vaddr); | ||
150 | |||
151 | first_pte = 0; | ||
152 | act_pd++; | ||
153 | } | ||
154 | } | ||
155 | |||
156 | static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt) | ||
157 | { | ||
158 | int i; | ||
159 | |||
160 | if (ppgtt->pt_dma_addr) { | ||
161 | for (i = 0; i < ppgtt->num_pd_entries; i++) | ||
162 | pci_unmap_page(ppgtt->dev->pdev, | ||
163 | ppgtt->pt_dma_addr[i], | ||
164 | 4096, PCI_DMA_BIDIRECTIONAL); | ||
165 | } | ||
166 | |||
167 | kfree(ppgtt->pt_dma_addr); | ||
168 | for (i = 0; i < ppgtt->num_pd_entries; i++) | ||
169 | __free_page(ppgtt->pt_pages[i]); | ||
170 | kfree(ppgtt->pt_pages); | ||
171 | kfree(ppgtt); | ||
172 | } | ||
173 | |||
174 | static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) | ||
175 | { | ||
176 | struct drm_device *dev = ppgtt->dev; | ||
113 | struct drm_i915_private *dev_priv = dev->dev_private; | 177 | struct drm_i915_private *dev_priv = dev->dev_private; |
114 | struct i915_hw_ppgtt *ppgtt; | ||
115 | unsigned first_pd_entry_in_global_pt; | 178 | unsigned first_pd_entry_in_global_pt; |
116 | int i; | 179 | int i; |
117 | int ret = -ENOMEM; | 180 | int ret = -ENOMEM; |
@@ -119,18 +182,17 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) | |||
119 | /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024 | 182 | /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024 |
120 | * entries. For aliasing ppgtt support we just steal them at the end for | 183 | * entries. For aliasing ppgtt support we just steal them at the end for |
121 | * now. */ | 184 | * now. */ |
122 | first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES; | 185 | first_pd_entry_in_global_pt = |
186 | gtt_total_entries(dev_priv->gtt) - I915_PPGTT_PD_ENTRIES; | ||
123 | 187 | ||
124 | ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); | ||
125 | if (!ppgtt) | ||
126 | return ret; | ||
127 | |||
128 | ppgtt->dev = dev; | ||
129 | ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES; | 188 | ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES; |
189 | ppgtt->clear_range = gen6_ppgtt_clear_range; | ||
190 | ppgtt->insert_entries = gen6_ppgtt_insert_entries; | ||
191 | ppgtt->cleanup = gen6_ppgtt_cleanup; | ||
130 | ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries, | 192 | ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries, |
131 | GFP_KERNEL); | 193 | GFP_KERNEL); |
132 | if (!ppgtt->pt_pages) | 194 | if (!ppgtt->pt_pages) |
133 | goto err_ppgtt; | 195 | return -ENOMEM; |
134 | 196 | ||
135 | for (i = 0; i < ppgtt->num_pd_entries; i++) { | 197 | for (i = 0; i < ppgtt->num_pd_entries; i++) { |
136 | ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL); | 198 | ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL); |
@@ -138,39 +200,32 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) | |||
138 | goto err_pt_alloc; | 200 | goto err_pt_alloc; |
139 | } | 201 | } |
140 | 202 | ||
141 | if (dev_priv->mm.gtt->needs_dmar) { | 203 | ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) *ppgtt->num_pd_entries, |
142 | ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) | 204 | GFP_KERNEL); |
143 | *ppgtt->num_pd_entries, | 205 | if (!ppgtt->pt_dma_addr) |
144 | GFP_KERNEL); | 206 | goto err_pt_alloc; |
145 | if (!ppgtt->pt_dma_addr) | ||
146 | goto err_pt_alloc; | ||
147 | 207 | ||
148 | for (i = 0; i < ppgtt->num_pd_entries; i++) { | 208 | for (i = 0; i < ppgtt->num_pd_entries; i++) { |
149 | dma_addr_t pt_addr; | 209 | dma_addr_t pt_addr; |
150 | 210 | ||
151 | pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], | 211 | pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096, |
152 | 0, 4096, | 212 | PCI_DMA_BIDIRECTIONAL); |
153 | PCI_DMA_BIDIRECTIONAL); | ||
154 | 213 | ||
155 | if (pci_dma_mapping_error(dev->pdev, | 214 | if (pci_dma_mapping_error(dev->pdev, pt_addr)) { |
156 | pt_addr)) { | 215 | ret = -EIO; |
157 | ret = -EIO; | 216 | goto err_pd_pin; |
158 | goto err_pd_pin; | ||
159 | 217 | ||
160 | } | ||
161 | ppgtt->pt_dma_addr[i] = pt_addr; | ||
162 | } | 218 | } |
219 | ppgtt->pt_dma_addr[i] = pt_addr; | ||
163 | } | 220 | } |
164 | 221 | ||
165 | ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma; | 222 | ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma; |
166 | 223 | ||
167 | i915_ppgtt_clear_range(ppgtt, 0, | 224 | ppgtt->clear_range(ppgtt, 0, |
168 | ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES); | 225 | ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES); |
169 | 226 | ||
170 | ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t); | 227 | ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t); |
171 | 228 | ||
172 | dev_priv->mm.aliasing_ppgtt = ppgtt; | ||
173 | |||
174 | return 0; | 229 | return 0; |
175 | 230 | ||
176 | err_pd_pin: | 231 | err_pd_pin: |
@@ -186,94 +241,57 @@ err_pt_alloc: | |||
186 | __free_page(ppgtt->pt_pages[i]); | 241 | __free_page(ppgtt->pt_pages[i]); |
187 | } | 242 | } |
188 | kfree(ppgtt->pt_pages); | 243 | kfree(ppgtt->pt_pages); |
189 | err_ppgtt: | ||
190 | kfree(ppgtt); | ||
191 | 244 | ||
192 | return ret; | 245 | return ret; |
193 | } | 246 | } |
194 | 247 | ||
195 | void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev) | 248 | static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) |
196 | { | 249 | { |
197 | struct drm_i915_private *dev_priv = dev->dev_private; | 250 | struct drm_i915_private *dev_priv = dev->dev_private; |
198 | struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; | 251 | struct i915_hw_ppgtt *ppgtt; |
199 | int i; | 252 | int ret; |
200 | 253 | ||
254 | ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); | ||
201 | if (!ppgtt) | 255 | if (!ppgtt) |
202 | return; | 256 | return -ENOMEM; |
203 | 257 | ||
204 | if (ppgtt->pt_dma_addr) { | 258 | ppgtt->dev = dev; |
205 | for (i = 0; i < ppgtt->num_pd_entries; i++) | ||
206 | pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i], | ||
207 | 4096, PCI_DMA_BIDIRECTIONAL); | ||
208 | } | ||
209 | 259 | ||
210 | kfree(ppgtt->pt_dma_addr); | 260 | ret = gen6_ppgtt_init(ppgtt); |
211 | for (i = 0; i < ppgtt->num_pd_entries; i++) | 261 | if (ret) |
212 | __free_page(ppgtt->pt_pages[i]); | 262 | kfree(ppgtt); |
213 | kfree(ppgtt->pt_pages); | 263 | else |
214 | kfree(ppgtt); | 264 | dev_priv->mm.aliasing_ppgtt = ppgtt; |
265 | |||
266 | return ret; | ||
215 | } | 267 | } |
216 | 268 | ||
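The refactor's main shape: a per-generation init routine fills function pointers (clear_range, insert_entries, cleanup) on the ppgtt object, and every caller goes through the vtable instead of naming gen6_* directly. A compilable toy version of that pattern, with printf bodies standing in for the real PTE work:

#include <stdio.h>

struct hw_ppgtt {
	void (*clear_range)(struct hw_ppgtt *p, unsigned first, unsigned num);
	void (*cleanup)(struct hw_ppgtt *p);
};

static void gen6_clear_range(struct hw_ppgtt *p, unsigned first, unsigned num)
{
	(void)p;
	printf("gen6: point PTEs [%u, %u) at the scratch page\n",
	       first, first + num);
}

static void gen6_cleanup(struct hw_ppgtt *p)
{
	(void)p;
	printf("gen6: unmap and free page tables\n");
}

/* The per-generation init does little more than wire up the vtable. */
static void gen6_init(struct hw_ppgtt *p)
{
	p->clear_range = gen6_clear_range;
	p->cleanup = gen6_cleanup;
}

int main(void)
{
	struct hw_ppgtt ppgtt;

	gen6_init(&ppgtt);			/* gen-specific wiring */
	ppgtt.clear_range(&ppgtt, 0, 512);	/* callers stay generation-agnostic */
	ppgtt.cleanup(&ppgtt);
	return 0;
}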
217 | static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt, | 269 | void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev) |
218 | const struct sg_table *pages, | ||
219 | unsigned first_entry, | ||
220 | enum i915_cache_level cache_level) | ||
221 | { | 270 | { |
222 | gtt_pte_t *pt_vaddr; | 271 | struct drm_i915_private *dev_priv = dev->dev_private; |
223 | unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; | 272 | struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; |
224 | unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; | ||
225 | unsigned i, j, m, segment_len; | ||
226 | dma_addr_t page_addr; | ||
227 | struct scatterlist *sg; | ||
228 | |||
229 | /* init sg walking */ | ||
230 | sg = pages->sgl; | ||
231 | i = 0; | ||
232 | segment_len = sg_dma_len(sg) >> PAGE_SHIFT; | ||
233 | m = 0; | ||
234 | |||
235 | while (i < pages->nents) { | ||
236 | pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]); | ||
237 | |||
238 | for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) { | ||
239 | page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT); | ||
240 | pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr, | ||
241 | cache_level); | ||
242 | |||
243 | /* grab the next page */ | ||
244 | if (++m == segment_len) { | ||
245 | if (++i == pages->nents) | ||
246 | break; | ||
247 | |||
248 | sg = sg_next(sg); | ||
249 | segment_len = sg_dma_len(sg) >> PAGE_SHIFT; | ||
250 | m = 0; | ||
251 | } | ||
252 | } | ||
253 | 273 | ||
254 | kunmap_atomic(pt_vaddr); | 274 | if (!ppgtt) |
275 | return; | ||
255 | 276 | ||
256 | first_pte = 0; | 277 | ppgtt->cleanup(ppgtt); |
257 | act_pd++; | ||
258 | } | ||
259 | } | 278 | } |
260 | 279 | ||
261 | void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, | 280 | void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, |
262 | struct drm_i915_gem_object *obj, | 281 | struct drm_i915_gem_object *obj, |
263 | enum i915_cache_level cache_level) | 282 | enum i915_cache_level cache_level) |
264 | { | 283 | { |
265 | i915_ppgtt_insert_sg_entries(ppgtt, | 284 | ppgtt->insert_entries(ppgtt, obj->pages, |
266 | obj->pages, | 285 | obj->gtt_space->start >> PAGE_SHIFT, |
267 | obj->gtt_space->start >> PAGE_SHIFT, | 286 | cache_level); |
268 | cache_level); | ||
269 | } | 287 | } |
270 | 288 | ||
271 | void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, | 289 | void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, |
272 | struct drm_i915_gem_object *obj) | 290 | struct drm_i915_gem_object *obj) |
273 | { | 291 | { |
274 | i915_ppgtt_clear_range(ppgtt, | 292 | ppgtt->clear_range(ppgtt, |
275 | obj->gtt_space->start >> PAGE_SHIFT, | 293 | obj->gtt_space->start >> PAGE_SHIFT, |
276 | obj->base.size >> PAGE_SHIFT); | 294 | obj->base.size >> PAGE_SHIFT); |
277 | } | 295 | } |
278 | 296 | ||
279 | void i915_gem_init_ppgtt(struct drm_device *dev) | 297 | void i915_gem_init_ppgtt(struct drm_device *dev) |
@@ -290,15 +308,11 @@ void i915_gem_init_ppgtt(struct drm_device *dev) | |||
290 | return; | 308 | return; |
291 | 309 | ||
292 | 310 | ||
293 | pd_addr = (gtt_pte_t __iomem*)dev_priv->mm.gsm + ppgtt->pd_offset/sizeof(gtt_pte_t); | 311 | pd_addr = (gtt_pte_t __iomem*)dev_priv->gtt.gsm + ppgtt->pd_offset/sizeof(gtt_pte_t); |
294 | for (i = 0; i < ppgtt->num_pd_entries; i++) { | 312 | for (i = 0; i < ppgtt->num_pd_entries; i++) { |
295 | dma_addr_t pt_addr; | 313 | dma_addr_t pt_addr; |
296 | 314 | ||
297 | if (dev_priv->mm.gtt->needs_dmar) | 315 | pt_addr = ppgtt->pt_dma_addr[i]; |
298 | pt_addr = ppgtt->pt_dma_addr[i]; | ||
299 | else | ||
300 | pt_addr = page_to_phys(ppgtt->pt_pages[i]); | ||
301 | |||
302 | pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr); | 316 | pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr); |
303 | pd_entry |= GEN6_PDE_VALID; | 317 | pd_entry |= GEN6_PDE_VALID; |
304 | 318 | ||
@@ -338,11 +352,27 @@ void i915_gem_init_ppgtt(struct drm_device *dev) | |||
338 | } | 352 | } |
339 | } | 353 | } |
340 | 354 | ||
355 | extern int intel_iommu_gfx_mapped; | ||
356 | /* Certain Gen5 chipsets require idling the GPU before | ||
357 | * unmapping anything from the GTT when VT-d is enabled. | ||
358 | */ | ||
359 | static inline bool needs_idle_maps(struct drm_device *dev) | ||
360 | { | ||
361 | #ifdef CONFIG_INTEL_IOMMU | ||
362 | /* Query intel_iommu to see if we need the workaround. Presumably that | ||
363 | * was loaded first. | ||
364 | */ | ||
365 | if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped) | ||
366 | return true; | ||
367 | #endif | ||
368 | return false; | ||
369 | } | ||
370 | |||
341 | static bool do_idling(struct drm_i915_private *dev_priv) | 371 | static bool do_idling(struct drm_i915_private *dev_priv) |
342 | { | 372 | { |
343 | bool ret = dev_priv->mm.interruptible; | 373 | bool ret = dev_priv->mm.interruptible; |
344 | 374 | ||
345 | if (unlikely(dev_priv->mm.gtt->do_idle_maps)) { | 375 | if (unlikely(dev_priv->gtt.do_idle_maps)) { |
346 | dev_priv->mm.interruptible = false; | 376 | dev_priv->mm.interruptible = false; |
347 | if (i915_gpu_idle(dev_priv->dev)) { | 377 | if (i915_gpu_idle(dev_priv->dev)) { |
348 | DRM_ERROR("Couldn't idle GPU\n"); | 378 | DRM_ERROR("Couldn't idle GPU\n"); |
@@ -356,45 +386,18 @@ static bool do_idling(struct drm_i915_private *dev_priv) | |||
356 | 386 | ||
357 | static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible) | 387 | static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible) |
358 | { | 388 | { |
359 | if (unlikely(dev_priv->mm.gtt->do_idle_maps)) | 389 | if (unlikely(dev_priv->gtt.do_idle_maps)) |
360 | dev_priv->mm.interruptible = interruptible; | 390 | dev_priv->mm.interruptible = interruptible; |
361 | } | 391 | } |
362 | 392 | ||
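do_idling()/undo_idling() bracket a GTT unmap with a forced GPU idle on the affected Ironlake systems, saving the interruptible flag on entry and restoring it on exit. The save/act/restore shape in isolation, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

struct priv {
	bool interruptible;
	bool do_idle_maps;
};

static bool do_idling(struct priv *p)
{
	bool was = p->interruptible;

	if (p->do_idle_maps) {
		p->interruptible = false;	/* the idle wait must not abort */
		printf("idling GPU before GTT unmap\n");
	}
	return was;
}

static void undo_idling(struct priv *p, bool was)
{
	if (p->do_idle_maps)
		p->interruptible = was;
}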
363 | |||
364 | static void i915_ggtt_clear_range(struct drm_device *dev, | ||
365 | unsigned first_entry, | ||
366 | unsigned num_entries) | ||
367 | { | ||
368 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
369 | gtt_pte_t scratch_pte; | ||
370 | gtt_pte_t __iomem *gtt_base = (gtt_pte_t __iomem *) dev_priv->mm.gsm + first_entry; | ||
371 | const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry; | ||
372 | int i; | ||
373 | |||
374 | if (INTEL_INFO(dev)->gen < 6) { | ||
375 | intel_gtt_clear_range(first_entry, num_entries); | ||
376 | return; | ||
377 | } | ||
378 | |||
379 | if (WARN(num_entries > max_entries, | ||
380 | "First entry = %d; Num entries = %d (max=%d)\n", | ||
381 | first_entry, num_entries, max_entries)) | ||
382 | num_entries = max_entries; | ||
383 | |||
384 | scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC); | ||
385 | for (i = 0; i < num_entries; i++) | ||
386 | iowrite32(scratch_pte, >t_base[i]); | ||
387 | readl(gtt_base); | ||
388 | } | ||
389 | |||
390 | void i915_gem_restore_gtt_mappings(struct drm_device *dev) | 393 | void i915_gem_restore_gtt_mappings(struct drm_device *dev) |
391 | { | 394 | { |
392 | struct drm_i915_private *dev_priv = dev->dev_private; | 395 | struct drm_i915_private *dev_priv = dev->dev_private; |
393 | struct drm_i915_gem_object *obj; | 396 | struct drm_i915_gem_object *obj; |
394 | 397 | ||
395 | /* First fill our portion of the GTT with scratch pages */ | 398 | /* First fill our portion of the GTT with scratch pages */ |
396 | i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE, | 399 | dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE, |
397 | (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); | 400 | dev_priv->gtt.total / PAGE_SIZE); |
398 | 401 | ||
399 | list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { | 402 | list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { |
400 | i915_gem_clflush_object(obj); | 403 | i915_gem_clflush_object(obj); |
@@ -423,17 +426,15 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) | |||
423 | * within the global GTT as well as accessible by the GPU through the GMADR | 426 | * within the global GTT as well as accessible by the GPU through the GMADR |
424 | * mapped BAR (dev_priv->mm.gtt->gtt). | 427 | * mapped BAR (dev_priv->mm.gtt->gtt). |
425 | */ | 428 | */ |
426 | static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj, | 429 | static void gen6_ggtt_insert_entries(struct drm_device *dev, |
427 | enum i915_cache_level level) | 430 | struct sg_table *st, |
431 | unsigned int first_entry, | ||
432 | enum i915_cache_level level) | ||
428 | { | 433 | { |
429 | struct drm_device *dev = obj->base.dev; | ||
430 | struct drm_i915_private *dev_priv = dev->dev_private; | 434 | struct drm_i915_private *dev_priv = dev->dev_private; |
431 | struct sg_table *st = obj->pages; | ||
432 | struct scatterlist *sg = st->sgl; | 435 | struct scatterlist *sg = st->sgl; |
433 | const int first_entry = obj->gtt_space->start >> PAGE_SHIFT; | ||
434 | const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry; | ||
435 | gtt_pte_t __iomem *gtt_entries = | 436 | gtt_pte_t __iomem *gtt_entries = |
436 | (gtt_pte_t __iomem *)dev_priv->mm.gsm + first_entry; | 437 | (gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; |
437 | int unused, i = 0; | 438 | int unused, i = 0; |
438 | unsigned int len, m = 0; | 439 | unsigned int len, m = 0; |
439 | dma_addr_t addr; | 440 | dma_addr_t addr; |
@@ -442,14 +443,12 @@ static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj, | |||
442 | len = sg_dma_len(sg) >> PAGE_SHIFT; | 443 | len = sg_dma_len(sg) >> PAGE_SHIFT; |
443 | for (m = 0; m < len; m++) { | 444 | for (m = 0; m < len; m++) { |
444 | addr = sg_dma_address(sg) + (m << PAGE_SHIFT); | 445 | addr = sg_dma_address(sg) + (m << PAGE_SHIFT); |
445 | iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]); | 446 | iowrite32(gen6_pte_encode(dev, addr, level), |
447 | &gtt_entries[i]); |
446 | i++; | 448 | i++; |
447 | } | 449 | } |
448 | } | 450 | } |
449 | 451 | ||
450 | BUG_ON(i > max_entries); | ||
451 | BUG_ON(i != obj->base.size / PAGE_SIZE); | ||
452 | |||
453 | /* XXX: This serves as a posting read to make sure that the PTE has | 452 | /* XXX: This serves as a posting read to make sure that the PTE has |
454 | * actually been updated. There is some concern that even though | 453 | * actually been updated. There is some concern that even though |
455 | * registers and PTEs are within the same BAR that they are potentially | 454 | * registers and PTEs are within the same BAR that they are potentially |
@@ -457,7 +456,8 @@ static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj, | |||
457 | * hardware should work, we must keep this posting read for paranoia. | 456 | * hardware should work, we must keep this posting read for paranoia. |
458 | */ | 457 | */ |
459 | if (i != 0) | 458 | if (i != 0) |
460 | WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level)); | 459 | WARN_ON(readl(&gtt_entries[i-1]) |
460 | != gen6_pte_encode(dev, addr, level)); | ||
461 | 461 | ||
462 | /* This next bit makes the above posting read even more important. We | 462 | /* This next bit makes the above posting read even more important. We |
463 | * want to flush the TLBs only after we're certain all the PTE updates | 463 | * want to flush the TLBs only after we're certain all the PTE updates |
@@ -467,28 +467,70 @@ static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj, | |||
467 | POSTING_READ(GFX_FLSH_CNTL_GEN6); | 467 | POSTING_READ(GFX_FLSH_CNTL_GEN6); |
468 | } | 468 | } |
469 | 469 | ||
470 | static void gen6_ggtt_clear_range(struct drm_device *dev, | ||
471 | unsigned int first_entry, | ||
472 | unsigned int num_entries) | ||
473 | { | ||
474 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
475 | gtt_pte_t scratch_pte; | ||
476 | gtt_pte_t __iomem *gtt_base = (gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; | ||
477 | const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; | ||
478 | int i; | ||
479 | |||
480 | if (WARN(num_entries > max_entries, | ||
481 | "First entry = %d; Num entries = %d (max=%d)\n", | ||
482 | first_entry, num_entries, max_entries)) | ||
483 | num_entries = max_entries; | ||
484 | |||
485 | scratch_pte = gen6_pte_encode(dev, dev_priv->gtt.scratch_page_dma, | ||
486 | I915_CACHE_LLC); | ||
487 | for (i = 0; i < num_entries; i++) | ||
488 | iowrite32(scratch_pte, &gtt_base[i]); ||
489 | readl(gtt_base); | ||
490 | } | ||
491 | |||
492 | |||
493 | static void i915_ggtt_insert_entries(struct drm_device *dev, | ||
494 | struct sg_table *st, | ||
495 | unsigned int pg_start, | ||
496 | enum i915_cache_level cache_level) | ||
497 | { | ||
498 | unsigned int flags = (cache_level == I915_CACHE_NONE) ? | ||
499 | AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; | ||
500 | |||
501 | intel_gtt_insert_sg_entries(st, pg_start, flags); | ||
502 | |||
503 | } | ||
504 | |||
505 | static void i915_ggtt_clear_range(struct drm_device *dev, | ||
506 | unsigned int first_entry, | ||
507 | unsigned int num_entries) | ||
508 | { | ||
509 | intel_gtt_clear_range(first_entry, num_entries); | ||
510 | } | ||
511 | |||
512 | |||
470 | void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, | 513 | void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, |
471 | enum i915_cache_level cache_level) | 514 | enum i915_cache_level cache_level) |
472 | { | 515 | { |
473 | struct drm_device *dev = obj->base.dev; | 516 | struct drm_device *dev = obj->base.dev; |
474 | if (INTEL_INFO(dev)->gen < 6) { | 517 | struct drm_i915_private *dev_priv = dev->dev_private; |
475 | unsigned int flags = (cache_level == I915_CACHE_NONE) ? | 518 | |
476 | AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; | 519 | dev_priv->gtt.gtt_insert_entries(dev, obj->pages, |
477 | intel_gtt_insert_sg_entries(obj->pages, | 520 | obj->gtt_space->start >> PAGE_SHIFT, |
478 | obj->gtt_space->start >> PAGE_SHIFT, | 521 | cache_level); |
479 | flags); | ||
480 | } else { | ||
481 | gen6_ggtt_bind_object(obj, cache_level); | ||
482 | } | ||
483 | 522 | ||
484 | obj->has_global_gtt_mapping = 1; | 523 | obj->has_global_gtt_mapping = 1; |
485 | } | 524 | } |
486 | 525 | ||
487 | void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) | 526 | void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) |
488 | { | 527 | { |
489 | i915_ggtt_clear_range(obj->base.dev, | 528 | struct drm_device *dev = obj->base.dev; |
490 | obj->gtt_space->start >> PAGE_SHIFT, | 529 | struct drm_i915_private *dev_priv = dev->dev_private; |
491 | obj->base.size >> PAGE_SHIFT); | 530 | |
531 | dev_priv->gtt.gtt_clear_range(obj->base.dev, | ||
532 | obj->gtt_space->start >> PAGE_SHIFT, | ||
533 | obj->base.size >> PAGE_SHIFT); | ||
492 | 534 | ||
493 | obj->has_global_gtt_mapping = 0; | 535 | obj->has_global_gtt_mapping = 0; |
494 | } | 536 | } |
@@ -525,17 +567,27 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node, | |||
525 | *end -= 4096; | 567 | *end -= 4096; |
526 | } | 568 | } |
527 | } | 569 | } |
528 | |||
529 | void i915_gem_setup_global_gtt(struct drm_device *dev, | 570 | void i915_gem_setup_global_gtt(struct drm_device *dev, |
530 | unsigned long start, | 571 | unsigned long start, |
531 | unsigned long mappable_end, | 572 | unsigned long mappable_end, |
532 | unsigned long end) | 573 | unsigned long end) |
533 | { | 574 | { |
575 | /* Let GEM Manage all of the aperture. | ||
576 | * | ||
577 | * However, leave one page at the end still bound to the scratch page. | ||
578 | * There are a number of places where the hardware apparently prefetches | ||
579 | * past the end of the object, and we've seen multiple hangs with the | ||
580 | * GPU head pointer stuck in a batchbuffer bound at the last page of the | ||
581 | * aperture. One page should be enough to keep any prefetching inside | ||
582 | * of the aperture. | ||
583 | */ | ||
534 | drm_i915_private_t *dev_priv = dev->dev_private; | 584 | drm_i915_private_t *dev_priv = dev->dev_private; |
535 | struct drm_mm_node *entry; | 585 | struct drm_mm_node *entry; |
536 | struct drm_i915_gem_object *obj; | 586 | struct drm_i915_gem_object *obj; |
537 | unsigned long hole_start, hole_end; | 587 | unsigned long hole_start, hole_end; |
538 | 588 | ||
589 | BUG_ON(mappable_end > end); | ||
590 | |||
539 | /* Subtract the guard page ... */ | 591 | /* Subtract the guard page ... */ |
540 | drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE); | 592 | drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE); |
541 | if (!HAS_LLC(dev)) | 593 | if (!HAS_LLC(dev)) |
@@ -554,24 +606,20 @@ void i915_gem_setup_global_gtt(struct drm_device *dev, | |||
554 | obj->has_global_gtt_mapping = 1; | 606 | obj->has_global_gtt_mapping = 1; |
555 | } | 607 | } |
556 | 608 | ||
557 | dev_priv->mm.gtt_start = start; | 609 | dev_priv->gtt.start = start; |
558 | dev_priv->mm.gtt_mappable_end = mappable_end; | 610 | dev_priv->gtt.total = end - start; |
559 | dev_priv->mm.gtt_end = end; | ||
560 | dev_priv->mm.gtt_total = end - start; | ||
561 | dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start; | ||
562 | 611 | ||
563 | /* Clear any non-preallocated blocks */ | 612 | /* Clear any non-preallocated blocks */ |
564 | drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space, | 613 | drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space, |
565 | hole_start, hole_end) { | 614 | hole_start, hole_end) { |
566 | DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", | 615 | DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", |
567 | hole_start, hole_end); | 616 | hole_start, hole_end); |
568 | i915_ggtt_clear_range(dev, | 617 | dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE, |
569 | hole_start / PAGE_SIZE, | 618 | (hole_end-hole_start) / PAGE_SIZE); |
570 | (hole_end-hole_start) / PAGE_SIZE); | ||
571 | } | 619 | } |
572 | 620 | ||
573 | /* And finally clear the reserved guard page */ | 621 | /* And finally clear the reserved guard page */ |
574 | i915_ggtt_clear_range(dev, end / PAGE_SIZE - 1, 1); | 622 | dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1); |
575 | } | 623 | } |
576 | 624 | ||
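The guard page is implemented by handing the drm_mm allocator one page less than the true aperture and scrubbing that final PTE separately, so GPU prefetch past the last object hits the scratch page. The arithmetic, with an assumed 256 MB aperture:

#include <stdio.h>

#define PAGE_SIZE 4096ul

int main(void)
{
	unsigned long start = 0, end = 256ul << 20;	/* illustrative 256 MB GTT */

	/* The allocator manages [start, end - PAGE_SIZE); the final page
	 * stays bound to scratch to absorb hardware prefetch. */
	printf("manageable bytes: %lu\n", end - start - PAGE_SIZE);
	printf("guard PTE index:  %lu\n", end / PAGE_SIZE - 1);
	return 0;
}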
577 | static bool | 625 | static bool |
@@ -593,12 +641,12 @@ void i915_gem_init_global_gtt(struct drm_device *dev) | |||
593 | { | 641 | { |
594 | struct drm_i915_private *dev_priv = dev->dev_private; | 642 | struct drm_i915_private *dev_priv = dev->dev_private; |
595 | unsigned long gtt_size, mappable_size; | 643 | unsigned long gtt_size, mappable_size; |
596 | int ret; | ||
597 | 644 | ||
598 | gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT; | 645 | gtt_size = dev_priv->gtt.total; |
599 | mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; | 646 | mappable_size = dev_priv->gtt.mappable_end; |
600 | 647 | ||
601 | if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) { | 648 | if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) { |
649 | int ret; | ||
602 | /* PPGTT pdes are stolen from global gtt ptes, so shrink the | 650 | /* PPGTT pdes are stolen from global gtt ptes, so shrink the |
603 | * aperture accordingly when using aliasing ppgtt. */ | 651 | * aperture accordingly when using aliasing ppgtt. */ |
604 | gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE; | 652 | gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE; |
@@ -606,23 +654,14 @@ void i915_gem_init_global_gtt(struct drm_device *dev) | |||
606 | i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); | 654 | i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); |
607 | 655 | ||
608 | ret = i915_gem_init_aliasing_ppgtt(dev); | 656 | ret = i915_gem_init_aliasing_ppgtt(dev); |
609 | if (ret) { | 657 | if (!ret) |
610 | mutex_unlock(&dev->struct_mutex); | ||
611 | return; | 658 | return; |
612 | } | 659 | |
613 | } else { | 660 | DRM_ERROR("Aliased PPGTT setup failed %d\n", ret); |
614 | /* Let GEM Manage all of the aperture. | 661 | drm_mm_takedown(&dev_priv->mm.gtt_space); |
615 | * | 662 | gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE; |
616 | * However, leave one page at the end still bound to the scratch | ||
617 | * page. There are a number of places where the hardware | ||
618 | * apparently prefetches past the end of the object, and we've | ||
619 | * seen multiple hangs with the GPU head pointer stuck in a | ||
620 | * batchbuffer bound at the last page of the aperture. One page | ||
621 | * should be enough to keep any prefetching inside of the | ||
622 | * aperture. | ||
623 | */ | ||
624 | i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); | ||
625 | } | 663 | } |
664 | i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); | ||
626 | } | 665 | } |
627 | 666 | ||
628 | static int setup_scratch_page(struct drm_device *dev) | 667 | static int setup_scratch_page(struct drm_device *dev) |
@@ -645,8 +684,8 @@ static int setup_scratch_page(struct drm_device *dev) | |||
645 | #else | 684 | #else |
646 | dma_addr = page_to_phys(page); | 685 | dma_addr = page_to_phys(page); |
647 | #endif | 686 | #endif |
648 | dev_priv->mm.gtt->scratch_page = page; | 687 | dev_priv->gtt.scratch_page = page; |
649 | dev_priv->mm.gtt->scratch_page_dma = dma_addr; | 688 | dev_priv->gtt.scratch_page_dma = dma_addr; |
650 | 689 | ||
651 | return 0; | 690 | return 0; |
652 | } | 691 | } |
@@ -654,11 +693,11 @@ static int setup_scratch_page(struct drm_device *dev) | |||
654 | static void teardown_scratch_page(struct drm_device *dev) | 693 | static void teardown_scratch_page(struct drm_device *dev) |
655 | { | 694 | { |
656 | struct drm_i915_private *dev_priv = dev->dev_private; | 695 | struct drm_i915_private *dev_priv = dev->dev_private; |
657 | set_pages_wb(dev_priv->mm.gtt->scratch_page, 1); | 696 | set_pages_wb(dev_priv->gtt.scratch_page, 1); |
658 | pci_unmap_page(dev->pdev, dev_priv->mm.gtt->scratch_page_dma, | 697 | pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma, |
659 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | 698 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
660 | put_page(dev_priv->mm.gtt->scratch_page); | 699 | put_page(dev_priv->gtt.scratch_page); |
661 | __free_page(dev_priv->mm.gtt->scratch_page); | 700 | __free_page(dev_priv->gtt.scratch_page); |
662 | } | 701 | } |
663 | 702 | ||
664 | static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) | 703 | static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) |
@@ -668,14 +707,14 @@ static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) | |||
668 | return snb_gmch_ctl << 20; | 707 | return snb_gmch_ctl << 20; |
669 | } | 708 | } |
670 | 709 | ||
671 | static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl) | 710 | static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl) |
672 | { | 711 | { |
673 | snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT; | 712 | snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT; |
674 | snb_gmch_ctl &= SNB_GMCH_GMS_MASK; | 713 | snb_gmch_ctl &= SNB_GMCH_GMS_MASK; |
675 | return snb_gmch_ctl << 25; /* 32 MB units */ | 714 | return snb_gmch_ctl << 25; /* 32 MB units */ |
676 | } | 715 | } |
677 | 716 | ||
678 | static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl) | 717 | static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl) |
679 | { | 718 | { |
680 | static const int stolen_decoder[] = { | 719 | static const int stolen_decoder[] = { |
681 | 0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352}; | 720 | 0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352}; |
@@ -684,103 +723,122 @@ static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl) | |||
684 | return stolen_decoder[snb_gmch_ctl] << 20; | 723 | return stolen_decoder[snb_gmch_ctl] << 20; |
685 | } | 724 | } |
686 | 725 | ||
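Worked example of the gen6 decode above: the GMS field of SNB_GMCH_CTRL is shifted down, masked, and scaled by 32 MB (<< 25), so a raw field value of 3 means 96 MB of stolen memory. Sketch below; the shift and mask constants are assumptions for illustration, the real ones live in the driver headers:

#include <stdint.h>
#include <stddef.h>

#define GMS_SHIFT 3	/* assumed field position */
#define GMS_MASK  0x1f	/* assumed field width */

static size_t gen6_stolen_bytes(uint16_t gmch_ctl)
{
	gmch_ctl >>= GMS_SHIFT;
	gmch_ctl &= GMS_MASK;
	return (size_t)gmch_ctl << 25;	/* 32 MB units: field value 3 -> 96 MB */
}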
687 | int i915_gem_gtt_init(struct drm_device *dev) | 726 | static int gen6_gmch_probe(struct drm_device *dev, |
727 | size_t *gtt_total, | ||
728 | size_t *stolen) | ||
688 | { | 729 | { |
689 | struct drm_i915_private *dev_priv = dev->dev_private; | 730 | struct drm_i915_private *dev_priv = dev->dev_private; |
690 | phys_addr_t gtt_bus_addr; | 731 | phys_addr_t gtt_bus_addr; |
732 | unsigned int gtt_size; | ||
691 | u16 snb_gmch_ctl; | 733 | u16 snb_gmch_ctl; |
692 | int ret; | 734 | int ret; |
693 | 735 | ||
694 | /* On modern platforms we need not worry ourself with the legacy | 736 | /* 64/512MB is the current min/max we actually know of, but this is just |
695 | * hostbridge query stuff. Skip it entirely | 737 | * a coarse sanity check. |
696 | */ | 738 | */ |
697 | if (INTEL_INFO(dev)->gen < 6) { | 739 | if ((dev_priv->gtt.mappable_end < (64<<20) || |
698 | ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL); | 740 | (dev_priv->gtt.mappable_end > (512<<20)))) { |
699 | if (!ret) { | 741 | DRM_ERROR("Unknown GMADR size (%lx)\n", |
700 | DRM_ERROR("failed to set up gmch\n"); | 742 | dev_priv->gtt.mappable_end); |
701 | return -EIO; | 743 | return -ENXIO; |
702 | } | ||
703 | |||
704 | dev_priv->mm.gtt = intel_gtt_get(); | ||
705 | if (!dev_priv->mm.gtt) { | ||
706 | DRM_ERROR("Failed to initialize GTT\n"); | ||
707 | intel_gmch_remove(); | ||
708 | return -ENODEV; | ||
709 | } | ||
710 | return 0; | ||
711 | } | 744 | } |
712 | 745 | ||
713 | dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL); | ||
714 | if (!dev_priv->mm.gtt) | ||
715 | return -ENOMEM; | ||
716 | |||
717 | if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40))) | 746 | if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40))) |
718 | pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40)); | 747 | pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40)); |
748 | pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); | ||
749 | gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl); | ||
719 | 750 | ||
720 | #ifdef CONFIG_INTEL_IOMMU | 751 | if (IS_GEN7(dev)) |
721 | dev_priv->mm.gtt->needs_dmar = 1; | 752 | *stolen = gen7_get_stolen_size(snb_gmch_ctl); |
722 | #endif | 753 | else |
754 | *stolen = gen6_get_stolen_size(snb_gmch_ctl); | ||
755 | |||
756 | *gtt_total = (gtt_size / sizeof(gtt_pte_t)) << PAGE_SHIFT; | ||
723 | 757 | ||
724 | /* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */ | 758 | /* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */ |
725 | gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20); | 759 | gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20); |
726 | dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2); | 760 | dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size); |
727 | 761 | if (!dev_priv->gtt.gsm) { | |
728 | /* i9xx_setup */ | 762 | DRM_ERROR("Failed to map the gtt page table\n"); |
729 | pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); | 763 | return -ENOMEM; |
730 | dev_priv->mm.gtt->gtt_total_entries = | ||
731 | gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t); | ||
732 | if (INTEL_INFO(dev)->gen < 7) | ||
733 | dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl); | ||
734 | else | ||
735 | dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl); | ||
736 | |||
737 | dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT; | ||
738 | /* 64/512MB is the current min/max we actually know of, but this is just a | ||
739 | * coarse sanity check. | ||
740 | */ | ||
741 | if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 || | ||
742 | dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) { | ||
743 | DRM_ERROR("Unknown GMADR entries (%d)\n", | ||
744 | dev_priv->mm.gtt->gtt_mappable_entries); | ||
745 | ret = -ENXIO; | ||
746 | goto err_out; | ||
747 | } | 764 | } |
748 | 765 | ||
749 | ret = setup_scratch_page(dev); | 766 | ret = setup_scratch_page(dev); |
750 | if (ret) { | 767 | if (ret) |
751 | DRM_ERROR("Scratch setup failed\n"); | 768 | DRM_ERROR("Scratch setup failed\n"); |
752 | goto err_out; | ||
753 | } | ||
754 | 769 | ||
755 | dev_priv->mm.gsm = ioremap_wc(gtt_bus_addr, | 770 | dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range; |
756 | dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t)); | 771 | dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries; |
757 | if (!dev_priv->mm.gsm) { | 772 | |
758 | DRM_ERROR("Failed to map the gtt page table\n"); | 773 | return ret; |
759 | teardown_scratch_page(dev); | 774 | } |
760 | ret = -ENOMEM; | 775 | |
761 | goto err_out; | 776 | static void gen6_gmch_remove(struct drm_device *dev) |
777 | { | ||
778 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
779 | iounmap(dev_priv->gtt.gsm); | ||
780 | teardown_scratch_page(dev_priv->dev); | ||
781 | } | ||
782 | |||
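The total-size computation in gen6_gmch_probe() above is pure arithmetic: gen6 global-GTT PTEs are one dword each (gtt_pte_t), and every PTE maps one page, so a PTE table of gtt_size bytes covers (gtt_size / sizeof(gtt_pte_t)) << PAGE_SHIFT bytes of GTT space. A minimal userspace sketch of that relationship, with the PTE width and page shift stated as assumptions rather than taken from the kernel headers:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    typedef uint32_t gtt_pte_t;     /* assumption: gen6 GGTT PTEs are 32-bit */
    #define PAGE_SHIFT 12           /* assumption: 4 KiB pages */

    int main(void)
    {
            /* e.g. a 2 MiB PTE table, as decoded from SNB_GMCH_CTRL */
            size_t gtt_size = 2 << 20;
            size_t gtt_total = (gtt_size / sizeof(gtt_pte_t)) << PAGE_SHIFT;

            /* 2 MiB of 4-byte PTEs -> 512Ki entries -> 2 GiB of GTT */
            printf("%zu MiB of PTEs map %zu MiB of GTT\n",
                   gtt_size >> 20, gtt_total >> 20);
            return 0;
    }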
783 | static int i915_gmch_probe(struct drm_device *dev, | ||
784 | size_t *gtt_total, | ||
785 | size_t *stolen) | ||
786 | { | ||
787 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
788 | int ret; | ||
789 | |||
790 | ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL); | ||
791 | if (!ret) { | ||
792 | DRM_ERROR("failed to set up gmch\n"); | ||
793 | return -EIO; | ||
762 | } | 794 | } |
763 | 795 | ||
764 | /* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */ | 796 | intel_gtt_get(gtt_total, stolen); |
765 | DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8); | 797 | |
766 | DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8); | 798 | dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev); |
767 | DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20); | 799 | dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range; |
800 | dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries; | ||
768 | 801 | ||
769 | return 0; | 802 | return 0; |
803 | } | ||
770 | 804 | ||
771 | err_out: | 805 | static void i915_gmch_remove(struct drm_device *dev) |
772 | kfree(dev_priv->mm.gtt); | 806 | { |
773 | if (INTEL_INFO(dev)->gen < 6) | 807 | intel_gmch_remove(); |
774 | intel_gmch_remove(); | ||
775 | return ret; | ||
776 | } | 808 | } |
777 | 809 | ||
778 | void i915_gem_gtt_fini(struct drm_device *dev) | 810 | int i915_gem_gtt_init(struct drm_device *dev) |
779 | { | 811 | { |
780 | struct drm_i915_private *dev_priv = dev->dev_private; | 812 | struct drm_i915_private *dev_priv = dev->dev_private; |
781 | iounmap(dev_priv->mm.gsm); | 813 | struct i915_gtt *gtt = &dev_priv->gtt; |
782 | teardown_scratch_page(dev); | 814 | unsigned long gtt_size; |
783 | if (INTEL_INFO(dev)->gen < 6) | 815 | int ret; |
784 | intel_gmch_remove(); | 816 | |
785 | kfree(dev_priv->mm.gtt); | 817 | gtt->mappable_base = pci_resource_start(dev->pdev, 2); |
818 | gtt->mappable_end = pci_resource_len(dev->pdev, 2); | ||
819 | |||
820 | if (INTEL_INFO(dev)->gen <= 5) { | ||
821 | dev_priv->gtt.gtt_probe = i915_gmch_probe; | ||
822 | dev_priv->gtt.gtt_remove = i915_gmch_remove; | ||
823 | } else { | ||
824 | dev_priv->gtt.gtt_probe = gen6_gmch_probe; | ||
825 | dev_priv->gtt.gtt_remove = gen6_gmch_remove; | ||
826 | } | ||
827 | |||
828 | ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total, | ||
829 | &dev_priv->gtt.stolen_size); | ||
830 | if (ret) | ||
831 | return ret; | ||
832 | |||
833 | gtt_size = (dev_priv->gtt.total >> PAGE_SHIFT) * sizeof(gtt_pte_t); | ||
834 | |||
835 | /* GMADR is the PCI mmio aperture into the global GTT. */ | ||
836 | DRM_INFO("Memory usable by graphics device = %zdM\n", | ||
837 | dev_priv->gtt.total >> 20); | ||
838 | DRM_DEBUG_DRIVER("GMADR size = %ldM\n", | ||
839 | dev_priv->gtt.mappable_end >> 20); | ||
840 | DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", | ||
841 | dev_priv->gtt.stolen_size >> 20); | ||
842 | |||
843 | return 0; | ||
786 | } | 844 | } |
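After this rework, i915_gem_gtt_init() is just vfunc selection plus reporting: pick the probe/remove pair by generation, call probe, print the sizes. It also retires i915_gem_gtt_fini(), since teardown is now owned by the gtt_remove hook. A self-contained sketch of the dispatch pattern — the sizes and helper names here are illustrative; only the probe/remove split mirrors the real hooks:

    #include <stdio.h>
    #include <stddef.h>

    struct gtt_ops {
            int  (*probe)(size_t *total, size_t *stolen);
            void (*remove)(void);
    };

    static int gen6_probe(size_t *total, size_t *stolen)
    {
            *total = (size_t)2 << 30;       /* placeholder sizes */
            *stolen = (size_t)64 << 20;
            return 0;
    }
    static void gen6_remove(void) { puts("gen6 remove"); }

    static int legacy_probe(size_t *total, size_t *stolen)
    {
            *total = (size_t)256 << 20;
            *stolen = (size_t)32 << 20;
            return 0;
    }
    static void legacy_remove(void) { puts("legacy remove"); }

    int main(void)
    {
            int gen = 6;            /* stand-in for INTEL_INFO(dev)->gen */
            struct gtt_ops ops;
            size_t total, stolen;

            if (gen <= 5) {
                    ops.probe = legacy_probe;
                    ops.remove = legacy_remove;
            } else {
                    ops.probe = gen6_probe;
                    ops.remove = gen6_remove;
            }

            if (ops.probe(&total, &stolen) == 0)
                    printf("GTT %zu MiB, stolen %zu MiB\n",
                           total >> 20, stolen >> 20);
            ops.remove();
            return 0;
    }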
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index f21ae17e298f..69d97cbac13c 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c | |||
@@ -187,11 +187,11 @@ int i915_gem_init_stolen(struct drm_device *dev) | |||
187 | if (dev_priv->mm.stolen_base == 0) | 187 | if (dev_priv->mm.stolen_base == 0) |
188 | return 0; | 188 | return 0; |
189 | 189 | ||
190 | DRM_DEBUG_KMS("found %d bytes of stolen memory at %08lx\n", | 190 | DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n", |
191 | dev_priv->mm.gtt->stolen_size, dev_priv->mm.stolen_base); | 191 | dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base); |
192 | 192 | ||
193 | /* Basic memrange allocator for stolen space */ | 193 | /* Basic memrange allocator for stolen space */ |
194 | drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->mm.gtt->stolen_size); | 194 | drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size); |
195 | 195 | ||
196 | return 0; | 196 | return 0; |
197 | } | 197 | } |
@@ -205,7 +205,7 @@ i915_pages_create_for_stolen(struct drm_device *dev, | |||
205 | struct scatterlist *sg; | 205 | struct scatterlist *sg; |
206 | 206 | ||
207 | DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size); | 207 | DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size); |
208 | BUG_ON(offset > dev_priv->mm.gtt->stolen_size - size); | 208 | BUG_ON(offset > dev_priv->gtt.stolen_size - size); |
209 | 209 | ||
210 | /* We hide that we have no struct page backing our stolen object | 210 | /* We hide that we have no struct page backing our stolen object |
211 | * by wrapping the contiguous physical allocation with a fake | 211 | * by wrapping the contiguous physical allocation with a fake |
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 65f1d4f3f775..abcba2f5a788 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -272,18 +272,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode) | |||
272 | return false; | 272 | return false; |
273 | } | 273 | } |
274 | 274 | ||
275 | /* | 275 | size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode); |
276 | * Previous chips need to be aligned to the size of the smallest | ||
277 | * fence register that can contain the object. | ||
278 | */ | ||
279 | if (INTEL_INFO(obj->base.dev)->gen == 3) | ||
280 | size = 1024*1024; | ||
281 | else | ||
282 | size = 512*1024; | ||
283 | |||
284 | while (size < obj->base.size) | ||
285 | size <<= 1; | ||
286 | |||
287 | if (obj->gtt_space->size != size) | 276 | if (obj->gtt_space->size != size) |
288 | return false; | 277 | return false; |
289 | 278 | ||
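The open-coded block removed above is not lost; it is centralized in i915_gem_get_gtt_size(). The rule it encodes: pre-gen4 fence regions must be power-of-two sized, starting from a 1 MiB minimum on gen3 (512 KiB on gen2) and doubling until the object fits. A standalone reduction of that round-up (the two-argument signature is a simplification for the sketch):

    #include <stdio.h>
    #include <stdint.h>

    /* pre-gen4 fence sizing: power of two, per-generation minimum */
    static uint32_t fence_size(int gen, uint32_t obj_size)
    {
            uint32_t size = (gen == 3) ? 1024 * 1024 : 512 * 1024;

            while (size < obj_size)
                    size <<= 1;
            return size;
    }

    int main(void)
    {
            /* a 1.5 MiB object on gen3 needs a 2 MiB fence region */
            printf("%u KiB\n", fence_size(3, 3 * 512 * 1024) >> 10);
            return 0;
    }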
@@ -368,15 +357,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, | |||
368 | 357 | ||
369 | obj->map_and_fenceable = | 358 | obj->map_and_fenceable = |
370 | obj->gtt_space == NULL || | 359 | obj->gtt_space == NULL || |
371 | (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end && | 360 | (obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end && |
372 | i915_gem_object_fence_ok(obj, args->tiling_mode)); | 361 | i915_gem_object_fence_ok(obj, args->tiling_mode)); |
373 | 362 | ||
374 | /* Rebind if we need a change of alignment */ | 363 | /* Rebind if we need a change of alignment */ |
375 | if (!obj->map_and_fenceable) { | 364 | if (!obj->map_and_fenceable) { |
376 | u32 unfenced_alignment = | 365 | u32 unfenced_alignment = |
377 | i915_gem_get_unfenced_gtt_alignment(dev, | 366 | i915_gem_get_gtt_alignment(dev, obj->base.size, |
378 | obj->base.size, | 367 | args->tiling_mode, |
379 | args->tiling_mode); | 368 | false); |
380 | if (obj->gtt_offset & (unfenced_alignment - 1)) | 369 | if (obj->gtt_offset & (unfenced_alignment - 1)) |
381 | ret = i915_gem_object_unbind(obj); | 370 | ret = i915_gem_object_unbind(obj); |
382 | } | 371 | } |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 6689a61b02a3..5648d846cdbf 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -356,8 +356,8 @@ static void notify_ring(struct drm_device *dev, | |||
356 | 356 | ||
357 | wake_up_all(&ring->irq_queue); | 357 | wake_up_all(&ring->irq_queue); |
358 | if (i915_enable_hangcheck) { | 358 | if (i915_enable_hangcheck) { |
359 | dev_priv->hangcheck_count = 0; | 359 | dev_priv->gpu_error.hangcheck_count = 0; |
360 | mod_timer(&dev_priv->hangcheck_timer, | 360 | mod_timer(&dev_priv->gpu_error.hangcheck_timer, |
361 | round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); | 361 | round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); |
362 | } | 362 | } |
363 | } | 363 | } |
@@ -862,23 +862,60 @@ done: | |||
862 | */ | 862 | */ |
863 | static void i915_error_work_func(struct work_struct *work) | 863 | static void i915_error_work_func(struct work_struct *work) |
864 | { | 864 | { |
865 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, | 865 | struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, |
866 | error_work); | 866 | work); |
867 | drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t, | ||
868 | gpu_error); | ||
867 | struct drm_device *dev = dev_priv->dev; | 869 | struct drm_device *dev = dev_priv->dev; |
870 | struct intel_ring_buffer *ring; | ||
868 | char *error_event[] = { "ERROR=1", NULL }; | 871 | char *error_event[] = { "ERROR=1", NULL }; |
869 | char *reset_event[] = { "RESET=1", NULL }; | 872 | char *reset_event[] = { "RESET=1", NULL }; |
870 | char *reset_done_event[] = { "ERROR=0", NULL }; | 873 | char *reset_done_event[] = { "ERROR=0", NULL }; |
874 | int i, ret; | ||
871 | 875 | ||
872 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); | 876 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); |
873 | 877 | ||
874 | if (atomic_read(&dev_priv->mm.wedged)) { | 878 | /* |
879 | * Note that there's only one work item which does gpu resets, so we | ||
880 | * need not worry about concurrent gpu resets potentially incrementing | ||
881 | * error->reset_counter twice. We only need to take care of another | ||
882 | * racing irq/hangcheck declaring the gpu dead for a second time. A | ||
883 | * quick check for that is good enough: schedule_work ensures the | ||
884 | * correct ordering between hang detection and this work item, and since | ||
885 | * the reset in-progress bit is only ever set by code outside of this | ||
886 | * work we don't need to worry about any other races. | ||
887 | */ | ||
888 | if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { | ||
875 | DRM_DEBUG_DRIVER("resetting chip\n"); | 889 | DRM_DEBUG_DRIVER("resetting chip\n"); |
876 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); | 890 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, |
877 | if (!i915_reset(dev)) { | 891 | reset_event); |
878 | atomic_set(&dev_priv->mm.wedged, 0); | 892 | |
879 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); | 893 | ret = i915_reset(dev); |
894 | |||
895 | if (ret == 0) { | ||
896 | /* | ||
897 | * After all the gem state is reset, increment the reset | ||
898 | * counter and wake up everyone waiting for the reset to | ||
899 | * complete. | ||
900 | * | ||
901 | * Since unlock operations are a one-sided barrier only, | ||
902 | * we need to insert a barrier here to order any seqno | ||
903 | * updates before | ||
904 | * the counter increment. | ||
905 | */ | ||
906 | smp_mb__before_atomic_inc(); | ||
907 | atomic_inc(&dev_priv->gpu_error.reset_counter); | ||
908 | |||
909 | kobject_uevent_env(&dev->primary->kdev.kobj, | ||
910 | KOBJ_CHANGE, reset_done_event); | ||
911 | } else { | ||
912 | atomic_set(&error->reset_counter, I915_WEDGED); | ||
880 | } | 913 | } |
881 | complete_all(&dev_priv->error_completion); | 914 | |
915 | for_each_ring(ring, dev_priv, i) | ||
916 | wake_up_all(&ring->irq_queue); | ||
917 | |||
918 | wake_up_all(&dev_priv->gpu_error.reset_queue); | ||
882 | } | 919 | } |
883 | } | 920 | } |
884 | 921 | ||
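The comment block above describes the new reset_counter protocol without showing the encodings, which live in i915_drv.h outside this hunk. A userspace model of the lifecycle, assuming the in-progress flag is bit 0 and I915_WEDGED is an all-ones sentinel (both values are assumptions for the sketch):

    #include <stdio.h>

    #define I915_RESET_IN_PROGRESS_FLAG 1u      /* assumed encoding */
    #define I915_WEDGED                 (~0u)   /* assumed sentinel */

    static unsigned int reset_counter;

    static int reset_in_progress(void)
    {
            return reset_counter & I915_RESET_IN_PROGRESS_FLAG;
    }

    int main(void)
    {
            reset_counter |= I915_RESET_IN_PROGRESS_FLAG;   /* hang declared */
            printf("in progress: %d\n", reset_in_progress());

            reset_counter++;   /* successful reset: clears bit 0, bumps epoch */
            printf("in progress: %d, completed resets: %u\n",
                   reset_in_progress(), reset_counter >> 1);

            reset_counter = I915_WEDGED;   /* failed reset: terminally wedged */
            printf("wedged: %d\n", reset_counter == I915_WEDGED);
            return 0;
    }

Under such a scheme a waiter snapshots the counter before sleeping; any change on wakeup means a reset completed (or the GPU wedged) in the meantime, which is what the smp_mb__before_atomic_inc() above orders against the preceding seqno updates.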
@@ -939,7 +976,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv, | |||
939 | goto unwind; | 976 | goto unwind; |
940 | 977 | ||
941 | local_irq_save(flags); | 978 | local_irq_save(flags); |
942 | if (reloc_offset < dev_priv->mm.gtt_mappable_end && | 979 | if (reloc_offset < dev_priv->gtt.mappable_end && |
943 | src->has_global_gtt_mapping) { | 980 | src->has_global_gtt_mapping) { |
944 | void __iomem *s; | 981 | void __iomem *s; |
945 | 982 | ||
@@ -948,7 +985,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv, | |||
948 | * captures what the GPU read. | 985 | * captures what the GPU read. |
949 | */ | 986 | */ |
950 | 987 | ||
951 | s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, | 988 | s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, |
952 | reloc_offset); | 989 | reloc_offset); |
953 | memcpy_fromio(d, s, PAGE_SIZE); | 990 | memcpy_fromio(d, s, PAGE_SIZE); |
954 | io_mapping_unmap_atomic(s); | 991 | io_mapping_unmap_atomic(s); |
@@ -1255,9 +1292,9 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
1255 | unsigned long flags; | 1292 | unsigned long flags; |
1256 | int i, pipe; | 1293 | int i, pipe; |
1257 | 1294 | ||
1258 | spin_lock_irqsave(&dev_priv->error_lock, flags); | 1295 | spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); |
1259 | error = dev_priv->first_error; | 1296 | error = dev_priv->gpu_error.first_error; |
1260 | spin_unlock_irqrestore(&dev_priv->error_lock, flags); | 1297 | spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); |
1261 | if (error) | 1298 | if (error) |
1262 | return; | 1299 | return; |
1263 | 1300 | ||
@@ -1268,7 +1305,8 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
1268 | return; | 1305 | return; |
1269 | } | 1306 | } |
1270 | 1307 | ||
1271 | DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n", | 1308 | DRM_INFO("capturing error event; look for more information in "
1309 | "/sys/kernel/debug/dri/%d/i915_error_state\n", | ||
1272 | dev->primary->index); | 1310 | dev->primary->index); |
1273 | 1311 | ||
1274 | kref_init(&error->ref); | 1312 | kref_init(&error->ref); |
@@ -1341,12 +1379,12 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
1341 | error->overlay = intel_overlay_capture_error_state(dev); | 1379 | error->overlay = intel_overlay_capture_error_state(dev); |
1342 | error->display = intel_display_capture_error_state(dev); | 1380 | error->display = intel_display_capture_error_state(dev); |
1343 | 1381 | ||
1344 | spin_lock_irqsave(&dev_priv->error_lock, flags); | 1382 | spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); |
1345 | if (dev_priv->first_error == NULL) { | 1383 | if (dev_priv->gpu_error.first_error == NULL) { |
1346 | dev_priv->first_error = error; | 1384 | dev_priv->gpu_error.first_error = error; |
1347 | error = NULL; | 1385 | error = NULL; |
1348 | } | 1386 | } |
1349 | spin_unlock_irqrestore(&dev_priv->error_lock, flags); | 1387 | spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); |
1350 | 1388 | ||
1351 | if (error) | 1389 | if (error) |
1352 | i915_error_state_free(&error->ref); | 1390 | i915_error_state_free(&error->ref); |
@@ -1358,10 +1396,10 @@ void i915_destroy_error_state(struct drm_device *dev) | |||
1358 | struct drm_i915_error_state *error; | 1396 | struct drm_i915_error_state *error; |
1359 | unsigned long flags; | 1397 | unsigned long flags; |
1360 | 1398 | ||
1361 | spin_lock_irqsave(&dev_priv->error_lock, flags); | 1399 | spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); |
1362 | error = dev_priv->first_error; | 1400 | error = dev_priv->gpu_error.first_error; |
1363 | dev_priv->first_error = NULL; | 1401 | dev_priv->gpu_error.first_error = NULL; |
1364 | spin_unlock_irqrestore(&dev_priv->error_lock, flags); | 1402 | spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); |
1365 | 1403 | ||
1366 | if (error) | 1404 | if (error) |
1367 | kref_put(&error->ref, i915_error_state_free); | 1405 | kref_put(&error->ref, i915_error_state_free); |
@@ -1482,17 +1520,18 @@ void i915_handle_error(struct drm_device *dev, bool wedged) | |||
1482 | i915_report_and_clear_eir(dev); | 1520 | i915_report_and_clear_eir(dev); |
1483 | 1521 | ||
1484 | if (wedged) { | 1522 | if (wedged) { |
1485 | INIT_COMPLETION(dev_priv->error_completion); | 1523 | atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, |
1486 | atomic_set(&dev_priv->mm.wedged, 1); | 1524 | &dev_priv->gpu_error.reset_counter); |
1487 | 1525 | ||
1488 | /* | 1526 | /* |
1489 | * Wakeup waiting processes so they don't hang | 1527 | * Wakeup waiting processes so that the reset work item |
1528 | * doesn't deadlock trying to grab various locks. | ||
1490 | */ | 1529 | */ |
1491 | for_each_ring(ring, dev_priv, i) | 1530 | for_each_ring(ring, dev_priv, i) |
1492 | wake_up_all(&ring->irq_queue); | 1531 | wake_up_all(&ring->irq_queue); |
1493 | } | 1532 | } |
1494 | 1533 | ||
1495 | queue_work(dev_priv->wq, &dev_priv->error_work); | 1534 | queue_work(dev_priv->wq, &dev_priv->gpu_error.work); |
1496 | } | 1535 | } |
1497 | 1536 | ||
1498 | static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) | 1537 | static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) |
@@ -1723,7 +1762,7 @@ static bool i915_hangcheck_hung(struct drm_device *dev) | |||
1723 | { | 1762 | { |
1724 | drm_i915_private_t *dev_priv = dev->dev_private; | 1763 | drm_i915_private_t *dev_priv = dev->dev_private; |
1725 | 1764 | ||
1726 | if (dev_priv->hangcheck_count++ > 1) { | 1765 | if (dev_priv->gpu_error.hangcheck_count++ > 1) { |
1727 | bool hung = true; | 1766 | bool hung = true; |
1728 | 1767 | ||
1729 | DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); | 1768 | DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); |
@@ -1782,25 +1821,29 @@ void i915_hangcheck_elapsed(unsigned long data) | |||
1782 | goto repeat; | 1821 | goto repeat; |
1783 | } | 1822 | } |
1784 | 1823 | ||
1785 | dev_priv->hangcheck_count = 0; | 1824 | dev_priv->gpu_error.hangcheck_count = 0; |
1786 | return; | 1825 | return; |
1787 | } | 1826 | } |
1788 | 1827 | ||
1789 | i915_get_extra_instdone(dev, instdone); | 1828 | i915_get_extra_instdone(dev, instdone); |
1790 | if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 && | 1829 | if (memcmp(dev_priv->gpu_error.last_acthd, acthd, |
1791 | memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) { | 1830 | sizeof(acthd)) == 0 && |
1831 | memcmp(dev_priv->gpu_error.prev_instdone, instdone, | ||
1832 | sizeof(instdone)) == 0) { | ||
1792 | if (i915_hangcheck_hung(dev)) | 1833 | if (i915_hangcheck_hung(dev)) |
1793 | return; | 1834 | return; |
1794 | } else { | 1835 | } else { |
1795 | dev_priv->hangcheck_count = 0; | 1836 | dev_priv->gpu_error.hangcheck_count = 0; |
1796 | 1837 | ||
1797 | memcpy(dev_priv->last_acthd, acthd, sizeof(acthd)); | 1838 | memcpy(dev_priv->gpu_error.last_acthd, acthd, |
1798 | memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone)); | 1839 | sizeof(acthd)); |
1840 | memcpy(dev_priv->gpu_error.prev_instdone, instdone, | ||
1841 | sizeof(instdone)); | ||
1799 | } | 1842 | } |
1800 | 1843 | ||
1801 | repeat: | 1844 | repeat: |
1802 | /* Reset timer in case chip hangs without another request being added */ | 1845 | /* Reset timer in case chip hangs without another request being added */
1803 | mod_timer(&dev_priv->hangcheck_timer, | 1846 | mod_timer(&dev_priv->gpu_error.hangcheck_timer, |
1804 | round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); | 1847 | round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); |
1805 | } | 1848 | } |
1806 | 1849 | ||
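The hangcheck state moves into dev_priv->gpu_error, but the detection logic is untouched: each timer tick snapshots ACTHD and the extra INSTDONE registers, resets the counter whenever they move, and only declares a hang once several consecutive ticks see no progress (the "> 1" test in i915_hangcheck_hung() above). A reduced model of that loop, with the snapshot narrowed to two words for brevity:

    #include <stdio.h>
    #include <string.h>

    struct gpu_error {
            int hangcheck_count;
            unsigned int last_acthd[2];     /* per-ring head snapshots */
    };

    /* returns 1 once the GPU is declared hung */
    static int hangcheck_tick(struct gpu_error *e, const unsigned int acthd[2])
    {
            if (memcmp(e->last_acthd, acthd, sizeof(e->last_acthd)) == 0)
                    return e->hangcheck_count++ > 1;        /* no progress */

            e->hangcheck_count = 0;                         /* still moving */
            memcpy(e->last_acthd, acthd, sizeof(e->last_acthd));
            return 0;
    }

    int main(void)
    {
            struct gpu_error e = { 0 };
            unsigned int stuck[2] = { 0x100, 0x200 };

            for (int tick = 0; tick < 4; tick++)
                    printf("tick %d: hung=%d\n", tick,
                           hangcheck_tick(&e, stuck));
            return 0;
    }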
@@ -1892,6 +1935,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1892 | DE_AUX_CHANNEL_A; | 1935 | DE_AUX_CHANNEL_A; |
1893 | u32 render_irqs; | 1936 | u32 render_irqs; |
1894 | u32 hotplug_mask; | 1937 | u32 hotplug_mask; |
1938 | u32 pch_irq_mask; | ||
1895 | 1939 | ||
1896 | dev_priv->irq_mask = ~display_mask; | 1940 | dev_priv->irq_mask = ~display_mask; |
1897 | 1941 | ||
@@ -1935,10 +1979,10 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1935 | SDE_AUX_MASK); | 1979 | SDE_AUX_MASK); |
1936 | } | 1980 | } |
1937 | 1981 | ||
1938 | dev_priv->pch_irq_mask = ~hotplug_mask; | 1982 | pch_irq_mask = ~hotplug_mask; |
1939 | 1983 | ||
1940 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); | 1984 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); |
1941 | I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); | 1985 | I915_WRITE(SDEIMR, pch_irq_mask); |
1942 | I915_WRITE(SDEIER, hotplug_mask); | 1986 | I915_WRITE(SDEIER, hotplug_mask); |
1943 | POSTING_READ(SDEIER); | 1987 | POSTING_READ(SDEIER); |
1944 | 1988 | ||
@@ -1966,6 +2010,7 @@ static int ivybridge_irq_postinstall(struct drm_device *dev) | |||
1966 | DE_AUX_CHANNEL_A_IVB; | 2010 | DE_AUX_CHANNEL_A_IVB; |
1967 | u32 render_irqs; | 2011 | u32 render_irqs; |
1968 | u32 hotplug_mask; | 2012 | u32 hotplug_mask; |
2013 | u32 pch_irq_mask; | ||
1969 | 2014 | ||
1970 | dev_priv->irq_mask = ~display_mask; | 2015 | dev_priv->irq_mask = ~display_mask; |
1971 | 2016 | ||
@@ -1995,10 +2040,10 @@ static int ivybridge_irq_postinstall(struct drm_device *dev) | |||
1995 | SDE_PORTD_HOTPLUG_CPT | | 2040 | SDE_PORTD_HOTPLUG_CPT | |
1996 | SDE_GMBUS_CPT | | 2041 | SDE_GMBUS_CPT | |
1997 | SDE_AUX_MASK_CPT); | 2042 | SDE_AUX_MASK_CPT); |
1998 | dev_priv->pch_irq_mask = ~hotplug_mask; | 2043 | pch_irq_mask = ~hotplug_mask; |
1999 | 2044 | ||
2000 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); | 2045 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); |
2001 | I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); | 2046 | I915_WRITE(SDEIMR, pch_irq_mask); |
2002 | I915_WRITE(SDEIER, hotplug_mask); | 2047 | I915_WRITE(SDEIER, hotplug_mask); |
2003 | POSTING_READ(SDEIER); | 2048 | POSTING_READ(SDEIER); |
2004 | 2049 | ||
@@ -2767,11 +2812,12 @@ void intel_irq_init(struct drm_device *dev) | |||
2767 | struct drm_i915_private *dev_priv = dev->dev_private; | 2812 | struct drm_i915_private *dev_priv = dev->dev_private; |
2768 | 2813 | ||
2769 | INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); | 2814 | INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); |
2770 | INIT_WORK(&dev_priv->error_work, i915_error_work_func); | 2815 | INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); |
2771 | INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); | 2816 | INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); |
2772 | INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); | 2817 | INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); |
2773 | 2818 | ||
2774 | setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, | 2819 | setup_timer(&dev_priv->gpu_error.hangcheck_timer, |
2820 | i915_hangcheck_elapsed, | ||
2775 | (unsigned long) dev); | 2821 | (unsigned long) dev); |
2776 | 2822 | ||
2777 | pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); | 2823 | pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3b039f4268e3..e2b592a68f58 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -141,9 +141,15 @@ | |||
141 | #define VGA_MSR_MEM_EN (1<<1) | 141 | #define VGA_MSR_MEM_EN (1<<1) |
142 | #define VGA_MSR_CGA_MODE (1<<0) | 142 | #define VGA_MSR_CGA_MODE (1<<0) |
143 | 143 | ||
144 | #define VGA_SR_INDEX 0x3c4 | 144 | /* |
145 | * SR01 is the only VGA register touched on non-UMS setups. | ||
146 | * VLV doesn't do UMS, so the sequencer index/data registers | ||
147 | * are the only VGA registers which need to include | ||
148 | * display_mmio_offset. | ||
149 | */ | ||
150 | #define VGA_SR_INDEX (dev_priv->info->display_mmio_offset + 0x3c4) | ||
145 | #define SR01 1 | 151 | #define SR01 1 |
146 | #define VGA_SR_DATA 0x3c5 | 152 | #define VGA_SR_DATA (dev_priv->info->display_mmio_offset + 0x3c5) |
147 | 153 | ||
148 | #define VGA_AR_INDEX 0x3c0 | 154 | #define VGA_AR_INDEX 0x3c0 |
149 | #define VGA_AR_VID_EN (1<<5) | 155 | #define VGA_AR_VID_EN (1<<5) |
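From here on the register list relies on a small macro trick: VGA_SR_INDEX and friends now expand to expressions that read dev_priv from the calling function's scope, so one set of definitions serves both the classic 0x0-based display block and VLV's relocated one. A toy illustration of the idiom (the two struct definitions are invented for the sketch; the 0x180000 base is inferred from the old 0x18xxxx VLV literals replaced below):

    #include <stdio.h>
    #include <stdint.h>

    struct intel_info { uint32_t display_mmio_offset; };
    struct drm_i915_private { const struct intel_info *info; };

    /* like the reworked i915 macros: the expansion picks up whatever
     * 'dev_priv' is in scope at the use site */
    #define VGA_SR_INDEX (dev_priv->info->display_mmio_offset + 0x3c4)

    static void poke(struct drm_i915_private *dev_priv)
    {
            printf("SR index register at %#x\n", VGA_SR_INDEX);
    }

    int main(void)
    {
            struct intel_info classic = { 0 };
            struct intel_info vlv = { 0x180000 };
            struct drm_i915_private a = { &classic }, b = { &vlv };

            poke(&a);       /* prints 0x3c4 */
            poke(&b);       /* prints 0x1803c4 */
            return 0;
    }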
@@ -336,17 +342,19 @@ | |||
336 | * 0x801c/3c: core clock bits | 342 | * 0x801c/3c: core clock bits |
337 | * 0x8048/68: low pass filter coefficients | 343 | * 0x8048/68: low pass filter coefficients |
338 | * 0x8100: fast clock controls | 344 | * 0x8100: fast clock controls |
345 | * | ||
346 | * DPIO is VLV only. | ||
339 | */ | 347 | */ |
340 | #define DPIO_PKT 0x2100 | 348 | #define DPIO_PKT (VLV_DISPLAY_BASE + 0x2100) |
341 | #define DPIO_RID (0<<24) | 349 | #define DPIO_RID (0<<24) |
342 | #define DPIO_OP_WRITE (1<<16) | 350 | #define DPIO_OP_WRITE (1<<16) |
343 | #define DPIO_OP_READ (0<<16) | 351 | #define DPIO_OP_READ (0<<16) |
344 | #define DPIO_PORTID (0x12<<8) | 352 | #define DPIO_PORTID (0x12<<8) |
345 | #define DPIO_BYTE (0xf<<4) | 353 | #define DPIO_BYTE (0xf<<4) |
346 | #define DPIO_BUSY (1<<0) /* status only */ | 354 | #define DPIO_BUSY (1<<0) /* status only */ |
347 | #define DPIO_DATA 0x2104 | 355 | #define DPIO_DATA (VLV_DISPLAY_BASE + 0x2104) |
348 | #define DPIO_REG 0x2108 | 356 | #define DPIO_REG (VLV_DISPLAY_BASE + 0x2108) |
349 | #define DPIO_CTL 0x2110 | 357 | #define DPIO_CTL (VLV_DISPLAY_BASE + 0x2110) |
350 | #define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */ | 358 | #define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */ |
351 | #define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */ | 359 | #define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */ |
352 | #define DPIO_SFR_BYPASS (1<<1) | 360 | #define DPIO_SFR_BYPASS (1<<1) |
@@ -554,13 +562,13 @@ | |||
554 | #define IIR 0x020a4 | 562 | #define IIR 0x020a4 |
555 | #define IMR 0x020a8 | 563 | #define IMR 0x020a8 |
556 | #define ISR 0x020ac | 564 | #define ISR 0x020ac |
557 | #define VLV_GUNIT_CLOCK_GATE 0x182060 | 565 | #define VLV_GUNIT_CLOCK_GATE (VLV_DISPLAY_BASE + 0x2060) |
558 | #define GCFG_DIS (1<<8) | 566 | #define GCFG_DIS (1<<8) |
559 | #define VLV_IIR_RW 0x182084 | 567 | #define VLV_IIR_RW (VLV_DISPLAY_BASE + 0x2084) |
560 | #define VLV_IER 0x1820a0 | 568 | #define VLV_IER (VLV_DISPLAY_BASE + 0x20a0) |
561 | #define VLV_IIR 0x1820a4 | 569 | #define VLV_IIR (VLV_DISPLAY_BASE + 0x20a4) |
562 | #define VLV_IMR 0x1820a8 | 570 | #define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8) |
563 | #define VLV_ISR 0x1820ac | 571 | #define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac) |
564 | #define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) | 572 | #define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) |
565 | #define I915_DISPLAY_PORT_INTERRUPT (1<<17) | 573 | #define I915_DISPLAY_PORT_INTERRUPT (1<<17) |
566 | #define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15) | 574 | #define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15) |
@@ -733,6 +741,7 @@ | |||
733 | #define GEN7_FF_TS_SCHED_HS0 (0x3<<16) | 741 | #define GEN7_FF_TS_SCHED_HS0 (0x3<<16) |
734 | #define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16) | 742 | #define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16) |
735 | #define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */ | 743 | #define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */ |
744 | #define GEN7_FF_VS_REF_CNT_FFME (1 << 15) | ||
736 | #define GEN7_FF_VS_SCHED_HS1 (0x5<<12) | 745 | #define GEN7_FF_VS_SCHED_HS1 (0x5<<12) |
737 | #define GEN7_FF_VS_SCHED_HS0 (0x3<<12) | 746 | #define GEN7_FF_VS_SCHED_HS0 (0x3<<12) |
738 | #define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */ | 747 | #define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */ |
@@ -919,8 +928,8 @@ | |||
919 | #define VGA1_PD_P1_DIV_2 (1 << 13) | 928 | #define VGA1_PD_P1_DIV_2 (1 << 13) |
920 | #define VGA1_PD_P1_SHIFT 8 | 929 | #define VGA1_PD_P1_SHIFT 8 |
921 | #define VGA1_PD_P1_MASK (0x1f << 8) | 930 | #define VGA1_PD_P1_MASK (0x1f << 8) |
922 | #define _DPLL_A 0x06014 | 931 | #define _DPLL_A (dev_priv->info->display_mmio_offset + 0x6014) |
923 | #define _DPLL_B 0x06018 | 932 | #define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018) |
924 | #define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B) | 933 | #define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B) |
925 | #define DPLL_VCO_ENABLE (1 << 31) | 934 | #define DPLL_VCO_ENABLE (1 << 31) |
926 | #define DPLL_DVO_HIGH_SPEED (1 << 30) | 935 | #define DPLL_DVO_HIGH_SPEED (1 << 30) |
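Note that adding the same display_mmio_offset to both _DPLL_A and _DPLL_B leaves the DPLL(pipe) selector correct: _PIPE(), defined earlier in i915_reg.h outside this hunk as ((a) + (pipe) * ((b) - (a))), sees the offset cancel in the (b) - (a) stride while it is still carried once through (a). A quick check of that cancellation:

    #include <stdio.h>
    #include <stdint.h>

    #define _PIPE(pipe, a, b) ((a) + (pipe) * ((b) - (a)))

    int main(void)
    {
            uint32_t off = 0x180000;   /* a display_mmio_offset, e.g. VLV's */
            uint32_t dpll_a = off + 0x6014;
            uint32_t dpll_b = off + 0x6018;

            /* stride stays 4 regardless of the base offset */
            printf("DPLL(0)=%#x DPLL(1)=%#x\n",
                   _PIPE(0, dpll_a, dpll_b), _PIPE(1, dpll_a, dpll_b));
            return 0;
    }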
@@ -979,7 +988,7 @@ | |||
979 | #define SDVO_MULTIPLIER_MASK 0x000000ff | 988 | #define SDVO_MULTIPLIER_MASK 0x000000ff |
980 | #define SDVO_MULTIPLIER_SHIFT_HIRES 4 | 989 | #define SDVO_MULTIPLIER_SHIFT_HIRES 4 |
981 | #define SDVO_MULTIPLIER_SHIFT_VGA 0 | 990 | #define SDVO_MULTIPLIER_SHIFT_VGA 0 |
982 | #define _DPLL_A_MD 0x0601c /* 965+ only */ | 991 | #define _DPLL_A_MD (dev_priv->info->display_mmio_offset + 0x601c) /* 965+ only */ |
983 | /* | 992 | /* |
984 | * UDI pixel divider, controlling how many pixels are stuffed into a packet. | 993 | * UDI pixel divider, controlling how many pixels are stuffed into a packet. |
985 | * | 994 | * |
@@ -1016,7 +1025,7 @@ | |||
1016 | */ | 1025 | */ |
1017 | #define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f | 1026 | #define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f |
1018 | #define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 | 1027 | #define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 |
1019 | #define _DPLL_B_MD 0x06020 /* 965+ only */ | 1028 | #define _DPLL_B_MD (dev_priv->info->display_mmio_offset + 0x6020) /* 965+ only */ |
1020 | #define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD) | 1029 | #define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD) |
1021 | 1030 | ||
1022 | #define _FPA0 0x06040 | 1031 | #define _FPA0 0x06040 |
@@ -1159,15 +1168,15 @@ | |||
1159 | #define RAMCLK_GATE_D 0x6210 /* CRL only */ | 1168 | #define RAMCLK_GATE_D 0x6210 /* CRL only */ |
1160 | #define DEUC 0x6214 /* CRL only */ | 1169 | #define DEUC 0x6214 /* CRL only */ |
1161 | 1170 | ||
1162 | #define FW_BLC_SELF_VLV 0x6500 | 1171 | #define FW_BLC_SELF_VLV (VLV_DISPLAY_BASE + 0x6500) |
1163 | #define FW_CSPWRDWNEN (1<<15) | 1172 | #define FW_CSPWRDWNEN (1<<15) |
1164 | 1173 | ||
1165 | /* | 1174 | /* |
1166 | * Palette regs | 1175 | * Palette regs |
1167 | */ | 1176 | */ |
1168 | 1177 | ||
1169 | #define _PALETTE_A 0x0a000 | 1178 | #define _PALETTE_A (dev_priv->info->display_mmio_offset + 0xa000) |
1170 | #define _PALETTE_B 0x0a800 | 1179 | #define _PALETTE_B (dev_priv->info->display_mmio_offset + 0xa800) |
1171 | #define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B) | 1180 | #define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B) |
1172 | 1181 | ||
1173 | /* MCH MMIO space */ | 1182 | /* MCH MMIO space */ |
@@ -1532,26 +1541,26 @@ | |||
1532 | */ | 1541 | */ |
1533 | 1542 | ||
1534 | /* Pipe A timing regs */ | 1543 | /* Pipe A timing regs */ |
1535 | #define _HTOTAL_A 0x60000 | 1544 | #define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000) |
1536 | #define _HBLANK_A 0x60004 | 1545 | #define _HBLANK_A (dev_priv->info->display_mmio_offset + 0x60004) |
1537 | #define _HSYNC_A 0x60008 | 1546 | #define _HSYNC_A (dev_priv->info->display_mmio_offset + 0x60008) |
1538 | #define _VTOTAL_A 0x6000c | 1547 | #define _VTOTAL_A (dev_priv->info->display_mmio_offset + 0x6000c) |
1539 | #define _VBLANK_A 0x60010 | 1548 | #define _VBLANK_A (dev_priv->info->display_mmio_offset + 0x60010) |
1540 | #define _VSYNC_A 0x60014 | 1549 | #define _VSYNC_A (dev_priv->info->display_mmio_offset + 0x60014) |
1541 | #define _PIPEASRC 0x6001c | 1550 | #define _PIPEASRC (dev_priv->info->display_mmio_offset + 0x6001c) |
1542 | #define _BCLRPAT_A 0x60020 | 1551 | #define _BCLRPAT_A (dev_priv->info->display_mmio_offset + 0x60020) |
1543 | #define _VSYNCSHIFT_A 0x60028 | 1552 | #define _VSYNCSHIFT_A (dev_priv->info->display_mmio_offset + 0x60028) |
1544 | 1553 | ||
1545 | /* Pipe B timing regs */ | 1554 | /* Pipe B timing regs */ |
1546 | #define _HTOTAL_B 0x61000 | 1555 | #define _HTOTAL_B (dev_priv->info->display_mmio_offset + 0x61000) |
1547 | #define _HBLANK_B 0x61004 | 1556 | #define _HBLANK_B (dev_priv->info->display_mmio_offset + 0x61004) |
1548 | #define _HSYNC_B 0x61008 | 1557 | #define _HSYNC_B (dev_priv->info->display_mmio_offset + 0x61008) |
1549 | #define _VTOTAL_B 0x6100c | 1558 | #define _VTOTAL_B (dev_priv->info->display_mmio_offset + 0x6100c) |
1550 | #define _VBLANK_B 0x61010 | 1559 | #define _VBLANK_B (dev_priv->info->display_mmio_offset + 0x61010) |
1551 | #define _VSYNC_B 0x61014 | 1560 | #define _VSYNC_B (dev_priv->info->display_mmio_offset + 0x61014) |
1552 | #define _PIPEBSRC 0x6101c | 1561 | #define _PIPEBSRC (dev_priv->info->display_mmio_offset + 0x6101c) |
1553 | #define _BCLRPAT_B 0x61020 | 1562 | #define _BCLRPAT_B (dev_priv->info->display_mmio_offset + 0x61020) |
1554 | #define _VSYNCSHIFT_B 0x61028 | 1563 | #define _VSYNCSHIFT_B (dev_priv->info->display_mmio_offset + 0x61028) |
1555 | 1564 | ||
1556 | 1565 | ||
1557 | #define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B) | 1566 | #define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B) |
@@ -1612,7 +1621,7 @@ | |||
1612 | 1621 | ||
1613 | 1622 | ||
1614 | /* Hotplug control (945+ only) */ | 1623 | /* Hotplug control (945+ only) */ |
1615 | #define PORT_HOTPLUG_EN 0x61110 | 1624 | #define PORT_HOTPLUG_EN (dev_priv->info->display_mmio_offset + 0x61110) |
1616 | #define HDMIB_HOTPLUG_INT_EN (1 << 29) | 1625 | #define HDMIB_HOTPLUG_INT_EN (1 << 29) |
1617 | #define DPB_HOTPLUG_INT_EN (1 << 29) | 1626 | #define DPB_HOTPLUG_INT_EN (1 << 29) |
1618 | #define HDMIC_HOTPLUG_INT_EN (1 << 28) | 1627 | #define HDMIC_HOTPLUG_INT_EN (1 << 28) |
@@ -1639,7 +1648,7 @@ | |||
1639 | #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) | 1648 | #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) |
1640 | #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) | 1649 | #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) |
1641 | 1650 | ||
1642 | #define PORT_HOTPLUG_STAT 0x61114 | 1651 | #define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114) |
1643 | /* HDMI/DP bits are gen4+ */ | 1652 | /* HDMI/DP bits are gen4+ */ |
1644 | #define DPB_HOTPLUG_LIVE_STATUS (1 << 29) | 1653 | #define DPB_HOTPLUG_LIVE_STATUS (1 << 29) |
1645 | #define DPC_HOTPLUG_LIVE_STATUS (1 << 28) | 1654 | #define DPC_HOTPLUG_LIVE_STATUS (1 << 28) |
@@ -1858,7 +1867,7 @@ | |||
1858 | #define PP_DIVISOR 0x61210 | 1867 | #define PP_DIVISOR 0x61210 |
1859 | 1868 | ||
1860 | /* Panel fitting */ | 1869 | /* Panel fitting */ |
1861 | #define PFIT_CONTROL 0x61230 | 1870 | #define PFIT_CONTROL (dev_priv->info->display_mmio_offset + 0x61230) |
1862 | #define PFIT_ENABLE (1 << 31) | 1871 | #define PFIT_ENABLE (1 << 31) |
1863 | #define PFIT_PIPE_MASK (3 << 29) | 1872 | #define PFIT_PIPE_MASK (3 << 29) |
1864 | #define PFIT_PIPE_SHIFT 29 | 1873 | #define PFIT_PIPE_SHIFT 29 |
@@ -1876,7 +1885,7 @@ | |||
1876 | #define PFIT_SCALING_PROGRAMMED (1 << 26) | 1885 | #define PFIT_SCALING_PROGRAMMED (1 << 26) |
1877 | #define PFIT_SCALING_PILLAR (2 << 26) | 1886 | #define PFIT_SCALING_PILLAR (2 << 26) |
1878 | #define PFIT_SCALING_LETTER (3 << 26) | 1887 | #define PFIT_SCALING_LETTER (3 << 26) |
1879 | #define PFIT_PGM_RATIOS 0x61234 | 1888 | #define PFIT_PGM_RATIOS (dev_priv->info->display_mmio_offset + 0x61234) |
1880 | /* Pre-965 */ | 1889 | /* Pre-965 */ |
1881 | #define PFIT_VERT_SCALE_SHIFT 20 | 1890 | #define PFIT_VERT_SCALE_SHIFT 20 |
1882 | #define PFIT_VERT_SCALE_MASK 0xfff00000 | 1891 | #define PFIT_VERT_SCALE_MASK 0xfff00000 |
@@ -1888,7 +1897,7 @@ | |||
1888 | #define PFIT_HORIZ_SCALE_SHIFT_965 0 | 1897 | #define PFIT_HORIZ_SCALE_SHIFT_965 0 |
1889 | #define PFIT_HORIZ_SCALE_MASK_965 0x00001fff | 1898 | #define PFIT_HORIZ_SCALE_MASK_965 0x00001fff |
1890 | 1899 | ||
1891 | #define PFIT_AUTO_RATIOS 0x61238 | 1900 | #define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238) |
1892 | 1901 | ||
1893 | /* Backlight control */ | 1902 | /* Backlight control */ |
1894 | #define BLC_PWM_CTL2 0x61250 /* 965+ only */ | 1903 | #define BLC_PWM_CTL2 0x61250 /* 965+ only */ |
@@ -2618,10 +2627,10 @@ | |||
2618 | /* Display & cursor control */ | 2627 | /* Display & cursor control */ |
2619 | 2628 | ||
2620 | /* Pipe A */ | 2629 | /* Pipe A */ |
2621 | #define _PIPEADSL 0x70000 | 2630 | #define _PIPEADSL (dev_priv->info->display_mmio_offset + 0x70000) |
2622 | #define DSL_LINEMASK_GEN2 0x00000fff | 2631 | #define DSL_LINEMASK_GEN2 0x00000fff |
2623 | #define DSL_LINEMASK_GEN3 0x00001fff | 2632 | #define DSL_LINEMASK_GEN3 0x00001fff |
2624 | #define _PIPEACONF 0x70008 | 2633 | #define _PIPEACONF (dev_priv->info->display_mmio_offset + 0x70008) |
2625 | #define PIPECONF_ENABLE (1<<31) | 2634 | #define PIPECONF_ENABLE (1<<31) |
2626 | #define PIPECONF_DISABLE 0 | 2635 | #define PIPECONF_DISABLE 0 |
2627 | #define PIPECONF_DOUBLE_WIDE (1<<30) | 2636 | #define PIPECONF_DOUBLE_WIDE (1<<30) |
@@ -2650,6 +2659,7 @@ | |||
2650 | #define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */ | 2659 | #define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */ |
2651 | #define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */ | 2660 | #define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */ |
2652 | #define PIPECONF_CXSR_DOWNCLOCK (1<<16) | 2661 | #define PIPECONF_CXSR_DOWNCLOCK (1<<16) |
2662 | #define PIPECONF_COLOR_RANGE_SELECT (1 << 13) | ||
2653 | #define PIPECONF_BPC_MASK (0x7 << 5) | 2663 | #define PIPECONF_BPC_MASK (0x7 << 5) |
2654 | #define PIPECONF_8BPC (0<<5) | 2664 | #define PIPECONF_8BPC (0<<5) |
2655 | #define PIPECONF_10BPC (1<<5) | 2665 | #define PIPECONF_10BPC (1<<5) |
@@ -2661,7 +2671,7 @@ | |||
2661 | #define PIPECONF_DITHER_TYPE_ST1 (1<<2) | 2671 | #define PIPECONF_DITHER_TYPE_ST1 (1<<2) |
2662 | #define PIPECONF_DITHER_TYPE_ST2 (2<<2) | 2672 | #define PIPECONF_DITHER_TYPE_ST2 (2<<2) |
2663 | #define PIPECONF_DITHER_TYPE_TEMP (3<<2) | 2673 | #define PIPECONF_DITHER_TYPE_TEMP (3<<2) |
2664 | #define _PIPEASTAT 0x70024 | 2674 | #define _PIPEASTAT (dev_priv->info->display_mmio_offset + 0x70024) |
2665 | #define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) | 2675 | #define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) |
2666 | #define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30) | 2676 | #define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30) |
2667 | #define PIPE_CRC_ERROR_ENABLE (1UL<<29) | 2677 | #define PIPE_CRC_ERROR_ENABLE (1UL<<29) |
@@ -2672,7 +2682,7 @@ | |||
2672 | #define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25) | 2682 | #define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25) |
2673 | #define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24) | 2683 | #define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24) |
2674 | #define PIPE_DPST_EVENT_ENABLE (1UL<<23) | 2684 | #define PIPE_DPST_EVENT_ENABLE (1UL<<23) |
2675 | #define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<26) | 2685 | #define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<22) |
2676 | #define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22) | 2686 | #define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22) |
2677 | #define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) | 2687 | #define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) |
2678 | #define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) | 2688 | #define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) |
@@ -2682,7 +2692,7 @@ | |||
2682 | #define PIPEA_HBLANK_INT_EN_VLV (1UL<<16) | 2692 | #define PIPEA_HBLANK_INT_EN_VLV (1UL<<16) |
2683 | #define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16) | 2693 | #define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16) |
2684 | #define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15) | 2694 | #define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15) |
2685 | #define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<15) | 2695 | #define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<14) |
2686 | #define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) | 2696 | #define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) |
2687 | #define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12) | 2697 | #define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12) |
2688 | #define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11) | 2698 | #define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11) |
@@ -2706,7 +2716,7 @@ | |||
2706 | #define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL) | 2716 | #define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL) |
2707 | #define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT) | 2717 | #define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT) |
2708 | 2718 | ||
2709 | #define VLV_DPFLIPSTAT 0x70028 | 2719 | #define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028) |
2710 | #define PIPEB_LINE_COMPARE_INT_EN (1<<29) | 2720 | #define PIPEB_LINE_COMPARE_INT_EN (1<<29) |
2711 | #define PIPEB_HLINE_INT_EN (1<<28) | 2721 | #define PIPEB_HLINE_INT_EN (1<<28) |
2712 | #define PIPEB_VBLANK_INT_EN (1<<27) | 2722 | #define PIPEB_VBLANK_INT_EN (1<<27) |
@@ -2720,7 +2730,7 @@ | |||
2720 | #define SPRITEA_FLIPDONE_INT_EN (1<<17) | 2730 | #define SPRITEA_FLIPDONE_INT_EN (1<<17) |
2721 | #define PLANEA_FLIPDONE_INT_EN (1<<16) | 2731 | #define PLANEA_FLIPDONE_INT_EN (1<<16) |
2722 | 2732 | ||
2723 | #define DPINVGTT 0x7002c /* VLV only */ | 2733 | #define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV only */ |
2724 | #define CURSORB_INVALID_GTT_INT_EN (1<<23) | 2734 | #define CURSORB_INVALID_GTT_INT_EN (1<<23) |
2725 | #define CURSORA_INVALID_GTT_INT_EN (1<<22) | 2735 | #define CURSORA_INVALID_GTT_INT_EN (1<<22) |
2726 | #define SPRITED_INVALID_GTT_INT_EN (1<<21) | 2736 | #define SPRITED_INVALID_GTT_INT_EN (1<<21) |
@@ -2748,7 +2758,7 @@ | |||
2748 | #define DSPARB_BEND_SHIFT 9 /* on 855 */ | 2758 | #define DSPARB_BEND_SHIFT 9 /* on 855 */ |
2749 | #define DSPARB_AEND_SHIFT 0 | 2759 | #define DSPARB_AEND_SHIFT 0 |
2750 | 2760 | ||
2751 | #define DSPFW1 0x70034 | 2761 | #define DSPFW1 (dev_priv->info->display_mmio_offset + 0x70034) |
2752 | #define DSPFW_SR_SHIFT 23 | 2762 | #define DSPFW_SR_SHIFT 23 |
2753 | #define DSPFW_SR_MASK (0x1ff<<23) | 2763 | #define DSPFW_SR_MASK (0x1ff<<23) |
2754 | #define DSPFW_CURSORB_SHIFT 16 | 2764 | #define DSPFW_CURSORB_SHIFT 16 |
@@ -2756,11 +2766,11 @@ | |||
2756 | #define DSPFW_PLANEB_SHIFT 8 | 2766 | #define DSPFW_PLANEB_SHIFT 8 |
2757 | #define DSPFW_PLANEB_MASK (0x7f<<8) | 2767 | #define DSPFW_PLANEB_MASK (0x7f<<8) |
2758 | #define DSPFW_PLANEA_MASK (0x7f) | 2768 | #define DSPFW_PLANEA_MASK (0x7f) |
2759 | #define DSPFW2 0x70038 | 2769 | #define DSPFW2 (dev_priv->info->display_mmio_offset + 0x70038) |
2760 | #define DSPFW_CURSORA_MASK 0x00003f00 | 2770 | #define DSPFW_CURSORA_MASK 0x00003f00 |
2761 | #define DSPFW_CURSORA_SHIFT 8 | 2771 | #define DSPFW_CURSORA_SHIFT 8 |
2762 | #define DSPFW_PLANEC_MASK (0x7f) | 2772 | #define DSPFW_PLANEC_MASK (0x7f) |
2763 | #define DSPFW3 0x7003c | 2773 | #define DSPFW3 (dev_priv->info->display_mmio_offset + 0x7003c) |
2764 | #define DSPFW_HPLL_SR_EN (1<<31) | 2774 | #define DSPFW_HPLL_SR_EN (1<<31) |
2765 | #define DSPFW_CURSOR_SR_SHIFT 24 | 2775 | #define DSPFW_CURSOR_SR_SHIFT 24 |
2766 | #define PINEVIEW_SELF_REFRESH_EN (1<<30) | 2776 | #define PINEVIEW_SELF_REFRESH_EN (1<<30) |
@@ -2772,13 +2782,13 @@ | |||
2772 | /* drain latency register values*/ | 2782 | /* drain latency register values*/ |
2773 | #define DRAIN_LATENCY_PRECISION_32 32 | 2783 | #define DRAIN_LATENCY_PRECISION_32 32 |
2774 | #define DRAIN_LATENCY_PRECISION_16 16 | 2784 | #define DRAIN_LATENCY_PRECISION_16 16 |
2775 | #define VLV_DDL1 0x70050 | 2785 | #define VLV_DDL1 (VLV_DISPLAY_BASE + 0x70050) |
2776 | #define DDL_CURSORA_PRECISION_32 (1<<31) | 2786 | #define DDL_CURSORA_PRECISION_32 (1<<31) |
2777 | #define DDL_CURSORA_PRECISION_16 (0<<31) | 2787 | #define DDL_CURSORA_PRECISION_16 (0<<31) |
2778 | #define DDL_CURSORA_SHIFT 24 | 2788 | #define DDL_CURSORA_SHIFT 24 |
2779 | #define DDL_PLANEA_PRECISION_32 (1<<7) | 2789 | #define DDL_PLANEA_PRECISION_32 (1<<7) |
2780 | #define DDL_PLANEA_PRECISION_16 (0<<7) | 2790 | #define DDL_PLANEA_PRECISION_16 (0<<7) |
2781 | #define VLV_DDL2 0x70054 | 2791 | #define VLV_DDL2 (VLV_DISPLAY_BASE + 0x70054) |
2782 | #define DDL_CURSORB_PRECISION_32 (1<<31) | 2792 | #define DDL_CURSORB_PRECISION_32 (1<<31) |
2783 | #define DDL_CURSORB_PRECISION_16 (0<<31) | 2793 | #define DDL_CURSORB_PRECISION_16 (0<<31) |
2784 | #define DDL_CURSORB_SHIFT 24 | 2794 | #define DDL_CURSORB_SHIFT 24 |
@@ -2922,10 +2932,10 @@ | |||
2922 | * } while (high1 != high2); | 2932 | * } while (high1 != high2); |
2923 | * frame = (high1 << 8) | low1; | 2933 | * frame = (high1 << 8) | low1; |
2924 | */ | 2934 | */ |
2925 | #define _PIPEAFRAMEHIGH 0x70040 | 2935 | #define _PIPEAFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x70040) |
2926 | #define PIPE_FRAME_HIGH_MASK 0x0000ffff | 2936 | #define PIPE_FRAME_HIGH_MASK 0x0000ffff |
2927 | #define PIPE_FRAME_HIGH_SHIFT 0 | 2937 | #define PIPE_FRAME_HIGH_SHIFT 0 |
2928 | #define _PIPEAFRAMEPIXEL 0x70044 | 2938 | #define _PIPEAFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x70044) |
2929 | #define PIPE_FRAME_LOW_MASK 0xff000000 | 2939 | #define PIPE_FRAME_LOW_MASK 0xff000000 |
2930 | #define PIPE_FRAME_LOW_SHIFT 24 | 2940 | #define PIPE_FRAME_LOW_SHIFT 24 |
2931 | #define PIPE_PIXEL_MASK 0x00ffffff | 2941 | #define PIPE_PIXEL_MASK 0x00ffffff |
@@ -2936,7 +2946,7 @@ | |||
2936 | #define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45) | 2946 | #define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45) |
2937 | 2947 | ||
2938 | /* Cursor A & B regs */ | 2948 | /* Cursor A & B regs */ |
2939 | #define _CURACNTR 0x70080 | 2949 | #define _CURACNTR (dev_priv->info->display_mmio_offset + 0x70080) |
2940 | /* Old style CUR*CNTR flags (desktop 8xx) */ | 2950 | /* Old style CUR*CNTR flags (desktop 8xx) */ |
2941 | #define CURSOR_ENABLE 0x80000000 | 2951 | #define CURSOR_ENABLE 0x80000000 |
2942 | #define CURSOR_GAMMA_ENABLE 0x40000000 | 2952 | #define CURSOR_GAMMA_ENABLE 0x40000000 |
@@ -2957,16 +2967,16 @@ | |||
2957 | #define MCURSOR_PIPE_A 0x00 | 2967 | #define MCURSOR_PIPE_A 0x00 |
2958 | #define MCURSOR_PIPE_B (1 << 28) | 2968 | #define MCURSOR_PIPE_B (1 << 28) |
2959 | #define MCURSOR_GAMMA_ENABLE (1 << 26) | 2969 | #define MCURSOR_GAMMA_ENABLE (1 << 26) |
2960 | #define _CURABASE 0x70084 | 2970 | #define _CURABASE (dev_priv->info->display_mmio_offset + 0x70084) |
2961 | #define _CURAPOS 0x70088 | 2971 | #define _CURAPOS (dev_priv->info->display_mmio_offset + 0x70088) |
2962 | #define CURSOR_POS_MASK 0x007FF | 2972 | #define CURSOR_POS_MASK 0x007FF |
2963 | #define CURSOR_POS_SIGN 0x8000 | 2973 | #define CURSOR_POS_SIGN 0x8000 |
2964 | #define CURSOR_X_SHIFT 0 | 2974 | #define CURSOR_X_SHIFT 0 |
2965 | #define CURSOR_Y_SHIFT 16 | 2975 | #define CURSOR_Y_SHIFT 16 |
2966 | #define CURSIZE 0x700a0 | 2976 | #define CURSIZE 0x700a0 |
2967 | #define _CURBCNTR 0x700c0 | 2977 | #define _CURBCNTR (dev_priv->info->display_mmio_offset + 0x700c0) |
2968 | #define _CURBBASE 0x700c4 | 2978 | #define _CURBBASE (dev_priv->info->display_mmio_offset + 0x700c4) |
2969 | #define _CURBPOS 0x700c8 | 2979 | #define _CURBPOS (dev_priv->info->display_mmio_offset + 0x700c8) |
2970 | 2980 | ||
2971 | #define _CURBCNTR_IVB 0x71080 | 2981 | #define _CURBCNTR_IVB 0x71080 |
2972 | #define _CURBBASE_IVB 0x71084 | 2982 | #define _CURBBASE_IVB 0x71084 |
@@ -2981,7 +2991,7 @@ | |||
2981 | #define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB) | 2991 | #define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB) |
2982 | 2992 | ||
2983 | /* Display A control */ | 2993 | /* Display A control */ |
2984 | #define _DSPACNTR 0x70180 | 2994 | #define _DSPACNTR (dev_priv->info->display_mmio_offset + 0x70180) |
2985 | #define DISPLAY_PLANE_ENABLE (1<<31) | 2995 | #define DISPLAY_PLANE_ENABLE (1<<31) |
2986 | #define DISPLAY_PLANE_DISABLE 0 | 2996 | #define DISPLAY_PLANE_DISABLE 0 |
2987 | #define DISPPLANE_GAMMA_ENABLE (1<<30) | 2997 | #define DISPPLANE_GAMMA_ENABLE (1<<30) |
@@ -3014,14 +3024,14 @@ | |||
3014 | #define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) | 3024 | #define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) |
3015 | #define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */ | 3025 | #define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */ |
3016 | #define DISPPLANE_TILED (1<<10) | 3026 | #define DISPPLANE_TILED (1<<10) |
3017 | #define _DSPAADDR 0x70184 | 3027 | #define _DSPAADDR (dev_priv->info->display_mmio_offset + 0x70184) |
3018 | #define _DSPASTRIDE 0x70188 | 3028 | #define _DSPASTRIDE (dev_priv->info->display_mmio_offset + 0x70188) |
3019 | #define _DSPAPOS 0x7018C /* reserved */ | 3029 | #define _DSPAPOS (dev_priv->info->display_mmio_offset + 0x7018C) /* reserved */ |
3020 | #define _DSPASIZE 0x70190 | 3030 | #define _DSPASIZE (dev_priv->info->display_mmio_offset + 0x70190) |
3021 | #define _DSPASURF 0x7019C /* 965+ only */ | 3031 | #define _DSPASURF (dev_priv->info->display_mmio_offset + 0x7019C) /* 965+ only */ |
3022 | #define _DSPATILEOFF 0x701A4 /* 965+ only */ | 3032 | #define _DSPATILEOFF (dev_priv->info->display_mmio_offset + 0x701A4) /* 965+ only */ |
3023 | #define _DSPAOFFSET 0x701A4 /* HSW */ | 3033 | #define _DSPAOFFSET (dev_priv->info->display_mmio_offset + 0x701A4) /* HSW */ |
3024 | #define _DSPASURFLIVE 0x701AC | 3034 | #define _DSPASURFLIVE (dev_priv->info->display_mmio_offset + 0x701AC) |
3025 | 3035 | ||
3026 | #define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR) | 3036 | #define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR) |
3027 | #define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR) | 3037 | #define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR) |
@@ -3042,44 +3052,44 @@ | |||
3042 | (I915_WRITE((reg), (gfx_addr) | I915_LO_DISPBASE(I915_READ(reg)))) | 3052 | (I915_WRITE((reg), (gfx_addr) | I915_LO_DISPBASE(I915_READ(reg)))) |
3043 | 3053 | ||
3044 | /* VBIOS flags */ | 3054 | /* VBIOS flags */ |
3045 | #define SWF00 0x71410 | 3055 | #define SWF00 (dev_priv->info->display_mmio_offset + 0x71410) |
3046 | #define SWF01 0x71414 | 3056 | #define SWF01 (dev_priv->info->display_mmio_offset + 0x71414) |
3047 | #define SWF02 0x71418 | 3057 | #define SWF02 (dev_priv->info->display_mmio_offset + 0x71418) |
3048 | #define SWF03 0x7141c | 3058 | #define SWF03 (dev_priv->info->display_mmio_offset + 0x7141c) |
3049 | #define SWF04 0x71420 | 3059 | #define SWF04 (dev_priv->info->display_mmio_offset + 0x71420) |
3050 | #define SWF05 0x71424 | 3060 | #define SWF05 (dev_priv->info->display_mmio_offset + 0x71424) |
3051 | #define SWF06 0x71428 | 3061 | #define SWF06 (dev_priv->info->display_mmio_offset + 0x71428) |
3052 | #define SWF10 0x70410 | 3062 | #define SWF10 (dev_priv->info->display_mmio_offset + 0x70410) |
3053 | #define SWF11 0x70414 | 3063 | #define SWF11 (dev_priv->info->display_mmio_offset + 0x70414) |
3054 | #define SWF14 0x71420 | 3064 | #define SWF14 (dev_priv->info->display_mmio_offset + 0x71420) |
3055 | #define SWF30 0x72414 | 3065 | #define SWF30 (dev_priv->info->display_mmio_offset + 0x72414) |
3056 | #define SWF31 0x72418 | 3066 | #define SWF31 (dev_priv->info->display_mmio_offset + 0x72418) |
3057 | #define SWF32 0x7241c | 3067 | #define SWF32 (dev_priv->info->display_mmio_offset + 0x7241c) |
3058 | 3068 | ||
3059 | /* Pipe B */ | 3069 | /* Pipe B */ |
3060 | #define _PIPEBDSL 0x71000 | 3070 | #define _PIPEBDSL (dev_priv->info->display_mmio_offset + 0x71000) |
3061 | #define _PIPEBCONF 0x71008 | 3071 | #define _PIPEBCONF (dev_priv->info->display_mmio_offset + 0x71008) |
3062 | #define _PIPEBSTAT 0x71024 | 3072 | #define _PIPEBSTAT (dev_priv->info->display_mmio_offset + 0x71024) |
3063 | #define _PIPEBFRAMEHIGH 0x71040 | 3073 | #define _PIPEBFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x71040) |
3064 | #define _PIPEBFRAMEPIXEL 0x71044 | 3074 | #define _PIPEBFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x71044) |
3065 | #define _PIPEB_FRMCOUNT_GM45 0x71040 | 3075 | #define _PIPEB_FRMCOUNT_GM45 0x71040 |
3066 | #define _PIPEB_FLIPCOUNT_GM45 0x71044 | 3076 | #define _PIPEB_FLIPCOUNT_GM45 0x71044 |
3067 | 3077 | ||
3068 | 3078 | ||
3069 | /* Display B control */ | 3079 | /* Display B control */ |
3070 | #define _DSPBCNTR 0x71180 | 3080 | #define _DSPBCNTR (dev_priv->info->display_mmio_offset + 0x71180) |
3071 | #define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15) | 3081 | #define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15) |
3072 | #define DISPPLANE_ALPHA_TRANS_DISABLE 0 | 3082 | #define DISPPLANE_ALPHA_TRANS_DISABLE 0 |
3073 | #define DISPPLANE_SPRITE_ABOVE_DISPLAY 0 | 3083 | #define DISPPLANE_SPRITE_ABOVE_DISPLAY 0 |
3074 | #define DISPPLANE_SPRITE_ABOVE_OVERLAY (1) | 3084 | #define DISPPLANE_SPRITE_ABOVE_OVERLAY (1) |
3075 | #define _DSPBADDR 0x71184 | 3085 | #define _DSPBADDR (dev_priv->info->display_mmio_offset + 0x71184) |
3076 | #define _DSPBSTRIDE 0x71188 | 3086 | #define _DSPBSTRIDE (dev_priv->info->display_mmio_offset + 0x71188) |
3077 | #define _DSPBPOS 0x7118C | 3087 | #define _DSPBPOS (dev_priv->info->display_mmio_offset + 0x7118C) |
3078 | #define _DSPBSIZE 0x71190 | 3088 | #define _DSPBSIZE (dev_priv->info->display_mmio_offset + 0x71190) |
3079 | #define _DSPBSURF 0x7119C | 3089 | #define _DSPBSURF (dev_priv->info->display_mmio_offset + 0x7119C) |
3080 | #define _DSPBTILEOFF 0x711A4 | 3090 | #define _DSPBTILEOFF (dev_priv->info->display_mmio_offset + 0x711A4) |
3081 | #define _DSPBOFFSET 0x711A4 | 3091 | #define _DSPBOFFSET (dev_priv->info->display_mmio_offset + 0x711A4) |
3082 | #define _DSPBSURFLIVE 0x711AC | 3092 | #define _DSPBSURFLIVE (dev_priv->info->display_mmio_offset + 0x711AC) |
3083 | 3093 | ||
3084 | /* Sprite A control */ | 3094 | /* Sprite A control */ |
3085 | #define _DVSACNTR 0x72180 | 3095 | #define _DVSACNTR 0x72180 |
@@ -3228,6 +3238,8 @@ | |||
3228 | # define VGA_2X_MODE (1 << 30) | 3238 | # define VGA_2X_MODE (1 << 30) |
3229 | # define VGA_PIPE_B_SELECT (1 << 29) | 3239 | # define VGA_PIPE_B_SELECT (1 << 29) |
3230 | 3240 | ||
3241 | #define VLV_VGACNTRL (VLV_DISPLAY_BASE + 0x71400) | ||
3242 | |||
3231 | /* Ironlake */ | 3243 | /* Ironlake */ |
3232 | 3244 | ||
3233 | #define CPU_VGACNTRL 0x41000 | 3245 | #define CPU_VGACNTRL 0x41000 |
@@ -3268,41 +3280,41 @@ | |||
3268 | #define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff | 3280 | #define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff |
3269 | 3281 | ||
3270 | 3282 | ||
3271 | #define _PIPEA_DATA_M1 0x60030 | 3283 | #define _PIPEA_DATA_M1 (dev_priv->info->display_mmio_offset + 0x60030) |
3272 | #define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */ | 3284 | #define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */ |
3273 | #define TU_SIZE_MASK 0x7e000000 | 3285 | #define TU_SIZE_MASK 0x7e000000 |
3274 | #define PIPE_DATA_M1_OFFSET 0 | 3286 | #define PIPE_DATA_M1_OFFSET 0 |
3275 | #define _PIPEA_DATA_N1 0x60034 | 3287 | #define _PIPEA_DATA_N1 (dev_priv->info->display_mmio_offset + 0x60034) |
3276 | #define PIPE_DATA_N1_OFFSET 0 | 3288 | #define PIPE_DATA_N1_OFFSET 0 |
3277 | 3289 | ||
3278 | #define _PIPEA_DATA_M2 0x60038 | 3290 | #define _PIPEA_DATA_M2 (dev_priv->info->display_mmio_offset + 0x60038) |
3279 | #define PIPE_DATA_M2_OFFSET 0 | 3291 | #define PIPE_DATA_M2_OFFSET 0 |
3280 | #define _PIPEA_DATA_N2 0x6003c | 3292 | #define _PIPEA_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6003c) |
3281 | #define PIPE_DATA_N2_OFFSET 0 | 3293 | #define PIPE_DATA_N2_OFFSET 0 |
3282 | 3294 | ||
3283 | #define _PIPEA_LINK_M1 0x60040 | 3295 | #define _PIPEA_LINK_M1 (dev_priv->info->display_mmio_offset + 0x60040) |
3284 | #define PIPE_LINK_M1_OFFSET 0 | 3296 | #define PIPE_LINK_M1_OFFSET 0 |
3285 | #define _PIPEA_LINK_N1 0x60044 | 3297 | #define _PIPEA_LINK_N1 (dev_priv->info->display_mmio_offset + 0x60044) |
3286 | #define PIPE_LINK_N1_OFFSET 0 | 3298 | #define PIPE_LINK_N1_OFFSET 0 |
3287 | 3299 | ||
3288 | #define _PIPEA_LINK_M2 0x60048 | 3300 | #define _PIPEA_LINK_M2 (dev_priv->info->display_mmio_offset + 0x60048) |
3289 | #define PIPE_LINK_M2_OFFSET 0 | 3301 | #define PIPE_LINK_M2_OFFSET 0 |
3290 | #define _PIPEA_LINK_N2 0x6004c | 3302 | #define _PIPEA_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6004c) |
3291 | #define PIPE_LINK_N2_OFFSET 0 | 3303 | #define PIPE_LINK_N2_OFFSET 0 |
3292 | 3304 | ||
3293 | /* PIPEB timing regs are same start from 0x61000 */ | 3305 | /* PIPEB timing regs are same start from 0x61000 */ |
3294 | 3306 | ||
3295 | #define _PIPEB_DATA_M1 0x61030 | 3307 | #define _PIPEB_DATA_M1 (dev_priv->info->display_mmio_offset + 0x61030) |
3296 | #define _PIPEB_DATA_N1 0x61034 | 3308 | #define _PIPEB_DATA_N1 (dev_priv->info->display_mmio_offset + 0x61034) |
3297 | 3309 | ||
3298 | #define _PIPEB_DATA_M2 0x61038 | 3310 | #define _PIPEB_DATA_M2 (dev_priv->info->display_mmio_offset + 0x61038) |
3299 | #define _PIPEB_DATA_N2 0x6103c | 3311 | #define _PIPEB_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6103c) |
3300 | 3312 | ||
3301 | #define _PIPEB_LINK_M1 0x61040 | 3313 | #define _PIPEB_LINK_M1 (dev_priv->info->display_mmio_offset + 0x61040) |
3302 | #define _PIPEB_LINK_N1 0x61044 | 3314 | #define _PIPEB_LINK_N1 (dev_priv->info->display_mmio_offset + 0x61044) |
3303 | 3315 | ||
3304 | #define _PIPEB_LINK_M2 0x61048 | 3316 | #define _PIPEB_LINK_M2 (dev_priv->info->display_mmio_offset + 0x61048) |
3305 | #define _PIPEB_LINK_N2 0x6104c | 3317 | #define _PIPEB_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6104c) |
3306 | 3318 | ||
3307 | #define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1) | 3319 | #define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1) |
3308 | #define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1) | 3320 | #define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1) |
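With the offset baked into the per-pipe defines, call sites keep going through the _TRANSCODER() lookup unchanged. A minimal sketch of a read, assuming the driver's enum transcoder values (per the define above, TU_SIZE() stores the transfer-unit size minus one):

	u32 data_m1 = I915_READ(PIPE_DATA_M1(TRANSCODER_A));
	/* TU_SIZE_MASK covers bits 30:25; the field holds size - 1. */
	u32 tu_size = ((data_m1 & TU_SIZE_MASK) >> 25) + 1;
	u32 data_m  = data_m1 & ~TU_SIZE_MASK;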
@@ -3699,13 +3711,13 @@ | |||
3699 | #define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B) | 3711 | #define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B) |
3700 | #define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) | 3712 | #define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) |
3701 | 3713 | ||
3702 | #define VLV_VIDEO_DIP_CTL_A 0x60200 | 3714 | #define VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200) |
3703 | #define VLV_VIDEO_DIP_DATA_A 0x60208 | 3715 | #define VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208) |
3704 | #define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210 | 3716 | #define VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210) |
3705 | 3717 | ||
3706 | #define VLV_VIDEO_DIP_CTL_B 0x61170 | 3718 | #define VLV_VIDEO_DIP_CTL_B (VLV_DISPLAY_BASE + 0x61170) |
3707 | #define VLV_VIDEO_DIP_DATA_B 0x61174 | 3719 | #define VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174) |
3708 | #define VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178 | 3720 | #define VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178) |
3709 | 3721 | ||
3710 | #define VLV_TVIDEO_DIP_CTL(pipe) \ | 3722 | #define VLV_TVIDEO_DIP_CTL(pipe) \ |
3711 | _PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B) | 3723 | _PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B) |
@@ -3995,17 +4007,17 @@ | |||
3995 | #define LVDS_DETECTED (1 << 1) | 4007 | #define LVDS_DETECTED (1 << 1) |
3996 | 4008 | ||
3997 | /* vlv has 2 sets of panel control regs. */ | 4009 | /* vlv has 2 sets of panel control regs. */ |
3998 | #define PIPEA_PP_STATUS 0x61200 | 4010 | #define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200) |
3999 | #define PIPEA_PP_CONTROL 0x61204 | 4011 | #define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204) |
4000 | #define PIPEA_PP_ON_DELAYS 0x61208 | 4012 | #define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208) |
4001 | #define PIPEA_PP_OFF_DELAYS 0x6120c | 4013 | #define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c) |
4002 | #define PIPEA_PP_DIVISOR 0x61210 | 4014 | #define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210) |
4003 | 4015 | ||
4004 | #define PIPEB_PP_STATUS 0x61300 | 4016 | #define PIPEB_PP_STATUS (VLV_DISPLAY_BASE + 0x61300) |
4005 | #define PIPEB_PP_CONTROL 0x61304 | 4017 | #define PIPEB_PP_CONTROL (VLV_DISPLAY_BASE + 0x61304) |
4006 | #define PIPEB_PP_ON_DELAYS 0x61308 | 4018 | #define PIPEB_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61308) |
4007 | #define PIPEB_PP_OFF_DELAYS 0x6130c | 4019 | #define PIPEB_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6130c) |
4008 | #define PIPEB_PP_DIVISOR 0x61310 | 4020 | #define PIPEB_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61310) |
4009 | 4021 | ||
4010 | #define PCH_PP_STATUS 0xc7200 | 4022 | #define PCH_PP_STATUS 0xc7200 |
4011 | #define PCH_PP_CONTROL 0xc7204 | 4023 | #define PCH_PP_CONTROL 0xc7204 |
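As the comment above notes, vlv keeps one panel-power sequencer per pipe where PCH platforms have a single PCH_PP_* block, so a caller must select the instance explicitly. A sketch, assuming only pipes A and B as in the defines:

	u32 pp_status = I915_READ(pipe == PIPE_A ? PIPEA_PP_STATUS
						 : PIPEB_PP_STATUS);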
@@ -4186,7 +4198,9 @@ | |||
4186 | #define GEN6_RP_INTERRUPT_LIMITS 0xA014 | 4198 | #define GEN6_RP_INTERRUPT_LIMITS 0xA014 |
4187 | #define GEN6_RPSTAT1 0xA01C | 4199 | #define GEN6_RPSTAT1 0xA01C |
4188 | #define GEN6_CAGF_SHIFT 8 | 4200 | #define GEN6_CAGF_SHIFT 8 |
4201 | #define HSW_CAGF_SHIFT 7 | ||
4189 | #define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT) | 4202 | #define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT) |
4203 | #define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT) | ||
4190 | #define GEN6_RP_CONTROL 0xA024 | 4204 | #define GEN6_RP_CONTROL 0xA024 |
4191 | #define GEN6_RP_MEDIA_TURBO (1<<11) | 4205 | #define GEN6_RP_MEDIA_TURBO (1<<11) |
4192 | #define GEN6_RP_MEDIA_MODE_MASK (3<<9) | 4206 | #define GEN6_RP_MEDIA_MODE_MASK (3<<9) |
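Haswell moved the CAGF (current actual GPU frequency) field down one bit in GEN6_RPSTAT1, so mask and shift must be chosen per platform. A sketch of the decode the new defines imply, assuming a struct drm_device *dev in scope:

	u32 rpstat = I915_READ(GEN6_RPSTAT1);
	u32 cagf;

	if (IS_HASWELL(dev))
		cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
	else
		cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;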
@@ -4297,7 +4311,7 @@ | |||
4297 | #define GEN7_ROW_CHICKEN2_GT2 0xf4f4 | 4311 | #define GEN7_ROW_CHICKEN2_GT2 0xf4f4 |
4298 | #define DOP_CLOCK_GATING_DISABLE (1<<0) | 4312 | #define DOP_CLOCK_GATING_DISABLE (1<<0) |
4299 | 4313 | ||
4300 | #define G4X_AUD_VID_DID 0x62020 | 4314 | #define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020) |
4301 | #define INTEL_AUDIO_DEVCL 0x808629FB | 4315 | #define INTEL_AUDIO_DEVCL 0x808629FB |
4302 | #define INTEL_AUDIO_DEVBLC 0x80862801 | 4316 | #define INTEL_AUDIO_DEVBLC 0x80862801 |
4303 | #define INTEL_AUDIO_DEVCTG 0x80862802 | 4317 | #define INTEL_AUDIO_DEVCTG 0x80862802 |
@@ -4413,10 +4427,10 @@ | |||
4413 | #define AUDIO_CP_READY_C (1<<9) | 4427 | #define AUDIO_CP_READY_C (1<<9) |
4414 | 4428 | ||
4415 | /* HSW Power Wells */ | 4429 | /* HSW Power Wells */ |
4416 | #define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */ | 4430 | #define HSW_PWR_WELL_BIOS 0x45400 /* CTL1 */ |
4417 | #define HSW_PWR_WELL_CTL2 0x45404 /* Driver */ | 4431 | #define HSW_PWR_WELL_DRIVER 0x45404 /* CTL2 */ |
4418 | #define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */ | 4432 | #define HSW_PWR_WELL_KVMR 0x45408 /* CTL3 */ |
4419 | #define HSW_PWR_WELL_CTL4 0x4540C /* Debug */ | 4433 | #define HSW_PWR_WELL_DEBUG 0x4540C /* CTL4 */ |
4420 | #define HSW_PWR_WELL_ENABLE (1<<31) | 4434 | #define HSW_PWR_WELL_ENABLE (1<<31) |
4421 | #define HSW_PWR_WELL_STATE (1<<30) | 4435 | #define HSW_PWR_WELL_STATE (1<<30) |
4422 | #define HSW_PWR_WELL_CTL5 0x45410 | 4436 | #define HSW_PWR_WELL_CTL5 0x45410 |
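The rename makes the ownership of the four request registers explicit: BIOS, driver, KVMr and debug each get their own, and the driver only ever touches HSW_PWR_WELL_DRIVER. A rough sketch of the request/ack handshake the bit defines describe (busy-poll shown for brevity; real code would bound the wait):

	I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE);
	while ((I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE) == 0)
		cpu_relax();	/* wait for the well to report powered up */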
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 63d4d30c39de..2135f21ea458 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -29,67 +29,6 @@ | |||
29 | #include "intel_drv.h" | 29 | #include "intel_drv.h" |
30 | #include "i915_reg.h" | 30 | #include "i915_reg.h" |
31 | 31 | ||
32 | static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) | ||
33 | { | ||
34 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
35 | u32 dpll_reg; | ||
36 | |||
37 | /* On IVB, 3rd pipe shares PLL with another one */ | ||
38 | if (pipe > 1) | ||
39 | return false; | ||
40 | |||
41 | if (HAS_PCH_SPLIT(dev)) | ||
42 | dpll_reg = _PCH_DPLL(pipe); | ||
43 | else | ||
44 | dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B; | ||
45 | |||
46 | return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE); | ||
47 | } | ||
48 | |||
49 | static void i915_save_palette(struct drm_device *dev, enum pipe pipe) | ||
50 | { | ||
51 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
52 | unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B); | ||
53 | u32 *array; | ||
54 | int i; | ||
55 | |||
56 | if (!i915_pipe_enabled(dev, pipe)) | ||
57 | return; | ||
58 | |||
59 | if (HAS_PCH_SPLIT(dev)) | ||
60 | reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B; | ||
61 | |||
62 | if (pipe == PIPE_A) | ||
63 | array = dev_priv->regfile.save_palette_a; | ||
64 | else | ||
65 | array = dev_priv->regfile.save_palette_b; | ||
66 | |||
67 | for (i = 0; i < 256; i++) | ||
68 | array[i] = I915_READ(reg + (i << 2)); | ||
69 | } | ||
70 | |||
71 | static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) | ||
72 | { | ||
73 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
74 | unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B); | ||
75 | u32 *array; | ||
76 | int i; | ||
77 | |||
78 | if (!i915_pipe_enabled(dev, pipe)) | ||
79 | return; | ||
80 | |||
81 | if (HAS_PCH_SPLIT(dev)) | ||
82 | reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B; | ||
83 | |||
84 | if (pipe == PIPE_A) | ||
85 | array = dev_priv->regfile.save_palette_a; | ||
86 | else | ||
87 | array = dev_priv->regfile.save_palette_b; | ||
88 | |||
89 | for (i = 0; i < 256; i++) | ||
90 | I915_WRITE(reg + (i << 2), array[i]); | ||
91 | } | ||
92 | |||
93 | static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg) | 32 | static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg) |
94 | { | 33 | { |
95 | struct drm_i915_private *dev_priv = dev->dev_private; | 34 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -130,6 +69,12 @@ static void i915_save_vga(struct drm_device *dev) | |||
130 | int i; | 69 | int i; |
131 | u16 cr_index, cr_data, st01; | 70 | u16 cr_index, cr_data, st01; |
132 | 71 | ||
72 | /* VGA state */ | ||
73 | dev_priv->regfile.saveVGA0 = I915_READ(VGA0); | ||
74 | dev_priv->regfile.saveVGA1 = I915_READ(VGA1); | ||
75 | dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD); | ||
76 | dev_priv->regfile.saveVGACNTRL = I915_READ(i915_vgacntrl_reg(dev)); | ||
77 | |||
133 | /* VGA color palette registers */ | 78 | /* VGA color palette registers */ |
134 | dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK); | 79 | dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK); |
135 | 80 | ||
@@ -188,6 +133,15 @@ static void i915_restore_vga(struct drm_device *dev) | |||
188 | int i; | 133 | int i; |
189 | u16 cr_index, cr_data, st01; | 134 | u16 cr_index, cr_data, st01; |
190 | 135 | ||
136 | /* VGA state */ | ||
137 | I915_WRITE(i915_vgacntrl_reg(dev), dev_priv->regfile.saveVGACNTRL); | ||
138 | |||
139 | I915_WRITE(VGA0, dev_priv->regfile.saveVGA0); | ||
140 | I915_WRITE(VGA1, dev_priv->regfile.saveVGA1); | ||
141 | I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD); | ||
142 | POSTING_READ(VGA_PD); | ||
143 | udelay(150); | ||
144 | |||
191 | /* MSR bits */ | 145 | /* MSR bits */ |
192 | I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR); | 146 | I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR); |
193 | if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) { | 147 | if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) { |
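The restore path now brings VGACNTRL and the VGA PLL registers back before the indexed-register dance below. The POSTING_READ()/udelay() pair is the driver's standard mmio-flush idiom, sketched here in isolation:

	I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD);
	POSTING_READ(VGA_PD);	/* flush the write out of the posting buffer */
	udelay(150);		/* give the VGA PLL time to settle */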
@@ -235,396 +189,18 @@ static void i915_restore_vga(struct drm_device *dev) | |||
235 | I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK); | 189 | I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK); |
236 | } | 190 | } |
237 | 191 | ||
238 | static void i915_save_modeset_reg(struct drm_device *dev) | ||
239 | { | ||
240 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
241 | int i; | ||
242 | |||
243 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
244 | return; | ||
245 | |||
246 | /* Cursor state */ | ||
247 | dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR); | ||
248 | dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS); | ||
249 | dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE); | ||
250 | dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR); | ||
251 | dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS); | ||
252 | dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE); | ||
253 | if (IS_GEN2(dev)) | ||
254 | dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE); | ||
255 | |||
256 | if (HAS_PCH_SPLIT(dev)) { | ||
257 | dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); | ||
258 | dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); | ||
259 | } | ||
260 | |||
261 | /* Pipe & plane A info */ | ||
262 | dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF); | ||
263 | dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC); | ||
264 | if (HAS_PCH_SPLIT(dev)) { | ||
265 | dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0); | ||
266 | dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1); | ||
267 | dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A); | ||
268 | } else { | ||
269 | dev_priv->regfile.saveFPA0 = I915_READ(_FPA0); | ||
270 | dev_priv->regfile.saveFPA1 = I915_READ(_FPA1); | ||
271 | dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A); | ||
272 | } | ||
273 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) | ||
274 | dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD); | ||
275 | dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A); | ||
276 | dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A); | ||
277 | dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A); | ||
278 | dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A); | ||
279 | dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A); | ||
280 | dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A); | ||
281 | if (!HAS_PCH_SPLIT(dev)) | ||
282 | dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A); | ||
283 | |||
284 | if (HAS_PCH_SPLIT(dev)) { | ||
285 | dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1); | ||
286 | dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1); | ||
287 | dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1); | ||
288 | dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1); | ||
289 | |||
290 | dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL); | ||
291 | dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL); | ||
292 | |||
293 | dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1); | ||
294 | dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ); | ||
295 | dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS); | ||
296 | |||
297 | dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF); | ||
298 | dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A); | ||
299 | dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A); | ||
300 | dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A); | ||
301 | dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A); | ||
302 | dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A); | ||
303 | dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A); | ||
304 | } | ||
305 | |||
306 | dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR); | ||
307 | dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE); | ||
308 | dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE); | ||
309 | dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS); | ||
310 | dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR); | ||
311 | if (INTEL_INFO(dev)->gen >= 4) { | ||
312 | dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF); | ||
313 | dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF); | ||
314 | } | ||
315 | i915_save_palette(dev, PIPE_A); | ||
316 | dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT); | ||
317 | |||
318 | /* Pipe & plane B info */ | ||
319 | dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF); | ||
320 | dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC); | ||
321 | if (HAS_PCH_SPLIT(dev)) { | ||
322 | dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0); | ||
323 | dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1); | ||
324 | dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B); | ||
325 | } else { | ||
326 | dev_priv->regfile.saveFPB0 = I915_READ(_FPB0); | ||
327 | dev_priv->regfile.saveFPB1 = I915_READ(_FPB1); | ||
328 | dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B); | ||
329 | } | ||
330 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) | ||
331 | dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD); | ||
332 | dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B); | ||
333 | dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B); | ||
334 | dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B); | ||
335 | dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B); | ||
336 | dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B); | ||
337 | dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B); | ||
338 | if (!HAS_PCH_SPLIT(dev)) | ||
339 | dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B); | ||
340 | |||
341 | if (HAS_PCH_SPLIT(dev)) { | ||
342 | dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1); | ||
343 | dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1); | ||
344 | dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1); | ||
345 | dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1); | ||
346 | |||
347 | dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL); | ||
348 | dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL); | ||
349 | |||
350 | dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1); | ||
351 | dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ); | ||
352 | dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS); | ||
353 | |||
354 | dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF); | ||
355 | dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B); | ||
356 | dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B); | ||
357 | dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B); | ||
358 | dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B); | ||
359 | dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B); | ||
360 | dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B); | ||
361 | } | ||
362 | |||
363 | dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR); | ||
364 | dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE); | ||
365 | dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE); | ||
366 | dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS); | ||
367 | dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR); | ||
368 | if (INTEL_INFO(dev)->gen >= 4) { | ||
369 | dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF); | ||
370 | dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF); | ||
371 | } | ||
372 | i915_save_palette(dev, PIPE_B); | ||
373 | dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT); | ||
374 | |||
375 | /* Fences */ | ||
376 | switch (INTEL_INFO(dev)->gen) { | ||
377 | case 7: | ||
378 | case 6: | ||
379 | for (i = 0; i < 16; i++) | ||
380 | dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); | ||
381 | break; | ||
382 | case 5: | ||
383 | case 4: | ||
384 | for (i = 0; i < 16; i++) | ||
385 | dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); | ||
386 | break; | ||
387 | case 3: | ||
388 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
389 | for (i = 0; i < 8; i++) | ||
390 | dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); | ||
391 | case 2: | ||
392 | for (i = 0; i < 8; i++) | ||
393 | dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); | ||
394 | break; | ||
395 | } | ||
396 | |||
397 | /* CRT state */ | ||
398 | if (HAS_PCH_SPLIT(dev)) | ||
399 | dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA); | ||
400 | else | ||
401 | dev_priv->regfile.saveADPA = I915_READ(ADPA); | ||
402 | |||
403 | return; | ||
404 | } | ||
405 | |||
406 | static void i915_restore_modeset_reg(struct drm_device *dev) | ||
407 | { | ||
408 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
409 | int dpll_a_reg, fpa0_reg, fpa1_reg; | ||
410 | int dpll_b_reg, fpb0_reg, fpb1_reg; | ||
411 | int i; | ||
412 | |||
413 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
414 | return; | ||
415 | |||
416 | /* Fences */ | ||
417 | switch (INTEL_INFO(dev)->gen) { | ||
418 | case 7: | ||
419 | case 6: | ||
420 | for (i = 0; i < 16; i++) | ||
421 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]); | ||
422 | break; | ||
423 | case 5: | ||
424 | case 4: | ||
425 | for (i = 0; i < 16; i++) | ||
426 | I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]); | ||
427 | break; | ||
428 | case 3: | ||
429 | case 2: | ||
430 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
431 | for (i = 0; i < 8; i++) | ||
432 | I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]); | ||
433 | for (i = 0; i < 8; i++) | ||
434 | I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]); | ||
435 | break; | ||
436 | } | ||
437 | |||
438 | |||
439 | if (HAS_PCH_SPLIT(dev)) { | ||
440 | dpll_a_reg = _PCH_DPLL_A; | ||
441 | dpll_b_reg = _PCH_DPLL_B; | ||
442 | fpa0_reg = _PCH_FPA0; | ||
443 | fpb0_reg = _PCH_FPB0; | ||
444 | fpa1_reg = _PCH_FPA1; | ||
445 | fpb1_reg = _PCH_FPB1; | ||
446 | } else { | ||
447 | dpll_a_reg = _DPLL_A; | ||
448 | dpll_b_reg = _DPLL_B; | ||
449 | fpa0_reg = _FPA0; | ||
450 | fpb0_reg = _FPB0; | ||
451 | fpa1_reg = _FPA1; | ||
452 | fpb1_reg = _FPB1; | ||
453 | } | ||
454 | |||
455 | if (HAS_PCH_SPLIT(dev)) { | ||
456 | I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL); | ||
457 | I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL); | ||
458 | } | ||
459 | |||
460 | /* Pipe & plane A info */ | ||
461 | /* Prime the clock */ | ||
462 | if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) { | ||
463 | I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A & | ||
464 | ~DPLL_VCO_ENABLE); | ||
465 | POSTING_READ(dpll_a_reg); | ||
466 | udelay(150); | ||
467 | } | ||
468 | I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0); | ||
469 | I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1); | ||
470 | /* Actually enable it */ | ||
471 | I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A); | ||
472 | POSTING_READ(dpll_a_reg); | ||
473 | udelay(150); | ||
474 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { | ||
475 | I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD); | ||
476 | POSTING_READ(_DPLL_A_MD); | ||
477 | } | ||
478 | udelay(150); | ||
479 | |||
480 | /* Restore mode */ | ||
481 | I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A); | ||
482 | I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A); | ||
483 | I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A); | ||
484 | I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A); | ||
485 | I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A); | ||
486 | I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A); | ||
487 | if (!HAS_PCH_SPLIT(dev)) | ||
488 | I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A); | ||
489 | |||
490 | if (HAS_PCH_SPLIT(dev)) { | ||
491 | I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1); | ||
492 | I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1); | ||
493 | I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1); | ||
494 | I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1); | ||
495 | |||
496 | I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL); | ||
497 | I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL); | ||
498 | |||
499 | I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1); | ||
500 | I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ); | ||
501 | I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS); | ||
502 | |||
503 | I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF); | ||
504 | I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A); | ||
505 | I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A); | ||
506 | I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A); | ||
507 | I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A); | ||
508 | I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A); | ||
509 | I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A); | ||
510 | } | ||
511 | |||
512 | /* Restore plane info */ | ||
513 | I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE); | ||
514 | I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS); | ||
515 | I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC); | ||
516 | I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR); | ||
517 | I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE); | ||
518 | if (INTEL_INFO(dev)->gen >= 4) { | ||
519 | I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF); | ||
520 | I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF); | ||
521 | } | ||
522 | |||
523 | I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF); | ||
524 | |||
525 | i915_restore_palette(dev, PIPE_A); | ||
526 | /* Enable the plane */ | ||
527 | I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR); | ||
528 | I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR)); | ||
529 | |||
530 | /* Pipe & plane B info */ | ||
531 | if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) { | ||
532 | I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B & | ||
533 | ~DPLL_VCO_ENABLE); | ||
534 | POSTING_READ(dpll_b_reg); | ||
535 | udelay(150); | ||
536 | } | ||
537 | I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0); | ||
538 | I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1); | ||
539 | /* Actually enable it */ | ||
540 | I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B); | ||
541 | POSTING_READ(dpll_b_reg); | ||
542 | udelay(150); | ||
543 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { | ||
544 | I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD); | ||
545 | POSTING_READ(_DPLL_B_MD); | ||
546 | } | ||
547 | udelay(150); | ||
548 | |||
549 | /* Restore mode */ | ||
550 | I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B); | ||
551 | I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B); | ||
552 | I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B); | ||
553 | I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B); | ||
554 | I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B); | ||
555 | I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B); | ||
556 | if (!HAS_PCH_SPLIT(dev)) | ||
557 | I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B); | ||
558 | |||
559 | if (HAS_PCH_SPLIT(dev)) { | ||
560 | I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1); | ||
561 | I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1); | ||
562 | I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1); | ||
563 | I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1); | ||
564 | |||
565 | I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL); | ||
566 | I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL); | ||
567 | |||
568 | I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1); | ||
569 | I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ); | ||
570 | I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS); | ||
571 | |||
572 | I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF); | ||
573 | I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B); | ||
574 | I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B); | ||
575 | I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B); | ||
576 | I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B); | ||
577 | I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B); | ||
578 | I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B); | ||
579 | } | ||
580 | |||
581 | /* Restore plane info */ | ||
582 | I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE); | ||
583 | I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS); | ||
584 | I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC); | ||
585 | I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR); | ||
586 | I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE); | ||
587 | if (INTEL_INFO(dev)->gen >= 4) { | ||
588 | I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF); | ||
589 | I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF); | ||
590 | } | ||
591 | |||
592 | I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF); | ||
593 | |||
594 | i915_restore_palette(dev, PIPE_B); | ||
595 | /* Enable the plane */ | ||
596 | I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR); | ||
597 | I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR)); | ||
598 | |||
599 | /* Cursor state */ | ||
600 | I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS); | ||
601 | I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR); | ||
602 | I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE); | ||
603 | I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS); | ||
604 | I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR); | ||
605 | I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE); | ||
606 | if (IS_GEN2(dev)) | ||
607 | I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE); | ||
608 | |||
609 | /* CRT state */ | ||
610 | if (HAS_PCH_SPLIT(dev)) | ||
611 | I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA); | ||
612 | else | ||
613 | I915_WRITE(ADPA, dev_priv->regfile.saveADPA); | ||
614 | |||
615 | return; | ||
616 | } | ||
617 | |||
618 | static void i915_save_display(struct drm_device *dev) | 192 | static void i915_save_display(struct drm_device *dev) |
619 | { | 193 | { |
620 | struct drm_i915_private *dev_priv = dev->dev_private; | 194 | struct drm_i915_private *dev_priv = dev->dev_private; |
621 | 195 | ||
622 | /* Display arbitration control */ | 196 | /* Display arbitration control */ |
623 | dev_priv->regfile.saveDSPARB = I915_READ(DSPARB); | 197 | if (INTEL_INFO(dev)->gen <= 4) |
198 | dev_priv->regfile.saveDSPARB = I915_READ(DSPARB); | ||
624 | 199 | ||
625 | /* This is only meaningful in non-KMS mode */ | 200 | /* This is only meaningful in non-KMS mode */ |
626 | /* Don't regfile.save them in KMS mode */ | 201 | /* Don't regfile.save them in KMS mode */ |
627 | i915_save_modeset_reg(dev); | 202 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
203 | i915_save_display_reg(dev); | ||
628 | 204 | ||
629 | /* LVDS state */ | 205 | /* LVDS state */ |
630 | if (HAS_PCH_SPLIT(dev)) { | 206 | if (HAS_PCH_SPLIT(dev)) { |
@@ -658,24 +234,6 @@ static void i915_save_display(struct drm_device *dev) | |||
658 | dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR); | 234 | dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR); |
659 | } | 235 | } |
660 | 236 | ||
661 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
662 | /* Display Port state */ | ||
663 | if (SUPPORTS_INTEGRATED_DP(dev)) { | ||
664 | dev_priv->regfile.saveDP_B = I915_READ(DP_B); | ||
665 | dev_priv->regfile.saveDP_C = I915_READ(DP_C); | ||
666 | dev_priv->regfile.saveDP_D = I915_READ(DP_D); | ||
667 | dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M); | ||
668 | dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M); | ||
669 | dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N); | ||
670 | dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N); | ||
671 | dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M); | ||
672 | dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M); | ||
673 | dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N); | ||
674 | dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N); | ||
675 | } | ||
676 | /* FIXME: regfile.save TV & SDVO state */ | ||
677 | } | ||
678 | |||
679 | /* Only regfile.save FBC state on the platform that supports FBC */ | 237 | /* Only regfile.save FBC state on the platform that supports FBC */ |
680 | if (I915_HAS_FBC(dev)) { | 238 | if (I915_HAS_FBC(dev)) { |
681 | if (HAS_PCH_SPLIT(dev)) { | 239 | if (HAS_PCH_SPLIT(dev)) { |
@@ -690,16 +248,8 @@ static void i915_save_display(struct drm_device *dev) | |||
690 | } | 248 | } |
691 | } | 249 | } |
692 | 250 | ||
693 | /* VGA state */ | 251 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
694 | dev_priv->regfile.saveVGA0 = I915_READ(VGA0); | 252 | i915_save_vga(dev); |
695 | dev_priv->regfile.saveVGA1 = I915_READ(VGA1); | ||
696 | dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD); | ||
697 | if (HAS_PCH_SPLIT(dev)) | ||
698 | dev_priv->regfile.saveVGACNTRL = I915_READ(CPU_VGACNTRL); | ||
699 | else | ||
700 | dev_priv->regfile.saveVGACNTRL = I915_READ(VGACNTRL); | ||
701 | |||
702 | i915_save_vga(dev); | ||
703 | } | 253 | } |
704 | 254 | ||
705 | static void i915_restore_display(struct drm_device *dev) | 255 | static void i915_restore_display(struct drm_device *dev) |
@@ -707,25 +257,11 @@ static void i915_restore_display(struct drm_device *dev) | |||
707 | struct drm_i915_private *dev_priv = dev->dev_private; | 257 | struct drm_i915_private *dev_priv = dev->dev_private; |
708 | 258 | ||
709 | /* Display arbitration */ | 259 | /* Display arbitration */ |
710 | I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB); | 260 | if (INTEL_INFO(dev)->gen <= 4) |
261 | I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB); | ||
711 | 262 | ||
712 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) { | 263 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
713 | /* Display port ratios (must be done before clock is set) */ | 264 | i915_restore_display_reg(dev); |
714 | if (SUPPORTS_INTEGRATED_DP(dev)) { | ||
715 | I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M); | ||
716 | I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M); | ||
717 | I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N); | ||
718 | I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N); | ||
719 | I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M); | ||
720 | I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M); | ||
721 | I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N); | ||
722 | I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N); | ||
723 | } | ||
724 | } | ||
725 | |||
726 | /* This is only meaningful in non-KMS mode */ | ||
727 | /* Don't restore them in KMS mode */ | ||
728 | i915_restore_modeset_reg(dev); | ||
729 | 265 | ||
730 | /* LVDS state */ | 266 | /* LVDS state */ |
731 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) | 267 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) |
@@ -763,16 +299,6 @@ static void i915_restore_display(struct drm_device *dev) | |||
763 | I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL); | 299 | I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL); |
764 | } | 300 | } |
765 | 301 | ||
766 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
767 | /* Display Port state */ | ||
768 | if (SUPPORTS_INTEGRATED_DP(dev)) { | ||
769 | I915_WRITE(DP_B, dev_priv->regfile.saveDP_B); | ||
770 | I915_WRITE(DP_C, dev_priv->regfile.saveDP_C); | ||
771 | I915_WRITE(DP_D, dev_priv->regfile.saveDP_D); | ||
772 | } | ||
773 | /* FIXME: restore TV & SDVO state */ | ||
774 | } | ||
775 | |||
776 | /* only restore FBC info on the platform that supports FBC*/ | 302 | /* only restore FBC info on the platform that supports FBC*/ |
777 | intel_disable_fbc(dev); | 303 | intel_disable_fbc(dev); |
778 | if (I915_HAS_FBC(dev)) { | 304 | if (I915_HAS_FBC(dev)) { |
@@ -787,19 +313,11 @@ static void i915_restore_display(struct drm_device *dev) | |||
787 | I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL); | 313 | I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL); |
788 | } | 314 | } |
789 | } | 315 | } |
790 | /* VGA state */ | ||
791 | if (HAS_PCH_SPLIT(dev)) | ||
792 | I915_WRITE(CPU_VGACNTRL, dev_priv->regfile.saveVGACNTRL); | ||
793 | else | ||
794 | I915_WRITE(VGACNTRL, dev_priv->regfile.saveVGACNTRL); | ||
795 | 316 | ||
796 | I915_WRITE(VGA0, dev_priv->regfile.saveVGA0); | 317 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
797 | I915_WRITE(VGA1, dev_priv->regfile.saveVGA1); | 318 | i915_restore_vga(dev); |
798 | I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD); | 319 | else |
799 | POSTING_READ(VGA_PD); | 320 | i915_redisable_vga(dev); |
800 | udelay(150); | ||
801 | |||
802 | i915_restore_vga(dev); | ||
803 | } | 321 | } |
804 | 322 | ||
805 | int i915_save_state(struct drm_device *dev) | 323 | int i915_save_state(struct drm_device *dev) |
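After this split the save/restore entry points branch exactly once on DRIVER_MODESET: KMS keeps only what it cannot reconstruct, while the full UMS register file moves to the new i915_ums.c below. A minimal sketch of the resulting restore flow, condensed from the hunks above:

	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
		i915_restore_display_reg(dev);	/* full UMS register file */
		i915_restore_vga(dev);		/* legacy VGA core state */
	} else {
		i915_redisable_vga(dev);	/* KMS: just keep VGA disabled */
	}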
diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c
new file mode 100644
index 000000000000..985a09716237
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_ums.c
@@ -0,0 +1,503 @@ | |||
1 | /* | ||
2 | * | ||
3 | * Copyright 2008 (c) Intel Corporation | ||
4 | * Jesse Barnes <jbarnes@virtuousgeek.org> | ||
5 | * Copyright 2013 (c) Intel Corporation | ||
6 | * Daniel Vetter <daniel.vetter@ffwll.ch> | ||
7 | * | ||
8 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
9 | * copy of this software and associated documentation files (the | ||
10 | * "Software"), to deal in the Software without restriction, including | ||
11 | * without limitation the rights to use, copy, modify, merge, publish, | ||
12 | * distribute, sub license, and/or sell copies of the Software, and to | ||
13 | * permit persons to whom the Software is furnished to do so, subject to | ||
14 | * the following conditions: | ||
15 | * | ||
16 | * The above copyright notice and this permission notice (including the | ||
17 | * next paragraph) shall be included in all copies or substantial portions | ||
18 | * of the Software. | ||
19 | * | ||
20 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS | ||
21 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
22 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. | ||
23 | * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR | ||
24 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, | ||
25 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE | ||
26 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
27 | */ | ||
28 | |||
29 | #include <drm/drmP.h> | ||
30 | #include <drm/i915_drm.h> | ||
31 | #include "intel_drv.h" | ||
32 | #include "i915_reg.h" | ||
33 | |||
34 | static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) | ||
35 | { | ||
36 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
37 | u32 dpll_reg; | ||
38 | |||
39 | /* On IVB, 3rd pipe shares PLL with another one */ | ||
40 | if (pipe > 1) | ||
41 | return false; | ||
42 | |||
43 | if (HAS_PCH_SPLIT(dev)) | ||
44 | dpll_reg = _PCH_DPLL(pipe); | ||
45 | else | ||
46 | dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B; | ||
47 | |||
48 | return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE); | ||
49 | } | ||
50 | |||
51 | static void i915_save_palette(struct drm_device *dev, enum pipe pipe) | ||
52 | { | ||
53 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
54 | unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B); | ||
55 | u32 *array; | ||
56 | int i; | ||
57 | |||
58 | if (!i915_pipe_enabled(dev, pipe)) | ||
59 | return; | ||
60 | |||
61 | if (HAS_PCH_SPLIT(dev)) | ||
62 | reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B; | ||
63 | |||
64 | if (pipe == PIPE_A) | ||
65 | array = dev_priv->regfile.save_palette_a; | ||
66 | else | ||
67 | array = dev_priv->regfile.save_palette_b; | ||
68 | |||
69 | for (i = 0; i < 256; i++) | ||
70 | array[i] = I915_READ(reg + (i << 2)); | ||
71 | } | ||
72 | |||
73 | static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) | ||
74 | { | ||
75 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
76 | unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B); | ||
77 | u32 *array; | ||
78 | int i; | ||
79 | |||
80 | if (!i915_pipe_enabled(dev, pipe)) | ||
81 | return; | ||
82 | |||
83 | if (HAS_PCH_SPLIT(dev)) | ||
84 | reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B; | ||
85 | |||
86 | if (pipe == PIPE_A) | ||
87 | array = dev_priv->regfile.save_palette_a; | ||
88 | else | ||
89 | array = dev_priv->regfile.save_palette_b; | ||
90 | |||
91 | for (i = 0; i < 256; i++) | ||
92 | I915_WRITE(reg + (i << 2), array[i]); | ||
93 | } | ||
94 | |||
95 | void i915_save_display_reg(struct drm_device *dev) | ||
96 | { | ||
97 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
98 | int i; | ||
99 | |||
100 | /* Cursor state */ | ||
101 | dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR); | ||
102 | dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS); | ||
103 | dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE); | ||
104 | dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR); | ||
105 | dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS); | ||
106 | dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE); | ||
107 | if (IS_GEN2(dev)) | ||
108 | dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE); | ||
109 | |||
110 | if (HAS_PCH_SPLIT(dev)) { | ||
111 | dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); | ||
112 | dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); | ||
113 | } | ||
114 | |||
115 | /* Pipe & plane A info */ | ||
116 | dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF); | ||
117 | dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC); | ||
118 | if (HAS_PCH_SPLIT(dev)) { | ||
119 | dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0); | ||
120 | dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1); | ||
121 | dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A); | ||
122 | } else { | ||
123 | dev_priv->regfile.saveFPA0 = I915_READ(_FPA0); | ||
124 | dev_priv->regfile.saveFPA1 = I915_READ(_FPA1); | ||
125 | dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A); | ||
126 | } | ||
127 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) | ||
128 | dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD); | ||
129 | dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A); | ||
130 | dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A); | ||
131 | dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A); | ||
132 | dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A); | ||
133 | dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A); | ||
134 | dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A); | ||
135 | if (!HAS_PCH_SPLIT(dev)) | ||
136 | dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A); | ||
137 | |||
138 | if (HAS_PCH_SPLIT(dev)) { | ||
139 | dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1); | ||
140 | dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1); | ||
141 | dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1); | ||
142 | dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1); | ||
143 | |||
144 | dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL); | ||
145 | dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL); | ||
146 | |||
147 | dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1); | ||
148 | dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ); | ||
149 | dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS); | ||
150 | |||
151 | dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF); | ||
152 | dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A); | ||
153 | dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A); | ||
154 | dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A); | ||
155 | dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A); | ||
156 | dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A); | ||
157 | dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A); | ||
158 | } | ||
159 | |||
160 | dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR); | ||
161 | dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE); | ||
162 | dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE); | ||
163 | dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS); | ||
164 | dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR); | ||
165 | if (INTEL_INFO(dev)->gen >= 4) { | ||
166 | dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF); | ||
167 | dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF); | ||
168 | } | ||
169 | i915_save_palette(dev, PIPE_A); | ||
170 | dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT); | ||
171 | |||
172 | /* Pipe & plane B info */ | ||
173 | dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF); | ||
174 | dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC); | ||
175 | if (HAS_PCH_SPLIT(dev)) { | ||
176 | dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0); | ||
177 | dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1); | ||
178 | dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B); | ||
179 | } else { | ||
180 | dev_priv->regfile.saveFPB0 = I915_READ(_FPB0); | ||
181 | dev_priv->regfile.saveFPB1 = I915_READ(_FPB1); | ||
182 | dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B); | ||
183 | } | ||
184 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) | ||
185 | dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD); | ||
186 | dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B); | ||
187 | dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B); | ||
188 | dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B); | ||
189 | dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B); | ||
190 | dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B); | ||
191 | dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B); | ||
192 | if (!HAS_PCH_SPLIT(dev)) | ||
193 | dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B); | ||
194 | |||
195 | if (HAS_PCH_SPLIT(dev)) { | ||
196 | dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1); | ||
197 | dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1); | ||
198 | dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1); | ||
199 | dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1); | ||
200 | |||
201 | dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL); | ||
202 | dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL); | ||
203 | |||
204 | dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1); | ||
205 | dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ); | ||
206 | dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS); | ||
207 | |||
208 | dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF); | ||
209 | dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B); | ||
210 | dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B); | ||
211 | dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B); | ||
212 | dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B); | ||
213 | dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B); | ||
214 | dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B); | ||
215 | } | ||
216 | |||
217 | dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR); | ||
218 | dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE); | ||
219 | dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE); | ||
220 | dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS); | ||
221 | dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR); | ||
222 | if (INTEL_INFO(dev)->gen >= 4) { | ||
223 | dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF); | ||
224 | dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF); | ||
225 | } | ||
226 | i915_save_palette(dev, PIPE_B); | ||
227 | dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT); | ||
228 | |||
229 | /* Fences */ | ||
230 | switch (INTEL_INFO(dev)->gen) { | ||
231 | case 7: | ||
232 | case 6: | ||
233 | for (i = 0; i < 16; i++) | ||
234 | dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); | ||
235 | break; | ||
236 | case 5: | ||
237 | case 4: | ||
238 | for (i = 0; i < 16; i++) | ||
239 | dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); | ||
240 | break; | ||
241 | case 3: | ||
242 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
243 | for (i = 0; i < 8; i++) | ||
244 | dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); | ||
245 | case 2: | ||
246 | for (i = 0; i < 8; i++) | ||
247 | dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); | ||
248 | break; | ||
249 | } | ||
250 | |||
251 | /* CRT state */ | ||
252 | if (HAS_PCH_SPLIT(dev)) | ||
253 | dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA); | ||
254 | else | ||
255 | dev_priv->regfile.saveADPA = I915_READ(ADPA); | ||
256 | |||
257 | /* Display Port state */ | ||
258 | if (SUPPORTS_INTEGRATED_DP(dev)) { | ||
259 | dev_priv->regfile.saveDP_B = I915_READ(DP_B); | ||
260 | dev_priv->regfile.saveDP_C = I915_READ(DP_C); | ||
261 | dev_priv->regfile.saveDP_D = I915_READ(DP_D); | ||
262 | dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M); | ||
263 | dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M); | ||
264 | dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N); | ||
265 | dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N); | ||
266 | dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M); | ||
267 | dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M); | ||
268 | dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N); | ||
269 | dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N); | ||
270 | } | ||
271 | /* FIXME: regfile.save TV & SDVO state */ | ||
272 | |||
273 | return; | ||
274 | } | ||
275 | |||
276 | void i915_restore_display_reg(struct drm_device *dev) | ||
277 | { | ||
278 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
279 | int dpll_a_reg, fpa0_reg, fpa1_reg; | ||
280 | int dpll_b_reg, fpb0_reg, fpb1_reg; | ||
281 | int i; | ||
282 | |||
283 | /* Display port ratios (must be done before clock is set) */ | ||
284 | if (SUPPORTS_INTEGRATED_DP(dev)) { | ||
285 | I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M); | ||
286 | I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M); | ||
287 | I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N); | ||
288 | I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N); | ||
289 | I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M); | ||
290 | I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M); | ||
291 | I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N); | ||
292 | I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N); | ||
293 | } | ||
294 | |||
295 | /* Fences */ | ||
296 | switch (INTEL_INFO(dev)->gen) { | ||
297 | case 7: | ||
298 | case 6: | ||
299 | for (i = 0; i < 16; i++) | ||
300 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]); | ||
301 | break; | ||
302 | case 5: | ||
303 | case 4: | ||
304 | for (i = 0; i < 16; i++) | ||
305 | I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]); | ||
306 | break; | ||
307 | case 3: | ||
308 | case 2: | ||
309 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
310 | for (i = 0; i < 8; i++) | ||
311 | I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]); | ||
312 | for (i = 0; i < 8; i++) | ||
313 | I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]); | ||
314 | break; | ||
315 | } | ||
316 | |||
317 | |||
318 | if (HAS_PCH_SPLIT(dev)) { | ||
319 | dpll_a_reg = _PCH_DPLL_A; | ||
320 | dpll_b_reg = _PCH_DPLL_B; | ||
321 | fpa0_reg = _PCH_FPA0; | ||
322 | fpb0_reg = _PCH_FPB0; | ||
323 | fpa1_reg = _PCH_FPA1; | ||
324 | fpb1_reg = _PCH_FPB1; | ||
325 | } else { | ||
326 | dpll_a_reg = _DPLL_A; | ||
327 | dpll_b_reg = _DPLL_B; | ||
328 | fpa0_reg = _FPA0; | ||
329 | fpb0_reg = _FPB0; | ||
330 | fpa1_reg = _FPA1; | ||
331 | fpb1_reg = _FPB1; | ||
332 | } | ||
333 | |||
334 | if (HAS_PCH_SPLIT(dev)) { | ||
335 | I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL); | ||
336 | I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL); | ||
337 | } | ||
338 | |||
339 | /* Pipe & plane A info */ | ||
340 | /* Prime the clock */ | ||
341 | if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) { | ||
342 | I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A & | ||
343 | ~DPLL_VCO_ENABLE); | ||
344 | POSTING_READ(dpll_a_reg); | ||
345 | udelay(150); | ||
346 | } | ||
347 | I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0); | ||
348 | I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1); | ||
349 | /* Actually enable it */ | ||
350 | I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A); | ||
351 | POSTING_READ(dpll_a_reg); | ||
352 | udelay(150); | ||
353 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { | ||
354 | I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD); | ||
355 | POSTING_READ(_DPLL_A_MD); | ||
356 | } | ||
357 | udelay(150); | ||
358 | |||
359 | /* Restore mode */ | ||
360 | I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A); | ||
361 | I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A); | ||
362 | I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A); | ||
363 | I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A); | ||
364 | I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A); | ||
365 | I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A); | ||
366 | if (!HAS_PCH_SPLIT(dev)) | ||
367 | I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A); | ||
368 | |||
369 | if (HAS_PCH_SPLIT(dev)) { | ||
370 | I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1); | ||
371 | I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1); | ||
372 | I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1); | ||
373 | I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1); | ||
374 | |||
375 | I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL); | ||
376 | I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL); | ||
377 | |||
378 | I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1); | ||
379 | I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ); | ||
380 | I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS); | ||
381 | |||
382 | I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF); | ||
383 | I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A); | ||
384 | I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A); | ||
385 | I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A); | ||
386 | I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A); | ||
387 | I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A); | ||
388 | I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A); | ||
389 | } | ||
390 | |||
391 | /* Restore plane info */ | ||
392 | I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE); | ||
393 | I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS); | ||
394 | I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC); | ||
395 | I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR); | ||
396 | I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE); | ||
397 | if (INTEL_INFO(dev)->gen >= 4) { | ||
398 | I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF); | ||
399 | I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF); | ||
400 | } | ||
401 | |||
402 | I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF); | ||
403 | |||
404 | i915_restore_palette(dev, PIPE_A); | ||
405 | /* Enable the plane */ | ||
406 | I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR); | ||
407 | I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR)); | ||
408 | |||
409 | /* Pipe & plane B info */ | ||
410 | if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) { | ||
411 | I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B & | ||
412 | ~DPLL_VCO_ENABLE); | ||
413 | POSTING_READ(dpll_b_reg); | ||
414 | udelay(150); | ||
415 | } | ||
416 | I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0); | ||
417 | I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1); | ||
418 | /* Actually enable it */ | ||
419 | I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B); | ||
420 | POSTING_READ(dpll_b_reg); | ||
421 | udelay(150); | ||
422 | if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { | ||
423 | I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD); | ||
424 | POSTING_READ(_DPLL_B_MD); | ||
425 | } | ||
426 | udelay(150); | ||
427 | |||
428 | /* Restore mode */ | ||
429 | I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B); | ||
430 | I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B); | ||
431 | I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B); | ||
432 | I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B); | ||
433 | I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B); | ||
434 | I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B); | ||
435 | if (!HAS_PCH_SPLIT(dev)) | ||
436 | I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B); | ||
437 | |||
438 | if (HAS_PCH_SPLIT(dev)) { | ||
439 | I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1); | ||
440 | I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1); | ||
441 | I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1); | ||
442 | I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1); | ||
443 | |||
444 | I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL); | ||
445 | I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL); | ||
446 | |||
447 | I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1); | ||
448 | I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ); | ||
449 | I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS); | ||
450 | |||
451 | I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF); | ||
452 | I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B); | ||
453 | I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B); | ||
454 | I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B); | ||
455 | I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B); | ||
456 | I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B); | ||
457 | I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B); | ||
458 | } | ||
459 | |||
460 | /* Restore plane info */ | ||
461 | I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE); | ||
462 | I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS); | ||
463 | I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC); | ||
464 | I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR); | ||
465 | I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE); | ||
466 | if (INTEL_INFO(dev)->gen >= 4) { | ||
467 | I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF); | ||
468 | I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF); | ||
469 | } | ||
470 | |||
471 | I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF); | ||
472 | |||
473 | i915_restore_palette(dev, PIPE_B); | ||
474 | /* Enable the plane */ | ||
475 | I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR); | ||
476 | I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR)); | ||
477 | |||
478 | /* Cursor state */ | ||
479 | I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS); | ||
480 | I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR); | ||
481 | I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE); | ||
482 | I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS); | ||
483 | I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR); | ||
484 | I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE); | ||
485 | if (IS_GEN2(dev)) | ||
486 | I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE); | ||
487 | |||
488 | /* CRT state */ | ||
489 | if (HAS_PCH_SPLIT(dev)) | ||
490 | I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA); | ||
491 | else | ||
492 | I915_WRITE(ADPA, dev_priv->regfile.saveADPA); | ||
493 | |||
494 | /* Display Port state */ | ||
495 | if (SUPPORTS_INTEGRATED_DP(dev)) { | ||
496 | I915_WRITE(DP_B, dev_priv->regfile.saveDP_B); | ||
497 | I915_WRITE(DP_C, dev_priv->regfile.saveDP_C); | ||
498 | I915_WRITE(DP_D, dev_priv->regfile.saveDP_D); | ||
499 | } | ||
500 | /* FIXME: restore TV & SDVO state */ | ||
501 | |||
502 | return; | ||
503 | } | ||
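The restore path above re-arms each DPLL in a fixed order: drop the VCO enable bit if the saved state had it set, program the FP dividers while the PLL is down, write the full saved control word back, and allow roughly 150us for the PLL to settle after each transition. A minimal standalone sketch of that ordering, with stand-in accessors instead of the driver's I915_WRITE/POSTING_READ/udelay:

/*
 * Illustrative sketch only: models the DPLL restore ordering used
 * above. The accessors below are stand-ins, not the driver's.
 */
#include <stdint.h>

#define DPLL_VCO_ENABLE (1u << 31)

struct pll_regs { uint32_t dpll, fp0, fp1; };

static void write_reg(uint32_t *reg, uint32_t val) { *reg = val; }
static void settle_us(unsigned int us) { (void)us; /* udelay() stand-in */ }

static void restore_pll(struct pll_regs *r, uint32_t saved_dpll,
			uint32_t saved_fp0, uint32_t saved_fp1)
{
	/* If the saved state had the VCO running, park it first. */
	if (saved_dpll & DPLL_VCO_ENABLE) {
		write_reg(&r->dpll, saved_dpll & ~DPLL_VCO_ENABLE);
		settle_us(150);
	}

	/* Program the dividers while the PLL is down... */
	write_reg(&r->fp0, saved_fp0);
	write_reg(&r->fp1, saved_fp1);

	/* ...then restore the full control word and let it lock. */
	write_reg(&r->dpll, saved_dpll);
	settle_us(150);
}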
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 71a5ebad14fb..68e79f32e100 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -267,27 +267,27 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) | |||
267 | 267 | ||
268 | crt->force_hotplug_required = 0; | 268 | crt->force_hotplug_required = 0; |
269 | 269 | ||
270 | save_adpa = adpa = I915_READ(PCH_ADPA); | 270 | save_adpa = adpa = I915_READ(crt->adpa_reg); |
271 | DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); | 271 | DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); |
272 | 272 | ||
273 | adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER; | 273 | adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER; |
274 | if (turn_off_dac) | 274 | if (turn_off_dac) |
275 | adpa &= ~ADPA_DAC_ENABLE; | 275 | adpa &= ~ADPA_DAC_ENABLE; |
276 | 276 | ||
277 | I915_WRITE(PCH_ADPA, adpa); | 277 | I915_WRITE(crt->adpa_reg, adpa); |
278 | 278 | ||
279 | if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, | 279 | if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, |
280 | 1000)) | 280 | 1000)) |
281 | DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER\n"); | 281 | DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER\n");
282 | 282 | ||
283 | if (turn_off_dac) { | 283 | if (turn_off_dac) { |
284 | I915_WRITE(PCH_ADPA, save_adpa); | 284 | I915_WRITE(crt->adpa_reg, save_adpa); |
285 | POSTING_READ(PCH_ADPA); | 285 | POSTING_READ(crt->adpa_reg); |
286 | } | 286 | } |
287 | } | 287 | } |
288 | 288 | ||
289 | /* Check the status to see if both blue and green are on now */ | 289 | /* Check the status to see if both blue and green are on now */ |
290 | adpa = I915_READ(PCH_ADPA); | 290 | adpa = I915_READ(crt->adpa_reg); |
291 | if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0) | 291 | if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0) |
292 | ret = true; | 292 | ret = true; |
293 | else | 293 | else |
@@ -300,26 +300,27 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) | |||
300 | static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) | 300 | static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) |
301 | { | 301 | { |
302 | struct drm_device *dev = connector->dev; | 302 | struct drm_device *dev = connector->dev; |
303 | struct intel_crt *crt = intel_attached_crt(connector); | ||
303 | struct drm_i915_private *dev_priv = dev->dev_private; | 304 | struct drm_i915_private *dev_priv = dev->dev_private; |
304 | u32 adpa; | 305 | u32 adpa; |
305 | bool ret; | 306 | bool ret; |
306 | u32 save_adpa; | 307 | u32 save_adpa; |
307 | 308 | ||
308 | save_adpa = adpa = I915_READ(ADPA); | 309 | save_adpa = adpa = I915_READ(crt->adpa_reg); |
309 | DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); | 310 | DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); |
310 | 311 | ||
311 | adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER; | 312 | adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER; |
312 | 313 | ||
313 | I915_WRITE(ADPA, adpa); | 314 | I915_WRITE(crt->adpa_reg, adpa); |
314 | 315 | ||
315 | if (wait_for((I915_READ(ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, | 316 | if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, |
316 | 1000)) { | 317 | 1000)) { |
317 | DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER\n"); | 318 | DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER\n");
318 | I915_WRITE(ADPA, save_adpa); | 319 | I915_WRITE(crt->adpa_reg, save_adpa); |
319 | } | 320 | } |
320 | 321 | ||
321 | /* Check the status to see if both blue and green are on now */ | 322 | /* Check the status to see if both blue and green are on now */ |
322 | adpa = I915_READ(ADPA); | 323 | adpa = I915_READ(crt->adpa_reg); |
323 | if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0) | 324 | if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0) |
324 | ret = true; | 325 | ret = true; |
325 | else | 326 | else |
@@ -665,11 +666,11 @@ static void intel_crt_reset(struct drm_connector *connector) | |||
665 | if (HAS_PCH_SPLIT(dev)) { | 666 | if (HAS_PCH_SPLIT(dev)) { |
666 | u32 adpa; | 667 | u32 adpa; |
667 | 668 | ||
668 | adpa = I915_READ(PCH_ADPA); | 669 | adpa = I915_READ(crt->adpa_reg); |
669 | adpa &= ~ADPA_CRT_HOTPLUG_MASK; | 670 | adpa &= ~ADPA_CRT_HOTPLUG_MASK; |
670 | adpa |= ADPA_HOTPLUG_BITS; | 671 | adpa |= ADPA_HOTPLUG_BITS; |
671 | I915_WRITE(PCH_ADPA, adpa); | 672 | I915_WRITE(crt->adpa_reg, adpa); |
672 | POSTING_READ(PCH_ADPA); | 673 | POSTING_READ(crt->adpa_reg); |
673 | 674 | ||
674 | DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa); | 675 | DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa); |
675 | crt->force_hotplug_required = 1; | 676 | crt->force_hotplug_required = 1; |
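All three hunks above make the same substitution: the CRT hotplug paths stop hard-coding PCH_ADPA (or ADPA on Valleyview) and go through crt->adpa_reg, so one routine serves every platform once the register is picked at init time. The detect cycle itself is a force-trigger-and-poll pattern; a hedged standalone model, with invented names and a plain spin in place of the driver's wait_for():

/*
 * Hedged model of the force-trigger-and-poll detect cycle, with the
 * register passed in rather than hard-coded (the point of switching
 * from PCH_ADPA/ADPA to crt->adpa_reg). All names here are invented.
 */
#include <stdbool.h>
#include <stdint.h>

#define FORCE_TRIGGER	(1u << 16)	/* illustrative bit position */

static bool poll_until_clear(volatile uint32_t *reg, uint32_t bit,
			     long max_tries)
{
	while (max_tries--)
		if (!(*reg & bit))	/* hardware clears bit when done */
			return true;
	return false;			/* timed out */
}

static bool kick_hotplug(volatile uint32_t *adpa_reg)
{
	uint32_t saved = *adpa_reg;
	bool ok;

	*adpa_reg = saved | FORCE_TRIGGER;	/* start a detect cycle */
	ok = poll_until_clear(adpa_reg, FORCE_TRIGGER, 1000000L);
	if (!ok)
		*adpa_reg = saved;		/* restore on timeout */
	return ok;
}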
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 2e904a5cd6cb..cedf4ab5ff16 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -677,6 +677,7 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder, | |||
677 | DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n", | 677 | DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n", |
678 | port_name(port), pipe_name(pipe)); | 678 | port_name(port), pipe_name(pipe)); |
679 | 679 | ||
680 | intel_crtc->eld_vld = false; | ||
680 | if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { | 681 | if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { |
681 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 682 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
682 | 683 | ||
@@ -987,7 +988,13 @@ void intel_ddi_enable_pipe_func(struct drm_crtc *crtc) | |||
987 | if (cpu_transcoder == TRANSCODER_EDP) { | 988 | if (cpu_transcoder == TRANSCODER_EDP) { |
988 | switch (pipe) { | 989 | switch (pipe) { |
989 | case PIPE_A: | 990 | case PIPE_A: |
990 | temp |= TRANS_DDI_EDP_INPUT_A_ONOFF; | 991 | /* Can only use the always-on power well for eDP when |
992 | * not using the panel fitter, and when not using motion | ||
993 | * blur mitigation (which we don't support). */ | ||
994 | if (dev_priv->pch_pf_size) | ||
995 | temp |= TRANS_DDI_EDP_INPUT_A_ONOFF; | ||
996 | else | ||
997 | temp |= TRANS_DDI_EDP_INPUT_A_ON; | ||
991 | break; | 998 | break; |
992 | case PIPE_B: | 999 | case PIPE_B: |
993 | temp |= TRANS_DDI_EDP_INPUT_B_ONOFF; | 1000 | temp |= TRANS_DDI_EDP_INPUT_B_ONOFF; |
@@ -1287,10 +1294,14 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder) | |||
1287 | static void intel_enable_ddi(struct intel_encoder *intel_encoder) | 1294 | static void intel_enable_ddi(struct intel_encoder *intel_encoder) |
1288 | { | 1295 | { |
1289 | struct drm_encoder *encoder = &intel_encoder->base; | 1296 | struct drm_encoder *encoder = &intel_encoder->base; |
1297 | struct drm_crtc *crtc = encoder->crtc; | ||
1298 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
1299 | int pipe = intel_crtc->pipe; | ||
1290 | struct drm_device *dev = encoder->dev; | 1300 | struct drm_device *dev = encoder->dev; |
1291 | struct drm_i915_private *dev_priv = dev->dev_private; | 1301 | struct drm_i915_private *dev_priv = dev->dev_private; |
1292 | enum port port = intel_ddi_get_encoder_port(intel_encoder); | 1302 | enum port port = intel_ddi_get_encoder_port(intel_encoder); |
1293 | int type = intel_encoder->type; | 1303 | int type = intel_encoder->type; |
1304 | uint32_t tmp; | ||
1294 | 1305 | ||
1295 | if (type == INTEL_OUTPUT_HDMI) { | 1306 | if (type == INTEL_OUTPUT_HDMI) { |
1296 | /* In HDMI/DVI mode, the port width, and swing/emphasis values | 1307 | /* In HDMI/DVI mode, the port width, and swing/emphasis values |
@@ -1303,18 +1314,34 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder) | |||
1303 | 1314 | ||
1304 | ironlake_edp_backlight_on(intel_dp); | 1315 | ironlake_edp_backlight_on(intel_dp); |
1305 | } | 1316 | } |
1317 | |||
1318 | if (intel_crtc->eld_vld) { | ||
1319 | tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); | ||
1320 | tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4)); | ||
1321 | I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp); | ||
1322 | } | ||
1306 | } | 1323 | } |
1307 | 1324 | ||
1308 | static void intel_disable_ddi(struct intel_encoder *intel_encoder) | 1325 | static void intel_disable_ddi(struct intel_encoder *intel_encoder) |
1309 | { | 1326 | { |
1310 | struct drm_encoder *encoder = &intel_encoder->base; | 1327 | struct drm_encoder *encoder = &intel_encoder->base; |
1328 | struct drm_crtc *crtc = encoder->crtc; | ||
1329 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
1330 | int pipe = intel_crtc->pipe; | ||
1311 | int type = intel_encoder->type; | 1331 | int type = intel_encoder->type; |
1332 | struct drm_device *dev = encoder->dev; | ||
1333 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1334 | uint32_t tmp; | ||
1312 | 1335 | ||
1313 | if (type == INTEL_OUTPUT_EDP) { | 1336 | if (type == INTEL_OUTPUT_EDP) { |
1314 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 1337 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
1315 | 1338 | ||
1316 | ironlake_edp_backlight_off(intel_dp); | 1339 | ironlake_edp_backlight_off(intel_dp); |
1317 | } | 1340 | } |
1341 | |||
1342 | tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); | ||
1343 | tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4)); | ||
1344 | I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp); | ||
1318 | } | 1345 | } |
1319 | 1346 | ||
1320 | int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv) | 1347 | int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv) |
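The eDP hunk above encodes a power-domain constraint: on Haswell, pipe A can feed the eDP transcoder through an always-on input, but only while the panel fitter is idle; if pch_pf_size is set, the switchable input that tracks the power well must be used instead. The audio hunks use a related per-pipe layout, shifting the output-enable and ELD-valid bits by pipe * 4 within HSW_AUD_PIN_ELD_CP_VLD. A small model of the input choice, with illustrative constants rather than the hardware encoding:

/*
 * Minimal model of the eDP transcoder input choice above. The
 * constants are illustrative, not the hardware encoding.
 */
#include <stdint.h>

#define EDP_INPUT_A_ONOFF	0x1	/* input that tracks the power well */
#define EDP_INPUT_A_ON		0x2	/* always-on input, pipe A only */

static uint32_t edp_pipe_a_input(uint32_t pch_pf_size)
{
	/* Panel fitter active: must take the input that tracks the well. */
	return pch_pf_size ? EDP_INPUT_A_ONOFF : EDP_INPUT_A_ON;
}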
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 53ec6cb0ffdb..0dfecaf599ff 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -1214,9 +1214,15 @@ void assert_pipe(struct drm_i915_private *dev_priv, | |||
1214 | if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) | 1214 | if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) |
1215 | state = true; | 1215 | state = true; |
1216 | 1216 | ||
1217 | reg = PIPECONF(cpu_transcoder); | 1217 | if (IS_HASWELL(dev_priv->dev) && cpu_transcoder != TRANSCODER_EDP && |
1218 | val = I915_READ(reg); | 1218 | !(I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_ENABLE)) { |
1219 | cur_state = !!(val & PIPECONF_ENABLE); | 1219 | cur_state = false; |
1220 | } else { | ||
1221 | reg = PIPECONF(cpu_transcoder); | ||
1222 | val = I915_READ(reg); | ||
1223 | cur_state = !!(val & PIPECONF_ENABLE); | ||
1224 | } | ||
1225 | |||
1220 | WARN(cur_state != state, | 1226 | WARN(cur_state != state, |
1221 | "pipe %c assertion failure (expected %s, current %s)\n", | 1227 | "pipe %c assertion failure (expected %s, current %s)\n", |
1222 | pipe_name(pipe), state_string(state), state_string(cur_state)); | 1228 | pipe_name(pipe), state_string(state), state_string(cur_state)); |
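The new branch in assert_pipe() exists because a register behind a powered-down well cannot be read meaningfully: on Haswell, if the transcoder is not the always-on eDP one and HSW_PWR_WELL_DRIVER shows the well off, the pipe is reported disabled without touching PIPECONF. A boolean restatement of that logic, as a sketch whose parameters stand in for the real platform checks and register reads:

#include <stdbool.h>

static bool pipe_enabled(bool is_haswell, bool is_edp_transcoder,
			 bool power_well_on, bool pipeconf_enable_bit)
{
	/* Registers behind a switched-off well are not readable. */
	if (is_haswell && !is_edp_transcoder && !power_well_on)
		return false;
	return pipeconf_enable_bit;
}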
@@ -2220,8 +2226,10 @@ intel_finish_fb(struct drm_framebuffer *old_fb) | |||
2220 | bool was_interruptible = dev_priv->mm.interruptible; | 2226 | bool was_interruptible = dev_priv->mm.interruptible; |
2221 | int ret; | 2227 | int ret; |
2222 | 2228 | ||
2229 | WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue)); | ||
2230 | |||
2223 | wait_event(dev_priv->pending_flip_queue, | 2231 | wait_event(dev_priv->pending_flip_queue, |
2224 | atomic_read(&dev_priv->mm.wedged) || | 2232 | i915_reset_in_progress(&dev_priv->gpu_error) || |
2225 | atomic_read(&obj->pending_flip) == 0); | 2233 | atomic_read(&obj->pending_flip) == 0); |
2226 | 2234 | ||
2227 | /* Big Hammer, we also need to ensure that any pending | 2235 | /* Big Hammer, we also need to ensure that any pending |
@@ -2869,7 +2877,7 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) | |||
2869 | unsigned long flags; | 2877 | unsigned long flags; |
2870 | bool pending; | 2878 | bool pending; |
2871 | 2879 | ||
2872 | if (atomic_read(&dev_priv->mm.wedged)) | 2880 | if (i915_reset_in_progress(&dev_priv->gpu_error)) |
2873 | return false; | 2881 | return false; |
2874 | 2882 | ||
2875 | spin_lock_irqsave(&dev->event_lock, flags); | 2883 | spin_lock_irqsave(&dev->event_lock, flags); |
@@ -2887,6 +2895,8 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) | |||
2887 | if (crtc->fb == NULL) | 2895 | if (crtc->fb == NULL) |
2888 | return; | 2896 | return; |
2889 | 2897 | ||
2898 | WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue)); | ||
2899 | |||
2890 | wait_event(dev_priv->pending_flip_queue, | 2900 | wait_event(dev_priv->pending_flip_queue, |
2891 | !intel_crtc_has_pending_flip(crtc)); | 2901 | !intel_crtc_has_pending_flip(crtc)); |
2892 | 2902 | ||
@@ -3717,10 +3727,12 @@ static void intel_crtc_disable(struct drm_crtc *crtc) | |||
3717 | struct drm_device *dev = crtc->dev; | 3727 | struct drm_device *dev = crtc->dev; |
3718 | struct drm_connector *connector; | 3728 | struct drm_connector *connector; |
3719 | struct drm_i915_private *dev_priv = dev->dev_private; | 3729 | struct drm_i915_private *dev_priv = dev->dev_private; |
3730 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
3720 | 3731 | ||
3721 | /* crtc should still be enabled when we disable it. */ | 3732 | /* crtc should still be enabled when we disable it. */ |
3722 | WARN_ON(!crtc->enabled); | 3733 | WARN_ON(!crtc->enabled); |
3723 | 3734 | ||
3735 | intel_crtc->eld_vld = false; | ||
3724 | dev_priv->display.crtc_disable(crtc); | 3736 | dev_priv->display.crtc_disable(crtc); |
3725 | intel_crtc_update_sarea(crtc, false); | 3737 | intel_crtc_update_sarea(crtc, false); |
3726 | dev_priv->display.off(crtc); | 3738 | dev_priv->display.off(crtc); |
@@ -4867,6 +4879,8 @@ static void lpt_init_pch_refclk(struct drm_device *dev) | |||
4867 | if (!has_vga) | 4879 | if (!has_vga) |
4868 | return; | 4880 | return; |
4869 | 4881 | ||
4882 | mutex_lock(&dev_priv->dpio_lock); | ||
4883 | |||
4870 | /* XXX: Rip out SDV support once Haswell ships for real. */ | 4884 | /* XXX: Rip out SDV support once Haswell ships for real. */ |
4871 | if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00) | 4885 | if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00) |
4872 | is_sdv = true; | 4886 | is_sdv = true; |
@@ -5009,6 +5023,8 @@ static void lpt_init_pch_refclk(struct drm_device *dev) | |||
5009 | tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK); | 5023 | tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK); |
5010 | tmp |= SBI_DBUFF0_ENABLE; | 5024 | tmp |= SBI_DBUFF0_ENABLE; |
5011 | intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK); | 5025 | intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK); |
5026 | |||
5027 | mutex_unlock(&dev_priv->dpio_lock); | ||
5012 | } | 5028 | } |
5013 | 5029 | ||
5014 | /* | 5030 | /* |
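The two small hunks above bracket the whole LPT refclk sequence in dpio_lock, so concurrent callers cannot interleave their sideband (SBI) read-modify-write pairs. A toy model of the pattern, with a pthread mutex and a plain array standing in for the kernel mutex and the sideband registers:

/*
 * Toy model of the locking added above: every multi-step sideband
 * sequence runs under one mutex, so sequences never interleave.
 */
#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t sbi_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t sbi_regs[16];

static uint32_t sbi_read(int reg)		{ return sbi_regs[reg]; }
static void sbi_write(int reg, uint32_t v)	{ sbi_regs[reg] = v; }

static void refclk_sequence(void)
{
	pthread_mutex_lock(&sbi_lock);
	/* Read-modify-write pairs stay atomic w.r.t. other sequences. */
	sbi_write(0, sbi_read(0) | 1u);
	sbi_write(1, sbi_read(1) & ~2u);
	pthread_mutex_unlock(&sbi_lock);
}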
@@ -5092,6 +5108,11 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc, | |||
5092 | else | 5108 | else |
5093 | val |= PIPECONF_PROGRESSIVE; | 5109 | val |= PIPECONF_PROGRESSIVE; |
5094 | 5110 | ||
5111 | if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE) | ||
5112 | val |= PIPECONF_COLOR_RANGE_SELECT; | ||
5113 | else | ||
5114 | val &= ~PIPECONF_COLOR_RANGE_SELECT; | ||
5115 | |||
5095 | I915_WRITE(PIPECONF(pipe), val); | 5116 | I915_WRITE(PIPECONF(pipe), val); |
5096 | POSTING_READ(PIPECONF(pipe)); | 5117 | POSTING_READ(PIPECONF(pipe)); |
5097 | } | 5118 | } |
@@ -5586,6 +5607,35 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
5586 | return fdi_config_ok ? ret : -EINVAL; | 5607 | return fdi_config_ok ? ret : -EINVAL; |
5587 | } | 5608 | } |
5588 | 5609 | ||
5610 | static void haswell_modeset_global_resources(struct drm_device *dev) | ||
5611 | { | ||
5612 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5613 | bool enable = false; | ||
5614 | struct intel_crtc *crtc; | ||
5615 | struct intel_encoder *encoder; | ||
5616 | |||
5617 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { | ||
5618 | if (crtc->pipe != PIPE_A && crtc->base.enabled) | ||
5619 | enable = true; | ||
5620 | /* XXX: Should check for the eDP transcoder here, but due to the | ||
5621 | * init sequence that's not yet available. Also covers desktop eDP | ||
5622 | * on PORT D, in case that turns out to be possible on Haswell. */ | ||
5623 | } | ||
5624 | |||
5625 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, | ||
5626 | base.head) { | ||
5627 | if (encoder->type != INTEL_OUTPUT_EDP && | ||
5628 | encoder->connectors_active) | ||
5629 | enable = true; | ||
5630 | } | ||
5631 | |||
5632 | /* Even the eDP panel fitter is outside the always-on well. */ | ||
5633 | if (dev_priv->pch_pf_size) | ||
5634 | enable = true; | ||
5635 | |||
5636 | intel_set_power_well(dev, enable); | ||
5637 | } | ||
5638 | |||
5589 | static int haswell_crtc_mode_set(struct drm_crtc *crtc, | 5639 | static int haswell_crtc_mode_set(struct drm_crtc *crtc, |
5590 | struct drm_display_mode *mode, | 5640 | struct drm_display_mode *mode, |
5591 | struct drm_display_mode *adjusted_mode, | 5641 | struct drm_display_mode *adjusted_mode, |
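haswell_modeset_global_resources() above decides whether the external power well may be switched off: it must stay on if any pipe other than A is enabled, if any non-eDP encoder has active connectors, or if the panel fitter is in use, since only pipe A's eDP path lives in the always-on domain. A standalone restatement of the decision, with simplified stand-ins for the driver's crtc and encoder lists:

#include <stdbool.h>
#include <stddef.h>

struct crtc_state	{ int pipe; bool enabled; };
struct encoder_state	{ bool is_edp; bool connectors_active; };

static bool power_well_needed(const struct crtc_state *crtcs, size_t nc,
			      const struct encoder_state *encs, size_t ne,
			      unsigned int pch_pf_size)
{
	size_t i;

	for (i = 0; i < nc; i++)
		if (crtcs[i].pipe != 0 && crtcs[i].enabled)
			return true;	/* pipes B/C live in the well */

	for (i = 0; i < ne; i++)
		if (!encs[i].is_edp && encs[i].connectors_active)
			return true;	/* only eDP is always-on */

	return pch_pf_size != 0;	/* the panel fitter is in the well too */
}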
@@ -5618,11 +5668,6 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc, | |||
5618 | num_connectors++; | 5668 | num_connectors++; |
5619 | } | 5669 | } |
5620 | 5670 | ||
5621 | if (is_cpu_edp) | ||
5622 | intel_crtc->cpu_transcoder = TRANSCODER_EDP; | ||
5623 | else | ||
5624 | intel_crtc->cpu_transcoder = pipe; | ||
5625 | |||
5626 | /* We are not sure yet this won't happen. */ | 5671 | /* We are not sure yet this won't happen. */ |
5627 | WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n", | 5672 | WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n", |
5628 | INTEL_PCH_TYPE(dev)); | 5673 | INTEL_PCH_TYPE(dev)); |
@@ -5687,6 +5732,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
5687 | int pipe = intel_crtc->pipe; | 5732 | int pipe = intel_crtc->pipe; |
5688 | int ret; | 5733 | int ret; |
5689 | 5734 | ||
5735 | if (IS_HASWELL(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) | ||
5736 | intel_crtc->cpu_transcoder = TRANSCODER_EDP; | ||
5737 | else | ||
5738 | intel_crtc->cpu_transcoder = pipe; | ||
5739 | |||
5690 | drm_vblank_pre_modeset(dev, pipe); | 5740 | drm_vblank_pre_modeset(dev, pipe); |
5691 | 5741 | ||
5692 | ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode, | 5742 | ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode, |
@@ -5783,6 +5833,7 @@ static void haswell_write_eld(struct drm_connector *connector, | |||
5783 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | 5833 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
5784 | uint8_t *eld = connector->eld; | 5834 | uint8_t *eld = connector->eld; |
5785 | struct drm_device *dev = crtc->dev; | 5835 | struct drm_device *dev = crtc->dev; |
5836 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
5786 | uint32_t eldv; | 5837 | uint32_t eldv; |
5787 | uint32_t i; | 5838 | uint32_t i; |
5788 | int len; | 5839 | int len; |
@@ -5824,6 +5875,7 @@ static void haswell_write_eld(struct drm_connector *connector, | |||
5824 | DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe)); | 5875 | DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe)); |
5825 | 5876 | ||
5826 | eldv = AUDIO_ELD_VALID_A << (pipe * 4); | 5877 | eldv = AUDIO_ELD_VALID_A << (pipe * 4); |
5878 | intel_crtc->eld_vld = true; | ||
5827 | 5879 | ||
5828 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { | 5880 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { |
5829 | DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); | 5881 | DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); |
@@ -6717,11 +6769,6 @@ void intel_mark_busy(struct drm_device *dev) | |||
6717 | 6769 | ||
6718 | void intel_mark_idle(struct drm_device *dev) | 6770 | void intel_mark_idle(struct drm_device *dev) |
6719 | { | 6771 | { |
6720 | } | ||
6721 | |||
6722 | void intel_mark_fb_busy(struct drm_i915_gem_object *obj) | ||
6723 | { | ||
6724 | struct drm_device *dev = obj->base.dev; | ||
6725 | struct drm_crtc *crtc; | 6772 | struct drm_crtc *crtc; |
6726 | 6773 | ||
6727 | if (!i915_powersave) | 6774 | if (!i915_powersave) |
@@ -6731,12 +6778,11 @@ void intel_mark_fb_busy(struct drm_i915_gem_object *obj) | |||
6731 | if (!crtc->fb) | 6778 | if (!crtc->fb) |
6732 | continue; | 6779 | continue; |
6733 | 6780 | ||
6734 | if (to_intel_framebuffer(crtc->fb)->obj == obj) | 6781 | intel_decrease_pllclock(crtc); |
6735 | intel_increase_pllclock(crtc); | ||
6736 | } | 6782 | } |
6737 | } | 6783 | } |
6738 | 6784 | ||
6739 | void intel_mark_fb_idle(struct drm_i915_gem_object *obj) | 6785 | void intel_mark_fb_busy(struct drm_i915_gem_object *obj) |
6740 | { | 6786 | { |
6741 | struct drm_device *dev = obj->base.dev; | 6787 | struct drm_device *dev = obj->base.dev; |
6742 | struct drm_crtc *crtc; | 6788 | struct drm_crtc *crtc; |
@@ -6749,7 +6795,7 @@ void intel_mark_fb_idle(struct drm_i915_gem_object *obj) | |||
6749 | continue; | 6795 | continue; |
6750 | 6796 | ||
6751 | if (to_intel_framebuffer(crtc->fb)->obj == obj) | 6797 | if (to_intel_framebuffer(crtc->fb)->obj == obj) |
6752 | intel_decrease_pllclock(crtc); | 6798 | intel_increase_pllclock(crtc); |
6753 | } | 6799 | } |
6754 | } | 6800 | } |
6755 | 6801 | ||
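The two hunks above swap bodies that had been attached to the wrong functions: intel_mark_idle() now downclocks every crtc that has a framebuffer, while intel_mark_fb_busy() upclocks only the crtc actually scanning out the busy object. A minimal model of the corrected split, with a plain counter standing in for the pll clock state:

#include <stddef.h>

struct crtc { const void *fb_obj; int pll_hint; };

static void mark_idle(struct crtc *crtcs, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (crtcs[i].fb_obj)		/* every active crtc */
			crtcs[i].pll_hint--;	/* decrease_pllclock */
}

static void mark_fb_busy(struct crtc *crtcs, size_t n, const void *obj)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (crtcs[i].fb_obj == obj)	/* only the busy object's crtc */
			crtcs[i].pll_hint++;	/* increase_pllclock */
}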
@@ -6833,7 +6879,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev, | |||
6833 | 6879 | ||
6834 | obj = work->old_fb_obj; | 6880 | obj = work->old_fb_obj; |
6835 | 6881 | ||
6836 | wake_up(&dev_priv->pending_flip_queue); | 6882 | wake_up_all(&dev_priv->pending_flip_queue); |
6837 | 6883 | ||
6838 | queue_work(dev_priv->wq, &work->work); | 6884 | queue_work(dev_priv->wq, &work->work); |
6839 | 6885 | ||
@@ -8219,23 +8265,18 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
8219 | if (I915_READ(PCH_DP_D) & DP_DETECTED) | 8265 | if (I915_READ(PCH_DP_D) & DP_DETECTED) |
8220 | intel_dp_init(dev, PCH_DP_D, PORT_D); | 8266 | intel_dp_init(dev, PCH_DP_D, PORT_D); |
8221 | } else if (IS_VALLEYVIEW(dev)) { | 8267 | } else if (IS_VALLEYVIEW(dev)) { |
8222 | int found; | ||
8223 | |||
8224 | /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */ | 8268 | /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */ |
8225 | if (I915_READ(DP_C) & DP_DETECTED) | 8269 | if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED) |
8226 | intel_dp_init(dev, DP_C, PORT_C); | 8270 | intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C); |
8227 | 8271 | ||
8228 | if (I915_READ(SDVOB) & PORT_DETECTED) { | 8272 | if (I915_READ(VLV_DISPLAY_BASE + SDVOB) & PORT_DETECTED) { |
8229 | /* SDVOB multiplex with HDMIB */ | 8273 | intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOB, PORT_B); |
8230 | found = intel_sdvo_init(dev, SDVOB, true); | 8274 | if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED) |
8231 | if (!found) | 8275 | intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B); |
8232 | intel_hdmi_init(dev, SDVOB, PORT_B); | ||
8233 | if (!found && (I915_READ(DP_B) & DP_DETECTED)) | ||
8234 | intel_dp_init(dev, DP_B, PORT_B); | ||
8235 | } | 8276 | } |
8236 | 8277 | ||
8237 | if (I915_READ(SDVOC) & PORT_DETECTED) | 8278 | if (I915_READ(VLV_DISPLAY_BASE + SDVOC) & PORT_DETECTED) |
8238 | intel_hdmi_init(dev, SDVOC, PORT_C); | 8279 | intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOC, PORT_C); |
8239 | 8280 | ||
8240 | } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { | 8281 | } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { |
8241 | bool found = false; | 8282 | bool found = false; |
@@ -8495,6 +8536,8 @@ static void intel_init_display(struct drm_device *dev) | |||
8495 | } else if (IS_HASWELL(dev)) { | 8536 | } else if (IS_HASWELL(dev)) { |
8496 | dev_priv->display.fdi_link_train = hsw_fdi_link_train; | 8537 | dev_priv->display.fdi_link_train = hsw_fdi_link_train; |
8497 | dev_priv->display.write_eld = haswell_write_eld; | 8538 | dev_priv->display.write_eld = haswell_write_eld; |
8539 | dev_priv->display.modeset_global_resources = | ||
8540 | haswell_modeset_global_resources; | ||
8498 | } | 8541 | } |
8499 | } else if (IS_G4X(dev)) { | 8542 | } else if (IS_G4X(dev)) { |
8500 | dev_priv->display.write_eld = g4x_write_eld; | 8543 | dev_priv->display.write_eld = g4x_write_eld; |
@@ -8617,6 +8660,15 @@ static struct intel_quirk intel_quirks[] = { | |||
8617 | 8660 | ||
8618 | /* Acer Aspire 5734Z must invert backlight brightness */ | 8661 | /* Acer Aspire 5734Z must invert backlight brightness */ |
8619 | { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness }, | 8662 | { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness }, |
8663 | |||
8664 | /* Acer/eMachines G725 */ | ||
8665 | { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness }, | ||
8666 | |||
8667 | /* Acer/eMachines e725 */ | ||
8668 | { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness }, | ||
8669 | |||
8670 | /* Acer/Packard Bell NCL20 */ | ||
8671 | { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness }, | ||
8620 | }; | 8672 | }; |
8621 | 8673 | ||
8622 | static void intel_init_quirks(struct drm_device *dev) | 8674 | static void intel_init_quirks(struct drm_device *dev) |
@@ -8645,12 +8697,7 @@ static void i915_disable_vga(struct drm_device *dev) | |||
8645 | { | 8697 | { |
8646 | struct drm_i915_private *dev_priv = dev->dev_private; | 8698 | struct drm_i915_private *dev_priv = dev->dev_private; |
8647 | u8 sr1; | 8699 | u8 sr1; |
8648 | u32 vga_reg; | 8700 | u32 vga_reg = i915_vgacntrl_reg(dev); |
8649 | |||
8650 | if (HAS_PCH_SPLIT(dev)) | ||
8651 | vga_reg = CPU_VGACNTRL; | ||
8652 | else | ||
8653 | vga_reg = VGACNTRL; | ||
8654 | 8701 | ||
8655 | vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); | 8702 | vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); |
8656 | outb(SR01, VGA_SR_INDEX); | 8703 | outb(SR01, VGA_SR_INDEX); |
@@ -8665,10 +8712,7 @@ static void i915_disable_vga(struct drm_device *dev) | |||
8665 | 8712 | ||
8666 | void intel_modeset_init_hw(struct drm_device *dev) | 8713 | void intel_modeset_init_hw(struct drm_device *dev) |
8667 | { | 8714 | { |
8668 | /* We attempt to init the necessary power wells early in the initialization | 8715 | intel_init_power_well(dev); |
8669 | * time, so the subsystems that expect power to be enabled can work. | ||
8670 | */ | ||
8671 | intel_init_power_wells(dev); | ||
8672 | 8716 | ||
8673 | intel_prepare_ddi(dev); | 8717 | intel_prepare_ddi(dev); |
8674 | 8718 | ||
@@ -8710,7 +8754,7 @@ void intel_modeset_init(struct drm_device *dev) | |||
8710 | dev->mode_config.max_width = 8192; | 8754 | dev->mode_config.max_width = 8192; |
8711 | dev->mode_config.max_height = 8192; | 8755 | dev->mode_config.max_height = 8192; |
8712 | } | 8756 | } |
8713 | dev->mode_config.fb_base = dev_priv->mm.gtt_base_addr; | 8757 | dev->mode_config.fb_base = dev_priv->gtt.mappable_base; |
8714 | 8758 | ||
8715 | DRM_DEBUG_KMS("%d display pipe%s available.\n", | 8759 | DRM_DEBUG_KMS("%d display pipe%s available.\n", |
8716 | dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : ""); | 8760 | dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : ""); |
@@ -8912,20 +8956,14 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) | |||
8912 | * the crtc fixup. */ | 8956 | * the crtc fixup. */ |
8913 | } | 8957 | } |
8914 | 8958 | ||
8915 | static void i915_redisable_vga(struct drm_device *dev) | 8959 | void i915_redisable_vga(struct drm_device *dev) |
8916 | { | 8960 | { |
8917 | struct drm_i915_private *dev_priv = dev->dev_private; | 8961 | struct drm_i915_private *dev_priv = dev->dev_private; |
8918 | u32 vga_reg; | 8962 | u32 vga_reg = i915_vgacntrl_reg(dev); |
8919 | |||
8920 | if (HAS_PCH_SPLIT(dev)) | ||
8921 | vga_reg = CPU_VGACNTRL; | ||
8922 | else | ||
8923 | vga_reg = VGACNTRL; | ||
8924 | 8963 | ||
8925 | if (I915_READ(vga_reg) != VGA_DISP_DISABLE) { | 8964 | if (I915_READ(vga_reg) != VGA_DISP_DISABLE) { |
8926 | DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); | 8965 | DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); |
8927 | I915_WRITE(vga_reg, VGA_DISP_DISABLE); | 8966 | i915_disable_vga(dev); |
8928 | POSTING_READ(vga_reg); | ||
8929 | } | 8967 | } |
8930 | } | 8968 | } |
8931 | 8969 | ||
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index e64c75727702..15afcf86ad67 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -763,6 +763,22 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, | |||
763 | return false; | 763 | return false; |
764 | 764 | ||
765 | bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24; | 765 | bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24; |
766 | |||
767 | if (intel_dp->color_range_auto) { | ||
768 | /* | ||
769 | * See: | ||
770 | * CEA-861-E - 5.1 Default Encoding Parameters | ||
771 | * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry | ||
772 | */ | ||
773 | if (bpp != 18 && drm_mode_cea_vic(adjusted_mode) > 1) | ||
774 | intel_dp->color_range = DP_COLOR_RANGE_16_235; | ||
775 | else | ||
776 | intel_dp->color_range = 0; | ||
777 | } | ||
778 | |||
779 | if (intel_dp->color_range) | ||
780 | adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE; | ||
781 | |||
766 | mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp); | 782 | mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp); |
767 | 783 | ||
768 | for (clock = 0; clock <= max_clock; clock++) { | 784 | for (clock = 0; clock <= max_clock; clock++) { |
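The new auto-range block above implements the defaults cited from CEA-861-E 5.1 and DisplayPort 1.2a 5.1.1.1: CEA modes other than 640x480 (VIC > 1) default to limited 16-235 RGB, everything else to full range, and the forced 6 bpc (18 bpp) case always stays full. A sketch of the same pick as a pure function; cea_vic stands for the drm_mode_cea_vic() result:

enum rgb_range { RANGE_FULL, RANGE_LIMITED };

static enum rgb_range pick_range_auto(int bpp, int cea_vic)
{
	if (bpp != 18 && cea_vic > 1)
		return RANGE_LIMITED;	/* CEA-861-E 5.1 default */
	return RANGE_FULL;
}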
@@ -967,7 +983,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
967 | else | 983 | else |
968 | intel_dp->DP |= DP_PLL_FREQ_270MHZ; | 984 | intel_dp->DP |= DP_PLL_FREQ_270MHZ; |
969 | } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) { | 985 | } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) { |
970 | intel_dp->DP |= intel_dp->color_range; | 986 | if (!HAS_PCH_SPLIT(dev)) |
987 | intel_dp->DP |= intel_dp->color_range; | ||
971 | 988 | ||
972 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) | 989 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
973 | intel_dp->DP |= DP_SYNC_HS_HIGH; | 990 | intel_dp->DP |= DP_SYNC_HS_HIGH; |
@@ -1770,14 +1787,18 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, | |||
1770 | temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; | 1787 | temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; |
1771 | switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { | 1788 | switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { |
1772 | case DP_TRAINING_PATTERN_DISABLE: | 1789 | case DP_TRAINING_PATTERN_DISABLE: |
1773 | temp |= DP_TP_CTL_LINK_TRAIN_IDLE; | ||
1774 | I915_WRITE(DP_TP_CTL(port), temp); | ||
1775 | 1790 | ||
1776 | if (wait_for((I915_READ(DP_TP_STATUS(port)) & | 1791 | if (port != PORT_A) { |
1777 | DP_TP_STATUS_IDLE_DONE), 1)) | 1792 | temp |= DP_TP_CTL_LINK_TRAIN_IDLE; |
1778 | DRM_ERROR("Timed out waiting for DP idle patterns\n"); | 1793 | I915_WRITE(DP_TP_CTL(port), temp); |
1794 | |||
1795 | if (wait_for((I915_READ(DP_TP_STATUS(port)) & | ||
1796 | DP_TP_STATUS_IDLE_DONE), 1)) | ||
1797 | DRM_ERROR("Timed out waiting for DP idle patterns\n"); | ||
1798 | |||
1799 | temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; | ||
1800 | } | ||
1779 | 1801 | ||
1780 | temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; | ||
1781 | temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; | 1802 | temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; |
1782 | 1803 | ||
1783 | break; | 1804 | break; |
@@ -2276,16 +2297,17 @@ g4x_dp_detect(struct intel_dp *intel_dp) | |||
2276 | { | 2297 | { |
2277 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | 2298 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
2278 | struct drm_i915_private *dev_priv = dev->dev_private; | 2299 | struct drm_i915_private *dev_priv = dev->dev_private; |
2300 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | ||
2279 | uint32_t bit; | 2301 | uint32_t bit; |
2280 | 2302 | ||
2281 | switch (intel_dp->output_reg) { | 2303 | switch (intel_dig_port->port) { |
2282 | case DP_B: | 2304 | case PORT_B: |
2283 | bit = DPB_HOTPLUG_LIVE_STATUS; | 2305 | bit = DPB_HOTPLUG_LIVE_STATUS; |
2284 | break; | 2306 | break; |
2285 | case DP_C: | 2307 | case PORT_C: |
2286 | bit = DPC_HOTPLUG_LIVE_STATUS; | 2308 | bit = DPC_HOTPLUG_LIVE_STATUS; |
2287 | break; | 2309 | break; |
2288 | case DP_D: | 2310 | case PORT_D: |
2289 | bit = DPD_HOTPLUG_LIVE_STATUS; | 2311 | bit = DPD_HOTPLUG_LIVE_STATUS; |
2290 | break; | 2312 | break; |
2291 | default: | 2313 | default: |
@@ -2459,10 +2481,21 @@ intel_dp_set_property(struct drm_connector *connector, | |||
2459 | } | 2481 | } |
2460 | 2482 | ||
2461 | if (property == dev_priv->broadcast_rgb_property) { | 2483 | if (property == dev_priv->broadcast_rgb_property) { |
2462 | if (val == !!intel_dp->color_range) | 2484 | switch (val) { |
2463 | return 0; | 2485 | case INTEL_BROADCAST_RGB_AUTO: |
2464 | 2486 | intel_dp->color_range_auto = true; | |
2465 | intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0; | 2487 | break; |
2488 | case INTEL_BROADCAST_RGB_FULL: | ||
2489 | intel_dp->color_range_auto = false; | ||
2490 | intel_dp->color_range = 0; | ||
2491 | break; | ||
2492 | case INTEL_BROADCAST_RGB_LIMITED: | ||
2493 | intel_dp->color_range_auto = false; | ||
2494 | intel_dp->color_range = DP_COLOR_RANGE_16_235; | ||
2495 | break; | ||
2496 | default: | ||
2497 | return -EINVAL; | ||
2498 | } | ||
2466 | goto done; | 2499 | goto done; |
2467 | } | 2500 | } |
2468 | 2501 | ||
@@ -2603,6 +2636,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect | |||
2603 | 2636 | ||
2604 | intel_attach_force_audio_property(connector); | 2637 | intel_attach_force_audio_property(connector); |
2605 | intel_attach_broadcast_rgb_property(connector); | 2638 | intel_attach_broadcast_rgb_property(connector); |
2639 | intel_dp->color_range_auto = true; | ||
2606 | 2640 | ||
2607 | if (is_edp(intel_dp)) { | 2641 | if (is_edp(intel_dp)) { |
2608 | drm_mode_create_scaling_mode_property(connector->dev); | 2642 | drm_mode_create_scaling_mode_property(connector->dev); |
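With the hunks above, the Broadcast RGB property becomes tri-state: Automatic defers the range choice to the per-mode logic in mode_fixup, Full and Limited clear the auto flag and pin color_range directly, and any other value is rejected with -EINVAL. A hedged model of the handler; the enum mirrors INTEL_BROADCAST_RGB_* in spirit, not in numeric value:

#include <stdbool.h>

enum bcast_rgb { BCAST_AUTO, BCAST_FULL, BCAST_LIMITED };

struct range_state { bool range_auto; unsigned int range_bits; };

static int set_broadcast_rgb(struct range_state *s, enum bcast_rgb val,
			     unsigned int limited_bits)
{
	switch (val) {
	case BCAST_AUTO:
		s->range_auto = true;	/* pick per-mode in mode_fixup */
		break;
	case BCAST_FULL:
		s->range_auto = false;
		s->range_bits = 0;
		break;
	case BCAST_LIMITED:
		s->range_auto = false;
		s->range_bits = limited_bits;
		break;
	default:
		return -1;		/* -EINVAL in the driver */
	}
	return 0;
}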
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 54a034c82061..13afb37d8dec 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -109,6 +109,11 @@ | |||
109 | * timings in the mode to prevent the crtc fixup from overwriting them. | 109 | * timings in the mode to prevent the crtc fixup from overwriting them. |
110 | * Currently only lvds needs that. */ | 110 | * Currently only lvds needs that. */ |
111 | #define INTEL_MODE_CRTC_TIMINGS_SET (0x20) | 111 | #define INTEL_MODE_CRTC_TIMINGS_SET (0x20) |
112 | /* | ||
113 | * Set when limited 16-235 (as opposed to full 0-255) RGB color range is | ||
114 | * to be used. | ||
115 | */ | ||
116 | #define INTEL_MODE_LIMITED_COLOR_RANGE (0x40) | ||
112 | 117 | ||
113 | static inline void | 118 | static inline void |
114 | intel_mode_set_pixel_multiplier(struct drm_display_mode *mode, | 119 | intel_mode_set_pixel_multiplier(struct drm_display_mode *mode, |
@@ -206,6 +211,7 @@ struct intel_crtc { | |||
206 | * some outputs connected to this crtc. | 211 | * some outputs connected to this crtc. |
207 | */ | 212 | */ |
208 | bool active; | 213 | bool active; |
214 | bool eld_vld; | ||
209 | bool primary_disabled; /* is the crtc obscured by a plane? */ | 215 | bool primary_disabled; /* is the crtc obscured by a plane? */ |
210 | bool lowfreq_avail; | 216 | bool lowfreq_avail; |
211 | struct intel_overlay *overlay; | 217 | struct intel_overlay *overlay; |
@@ -284,6 +290,9 @@ struct cxsr_latency { | |||
284 | #define DIP_LEN_AVI 13 | 290 | #define DIP_LEN_AVI 13 |
285 | #define DIP_AVI_PR_1 0 | 291 | #define DIP_AVI_PR_1 0 |
286 | #define DIP_AVI_PR_2 1 | 292 | #define DIP_AVI_PR_2 1 |
293 | #define DIP_AVI_RGB_QUANT_RANGE_DEFAULT (0 << 2) | ||
294 | #define DIP_AVI_RGB_QUANT_RANGE_LIMITED (1 << 2) | ||
295 | #define DIP_AVI_RGB_QUANT_RANGE_FULL (2 << 2) | ||
287 | 296 | ||
288 | #define DIP_TYPE_SPD 0x83 | 297 | #define DIP_TYPE_SPD 0x83 |
289 | #define DIP_VERSION_SPD 0x1 | 298 | #define DIP_VERSION_SPD 0x1 |
@@ -338,9 +347,11 @@ struct intel_hdmi { | |||
338 | u32 sdvox_reg; | 347 | u32 sdvox_reg; |
339 | int ddc_bus; | 348 | int ddc_bus; |
340 | uint32_t color_range; | 349 | uint32_t color_range; |
350 | bool color_range_auto; | ||
341 | bool has_hdmi_sink; | 351 | bool has_hdmi_sink; |
342 | bool has_audio; | 352 | bool has_audio; |
343 | enum hdmi_force_audio force_audio; | 353 | enum hdmi_force_audio force_audio; |
354 | bool rgb_quant_range_selectable; | ||
344 | void (*write_infoframe)(struct drm_encoder *encoder, | 355 | void (*write_infoframe)(struct drm_encoder *encoder, |
345 | struct dip_infoframe *frame); | 356 | struct dip_infoframe *frame); |
346 | void (*set_infoframes)(struct drm_encoder *encoder, | 357 | void (*set_infoframes)(struct drm_encoder *encoder, |
@@ -357,6 +368,7 @@ struct intel_dp { | |||
357 | bool has_audio; | 368 | bool has_audio; |
358 | enum hdmi_force_audio force_audio; | 369 | enum hdmi_force_audio force_audio; |
359 | uint32_t color_range; | 370 | uint32_t color_range; |
371 | bool color_range_auto; | ||
360 | uint8_t link_bw; | 372 | uint8_t link_bw; |
361 | uint8_t lane_count; | 373 | uint8_t lane_count; |
362 | uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; | 374 | uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; |
@@ -440,9 +452,8 @@ extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, | |||
440 | extern void intel_dvo_init(struct drm_device *dev); | 452 | extern void intel_dvo_init(struct drm_device *dev); |
441 | extern void intel_tv_init(struct drm_device *dev); | 453 | extern void intel_tv_init(struct drm_device *dev); |
442 | extern void intel_mark_busy(struct drm_device *dev); | 454 | extern void intel_mark_busy(struct drm_device *dev); |
443 | extern void intel_mark_idle(struct drm_device *dev); | ||
444 | extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj); | 455 | extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj); |
445 | extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj); | 456 | extern void intel_mark_idle(struct drm_device *dev); |
446 | extern bool intel_lvds_init(struct drm_device *dev); | 457 | extern bool intel_lvds_init(struct drm_device *dev); |
447 | extern bool intel_is_dual_link_lvds(struct drm_device *dev); | 458 | extern bool intel_is_dual_link_lvds(struct drm_device *dev); |
448 | extern void intel_dp_init(struct drm_device *dev, int output_reg, | 459 | extern void intel_dp_init(struct drm_device *dev, int output_reg, |
@@ -655,7 +666,8 @@ extern void intel_update_fbc(struct drm_device *dev); | |||
655 | extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv); | 666 | extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv); |
656 | extern void intel_gpu_ips_teardown(void); | 667 | extern void intel_gpu_ips_teardown(void); |
657 | 668 | ||
658 | extern void intel_init_power_wells(struct drm_device *dev); | 669 | extern void intel_init_power_well(struct drm_device *dev); |
670 | extern void intel_set_power_well(struct drm_device *dev, bool enable); | ||
659 | extern void intel_enable_gt_powersave(struct drm_device *dev); | 671 | extern void intel_enable_gt_powersave(struct drm_device *dev); |
660 | extern void intel_disable_gt_powersave(struct drm_device *dev); | 672 | extern void intel_disable_gt_powersave(struct drm_device *dev); |
661 | extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv); | 673 | extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv); |
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 755c27450a2c..1c510da04d16 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
@@ -135,14 +135,13 @@ static int intelfb_create(struct intel_fbdev *ifbdev, | |||
135 | goto out_unpin; | 135 | goto out_unpin; |
136 | } | 136 | } |
137 | info->apertures->ranges[0].base = dev->mode_config.fb_base; | 137 | info->apertures->ranges[0].base = dev->mode_config.fb_base; |
138 | info->apertures->ranges[0].size = | 138 | info->apertures->ranges[0].size = dev_priv->gtt.mappable_end; |
139 | dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; | ||
140 | 139 | ||
141 | info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset; | 140 | info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset; |
142 | info->fix.smem_len = size; | 141 | info->fix.smem_len = size; |
143 | 142 | ||
144 | info->screen_base = | 143 | info->screen_base = |
145 | ioremap_wc(dev_priv->mm.gtt_base_addr + obj->gtt_offset, | 144 | ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset, |
146 | size); | 145 | size); |
147 | if (!info->screen_base) { | 146 | if (!info->screen_base) { |
148 | ret = -ENOSPC; | 147 | ret = -ENOSPC; |
@@ -306,7 +305,8 @@ void intel_fb_restore_mode(struct drm_device *dev) | |||
306 | 305 | ||
307 | /* Be sure to shut off any planes that may be active */ | 306 | /* Be sure to shut off any planes that may be active */ |
308 | list_for_each_entry(plane, &config->plane_list, head) | 307 | list_for_each_entry(plane, &config->plane_list, head) |
309 | plane->funcs->disable_plane(plane); | 308 | if (plane->enabled) |
309 | plane->funcs->disable_plane(plane); | ||
310 | 310 | ||
311 | drm_modeset_unlock_all(dev); | 311 | drm_modeset_unlock_all(dev); |
312 | } | 312 | } |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 6387f9b0df99..5b4efd64c2f9 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -331,6 +331,7 @@ static void intel_set_infoframe(struct drm_encoder *encoder, | |||
331 | static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, | 331 | static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, |
332 | struct drm_display_mode *adjusted_mode) | 332 | struct drm_display_mode *adjusted_mode) |
333 | { | 333 | { |
334 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | ||
334 | struct dip_infoframe avi_if = { | 335 | struct dip_infoframe avi_if = { |
335 | .type = DIP_TYPE_AVI, | 336 | .type = DIP_TYPE_AVI, |
336 | .ver = DIP_VERSION_AVI, | 337 | .ver = DIP_VERSION_AVI, |
@@ -340,6 +341,13 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, | |||
340 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) | 341 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) |
341 | avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2; | 342 | avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2; |
342 | 343 | ||
344 | if (intel_hdmi->rgb_quant_range_selectable) { | ||
345 | if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE) | ||
346 | avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED; | ||
347 | else | ||
348 | avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL; | ||
349 | } | ||
350 | |||
343 | avi_if.body.avi.VIC = drm_mode_cea_vic(adjusted_mode); | 351 | avi_if.body.avi.VIC = drm_mode_cea_vic(adjusted_mode); |
344 | 352 | ||
345 | intel_set_infoframe(encoder, &avi_if); | 353 | intel_set_infoframe(encoder, &avi_if); |
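The infoframe hunk above only advertises a quantization range when the sink's EDID says it honors one (rgb_quant_range_selectable); in that case the AVI infoframe's Q field is set to limited or full to match the chosen range, and otherwise it is left at "default". A small sketch using the same (n << 2) encoding as the DIP_AVI_RGB_QUANT_RANGE_* defines added in intel_drv.h:

#include <stdbool.h>
#include <stdint.h>

#define Q_DEFAULT	(0u << 2)
#define Q_LIMITED	(1u << 2)
#define Q_FULL		(2u << 2)

static uint8_t avi_quant_field(bool sink_selectable, bool limited_range)
{
	if (!sink_selectable)
		return Q_DEFAULT;	/* sink ignores an explicit Q */
	return limited_range ? Q_LIMITED : Q_FULL;
}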
@@ -364,7 +372,8 @@ static void g4x_set_infoframes(struct drm_encoder *encoder, | |||
364 | struct drm_display_mode *adjusted_mode) | 372 | struct drm_display_mode *adjusted_mode) |
365 | { | 373 | { |
366 | struct drm_i915_private *dev_priv = encoder->dev->dev_private; | 374 | struct drm_i915_private *dev_priv = encoder->dev->dev_private; |
367 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | 375 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); |
376 | struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; | ||
368 | u32 reg = VIDEO_DIP_CTL; | 377 | u32 reg = VIDEO_DIP_CTL; |
369 | u32 val = I915_READ(reg); | 378 | u32 val = I915_READ(reg); |
370 | u32 port; | 379 | u32 port; |
@@ -391,11 +400,11 @@ static void g4x_set_infoframes(struct drm_encoder *encoder, | |||
391 | return; | 400 | return; |
392 | } | 401 | } |
393 | 402 | ||
394 | switch (intel_hdmi->sdvox_reg) { | 403 | switch (intel_dig_port->port) { |
395 | case SDVOB: | 404 | case PORT_B: |
396 | port = VIDEO_DIP_PORT_B; | 405 | port = VIDEO_DIP_PORT_B; |
397 | break; | 406 | break; |
398 | case SDVOC: | 407 | case PORT_C: |
399 | port = VIDEO_DIP_PORT_C; | 408 | port = VIDEO_DIP_PORT_C; |
400 | break; | 409 | break; |
401 | default: | 410 | default: |
@@ -428,7 +437,8 @@ static void ibx_set_infoframes(struct drm_encoder *encoder, | |||
428 | { | 437 | { |
429 | struct drm_i915_private *dev_priv = encoder->dev->dev_private; | 438 | struct drm_i915_private *dev_priv = encoder->dev->dev_private; |
430 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 439 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
431 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | 440 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); |
441 | struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; | ||
432 | u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe); | 442 | u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe); |
433 | u32 val = I915_READ(reg); | 443 | u32 val = I915_READ(reg); |
434 | u32 port; | 444 | u32 port; |
@@ -447,14 +457,14 @@ static void ibx_set_infoframes(struct drm_encoder *encoder, | |||
447 | return; | 457 | return; |
448 | } | 458 | } |
449 | 459 | ||
450 | switch (intel_hdmi->sdvox_reg) { | 460 | switch (intel_dig_port->port) { |
451 | case HDMIB: | 461 | case PORT_B: |
452 | port = VIDEO_DIP_PORT_B; | 462 | port = VIDEO_DIP_PORT_B; |
453 | break; | 463 | break; |
454 | case HDMIC: | 464 | case PORT_C: |
455 | port = VIDEO_DIP_PORT_C; | 465 | port = VIDEO_DIP_PORT_C; |
456 | break; | 466 | break; |
457 | case HDMID: | 467 | case PORT_D: |
458 | port = VIDEO_DIP_PORT_D; | 468 | port = VIDEO_DIP_PORT_D; |
459 | break; | 469 | break; |
460 | default: | 470 | default: |
@@ -766,6 +776,20 @@ bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, | |||
766 | const struct drm_display_mode *mode, | 776 | const struct drm_display_mode *mode, |
767 | struct drm_display_mode *adjusted_mode) | 777 | struct drm_display_mode *adjusted_mode) |
768 | { | 778 | { |
779 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | ||
780 | |||
781 | if (intel_hdmi->color_range_auto) { | ||
782 | /* See CEA-861-E - 5.1 Default Encoding Parameters */ | ||
783 | if (intel_hdmi->has_hdmi_sink && | ||
784 | drm_mode_cea_vic(adjusted_mode) > 1) | ||
785 | intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235; | ||
786 | else | ||
787 | intel_hdmi->color_range = 0; | ||
788 | } | ||
789 | |||
790 | if (intel_hdmi->color_range) | ||
791 | adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE; | ||
792 | |||
769 | return true; | 793 | return true; |
770 | } | 794 | } |
771 | 795 | ||
@@ -773,13 +797,14 @@ static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi) | |||
773 | { | 797 | { |
774 | struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi); | 798 | struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi); |
775 | struct drm_i915_private *dev_priv = dev->dev_private; | 799 | struct drm_i915_private *dev_priv = dev->dev_private; |
800 | struct intel_digital_port *intel_dig_port = hdmi_to_dig_port(intel_hdmi); | ||
776 | uint32_t bit; | 801 | uint32_t bit; |
777 | 802 | ||
778 | switch (intel_hdmi->sdvox_reg) { | 803 | switch (intel_dig_port->port) { |
779 | case SDVOB: | 804 | case PORT_B: |
780 | bit = HDMIB_HOTPLUG_LIVE_STATUS; | 805 | bit = HDMIB_HOTPLUG_LIVE_STATUS; |
781 | break; | 806 | break; |
782 | case SDVOC: | 807 | case PORT_C: |
783 | bit = HDMIC_HOTPLUG_LIVE_STATUS; | 808 | bit = HDMIC_HOTPLUG_LIVE_STATUS; |
784 | break; | 809 | break; |
785 | default: | 810 | default: |
@@ -811,6 +836,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) | |||
811 | 836 | ||
812 | intel_hdmi->has_hdmi_sink = false; | 837 | intel_hdmi->has_hdmi_sink = false; |
813 | intel_hdmi->has_audio = false; | 838 | intel_hdmi->has_audio = false; |
839 | intel_hdmi->rgb_quant_range_selectable = false; | ||
814 | edid = drm_get_edid(connector, | 840 | edid = drm_get_edid(connector, |
815 | intel_gmbus_get_adapter(dev_priv, | 841 | intel_gmbus_get_adapter(dev_priv, |
816 | intel_hdmi->ddc_bus)); | 842 | intel_hdmi->ddc_bus)); |
@@ -822,6 +848,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) | |||
822 | intel_hdmi->has_hdmi_sink = | 848 | intel_hdmi->has_hdmi_sink = |
823 | drm_detect_hdmi_monitor(edid); | 849 | drm_detect_hdmi_monitor(edid); |
824 | intel_hdmi->has_audio = drm_detect_monitor_audio(edid); | 850 | intel_hdmi->has_audio = drm_detect_monitor_audio(edid); |
851 | intel_hdmi->rgb_quant_range_selectable = | ||
852 | drm_rgb_quant_range_selectable(edid); | ||
825 | } | 853 | } |
826 | kfree(edid); | 854 | kfree(edid); |
827 | } | 855 | } |
@@ -907,10 +935,21 @@ intel_hdmi_set_property(struct drm_connector *connector, | |||
907 | } | 935 | } |
908 | 936 | ||
909 | if (property == dev_priv->broadcast_rgb_property) { | 937 | if (property == dev_priv->broadcast_rgb_property) { |
910 | if (val == !!intel_hdmi->color_range) | 938 | switch (val) { |
911 | return 0; | 939 | case INTEL_BROADCAST_RGB_AUTO: |
912 | 940 | intel_hdmi->color_range_auto = true; | |
913 | intel_hdmi->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0; | 941 | break; |
942 | case INTEL_BROADCAST_RGB_FULL: | ||
943 | intel_hdmi->color_range_auto = false; | ||
944 | intel_hdmi->color_range = 0; | ||
945 | break; | ||
946 | case INTEL_BROADCAST_RGB_LIMITED: | ||
947 | intel_hdmi->color_range_auto = false; | ||
948 | intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235; | ||
949 | break; | ||
950 | default: | ||
951 | return -EINVAL; | ||
952 | } | ||
914 | goto done; | 953 | goto done; |
915 | } | 954 | } |
916 | 955 | ||
@@ -959,6 +998,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c | |||
959 | { | 998 | { |
960 | intel_attach_force_audio_property(connector); | 999 | intel_attach_force_audio_property(connector); |
961 | intel_attach_broadcast_rgb_property(connector); | 1000 | intel_attach_broadcast_rgb_property(connector); |
1001 | intel_hdmi->color_range_auto = true; | ||
962 | } | 1002 | } |
963 | 1003 | ||
964 | void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, | 1004 | void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, |
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 7f0904170963..acf8aec9ada7 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c | |||
@@ -515,6 +515,8 @@ int intel_setup_gmbus(struct drm_device *dev) | |||
515 | 515 | ||
516 | if (HAS_PCH_SPLIT(dev)) | 516 | if (HAS_PCH_SPLIT(dev)) |
517 | dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA; | 517 | dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA; |
518 | else if (IS_VALLEYVIEW(dev)) | ||
519 | dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE; | ||
518 | else | 520 | else |
519 | dev_priv->gpio_mmio_base = 0; | 521 | dev_priv->gpio_mmio_base = 0; |
520 | 522 | ||
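This gmbus hunk is part of the same cleanup as the intel_setup_outputs() changes earlier: with the IS_DISPLAYREG() translation gone, Valleyview display registers are addressed by adding VLV_DISPLAY_BASE explicitly at each site (DP_B/DP_C, SDVOB/SDVOC, and here the GPIO base). A trivial illustration of forming the offset once instead of patching every access at runtime:

#include <stdint.h>

#define VLV_DISPLAY_BASE 0x180000u	/* matches the driver's define */

static inline uint32_t vlv_disp_reg(uint32_t gen_reg)
{
	return VLV_DISPLAY_BASE + gen_reg;
}
/* e.g. gpio_mmio_base = VLV_DISPLAY_BASE; DP_B -> vlv_disp_reg(DP_B) */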
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index 49249bb97485..0e860f39933d 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c | |||
@@ -100,8 +100,9 @@ intel_attach_force_audio_property(struct drm_connector *connector) | |||
100 | } | 100 | } |
101 | 101 | ||
102 | static const struct drm_prop_enum_list broadcast_rgb_names[] = { | 102 | static const struct drm_prop_enum_list broadcast_rgb_names[] = { |
103 | { 0, "Full" }, | 103 | { INTEL_BROADCAST_RGB_AUTO, "Automatic" }, |
104 | { 1, "Limited 16:235" }, | 104 | { INTEL_BROADCAST_RGB_FULL, "Full" }, |
105 | { INTEL_BROADCAST_RGB_LIMITED, "Limited 16:235" }, | ||
105 | }; | 106 | }; |
106 | 107 | ||
107 | void | 108 | void |
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 7741c22c934c..4d338740f2cb 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c | |||
@@ -347,7 +347,7 @@ static void intel_didl_outputs(struct drm_device *dev) | |||
347 | int i = 0; | 347 | int i = 0; |
348 | 348 | ||
349 | handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); | 349 | handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); |
350 | if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) | 350 | if (!handle || acpi_bus_get_device(handle, &acpi_dev)) |
351 | return; | 351 | return; |
352 | 352 | ||
353 | if (acpi_is_video_device(acpi_dev)) | 353 | if (acpi_is_video_device(acpi_dev)) |
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 1e901c3c18af..67a2501d519d 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -195,7 +195,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay) | |||
195 | if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) | 195 | if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) |
196 | regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr; | 196 | regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr; |
197 | else | 197 | else |
198 | regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping, | 198 | regs = io_mapping_map_wc(dev_priv->gtt.mappable, |
199 | overlay->reg_bo->gtt_offset); | 199 | overlay->reg_bo->gtt_offset); |
200 | 200 | ||
201 | return regs; | 201 | return regs; |
@@ -1434,7 +1434,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay) | |||
1434 | regs = (struct overlay_registers __iomem *) | 1434 | regs = (struct overlay_registers __iomem *) |
1435 | overlay->reg_bo->phys_obj->handle->vaddr; | 1435 | overlay->reg_bo->phys_obj->handle->vaddr; |
1436 | else | 1436 | else |
1437 | regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, | 1437 | regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, |
1438 | overlay->reg_bo->gtt_offset); | 1438 | overlay->reg_bo->gtt_offset); |
1439 | 1439 | ||
1440 | return regs; | 1440 | return regs; |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index ca9734529229..f7f67360e740 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -3687,6 +3687,10 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) | |||
3687 | reg |= GEN7_FF_VS_SCHED_HW; | 3687 | reg |= GEN7_FF_VS_SCHED_HW; |
3688 | reg |= GEN7_FF_DS_SCHED_HW; | 3688 | reg |= GEN7_FF_DS_SCHED_HW; |
3689 | 3689 | ||
3690 | /* WaVSRefCountFullforceMissDisable */ | ||
3691 | if (IS_HASWELL(dev_priv->dev)) | ||
3692 | reg &= ~GEN7_FF_VS_REF_CNT_FFME; | ||
3693 | |||
3690 | I915_WRITE(GEN7_FF_THREAD_MODE, reg); | 3694 | I915_WRITE(GEN7_FF_THREAD_MODE, reg); |
3691 | } | 3695 | } |
3692 | 3696 | ||
@@ -4050,35 +4054,57 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
4050 | dev_priv->display.init_clock_gating(dev); | 4054 | dev_priv->display.init_clock_gating(dev); |
4051 | } | 4055 | } |
4052 | 4056 | ||
4053 | /* Starting with Haswell, we have different power wells for | 4057 | void intel_set_power_well(struct drm_device *dev, bool enable) |
4054 | * different parts of the GPU. This attempts to enable them all. | ||
4055 | */ | ||
4056 | void intel_init_power_wells(struct drm_device *dev) | ||
4057 | { | 4058 | { |
4058 | struct drm_i915_private *dev_priv = dev->dev_private; | 4059 | struct drm_i915_private *dev_priv = dev->dev_private; |
4059 | unsigned long power_wells[] = { | 4060 | bool is_enabled, enable_requested; |
4060 | HSW_PWR_WELL_CTL1, | 4061 | uint32_t tmp; |
4061 | HSW_PWR_WELL_CTL2, | ||
4062 | HSW_PWR_WELL_CTL4 | ||
4063 | }; | ||
4064 | int i; | ||
4065 | 4062 | ||
4066 | if (!IS_HASWELL(dev)) | 4063 | if (!IS_HASWELL(dev)) |
4067 | return; | 4064 | return; |
4068 | 4065 | ||
4069 | mutex_lock(&dev->struct_mutex); | 4066 | tmp = I915_READ(HSW_PWR_WELL_DRIVER); |
4067 | is_enabled = tmp & HSW_PWR_WELL_STATE; | ||
4068 | enable_requested = tmp & HSW_PWR_WELL_ENABLE; | ||
4070 | 4069 | ||
4071 | for (i = 0; i < ARRAY_SIZE(power_wells); i++) { | 4070 | if (enable) { |
4072 | int well = I915_READ(power_wells[i]); | 4071 | if (!enable_requested) |
4072 | I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE); | ||
4073 | 4073 | ||
4074 | if ((well & HSW_PWR_WELL_STATE) == 0) { | 4074 | if (!is_enabled) { |
4075 | I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE); | 4075 | DRM_DEBUG_KMS("Enabling power well\n"); |
4076 | if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20)) | 4076 | if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) & |
4077 | DRM_ERROR("Error enabling power well %lx\n", power_wells[i]); | 4077 | HSW_PWR_WELL_STATE), 20)) |
4078 | DRM_ERROR("Timeout enabling power well\n"); | ||
4079 | } | ||
4080 | } else { | ||
4081 | if (enable_requested) { | ||
4082 | I915_WRITE(HSW_PWR_WELL_DRIVER, 0); | ||
4083 | DRM_DEBUG_KMS("Requesting to disable the power well\n"); | ||
4078 | } | 4084 | } |
4079 | } | 4085 | } |
4086 | } | ||
4080 | 4087 | ||
4081 | mutex_unlock(&dev->struct_mutex); | 4088 | /* |
4089 | * Starting with Haswell, we have a "Power Down Well" that can be turned off | ||
4090 | * when not needed anymore. We have 4 registers that can request the power well | ||
4091 | * to be enabled, and it will only be disabled if none of the registers is | ||
4092 | * requesting it to be enabled. | ||
4093 | */ | ||
4094 | void intel_init_power_well(struct drm_device *dev) | ||
4095 | { | ||
4096 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4097 | |||
4098 | if (!IS_HASWELL(dev)) | ||
4099 | return; | ||
4100 | |||
4101 | /* For now, we need the power well to be always enabled. */ | ||
4102 | intel_set_power_well(dev, true); | ||
4103 | |||
4104 | /* We're taking over the BIOS, so clear any requests made by it since | ||
4105 | * the driver is in charge now. */ | ||
4106 | if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE) | ||
4107 | I915_WRITE(HSW_PWR_WELL_BIOS, 0); | ||
4082 | } | 4108 | } |
4083 | 4109 | ||
4084 | /* Set up chip specific power management-related functions */ | 4110 | /* Set up chip specific power management-related functions */ |
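The rework separates the request bit the driver writes (HSW_PWR_WELL_ENABLE) from the status bit the hardware reports (HSW_PWR_WELL_STATE), and the well only actually powers down once every requester register has dropped its request. A compact, self-contained model of that handshake; the bit positions are symbolic, not the real HSW_PWR_WELL_* layout:

```c
#include <stdbool.h>
#include <stdint.h>

#define PWR_WELL_ENABLE (1u << 31) /* request bit, written by software  */
#define PWR_WELL_STATE  (1u << 30) /* status bit, reported by hardware  */

/* Model of intel_set_power_well(): the driver only ever toggles its own
 * request bit; the status bit follows after a hardware delay (hence the
 * wait_for() poll in the real code), and the well stays up as long as
 * any of the four request registers (BIOS, driver, ...) still asks for
 * it. */
static void set_power_well(volatile uint32_t *reg, bool enable)
{
	uint32_t tmp = *reg;
	bool requested = tmp & PWR_WELL_ENABLE;

	if (enable && !requested)
		*reg = PWR_WELL_ENABLE;  /* raise our request, then poll STATE */
	else if (!enable && requested)
		*reg = 0;                /* drop only our own request */
}
```

That is also why intel_init_power_well() clears HSW_PWR_WELL_BIOS after forcing the driver request on: a stale BIOS request would otherwise keep the well powered forever.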
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 59e02691baf3..dc6ae2fa1cee 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -1203,7 +1203,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, | |||
1203 | goto err_unpin; | 1203 | goto err_unpin; |
1204 | 1204 | ||
1205 | ring->virtual_start = | 1205 | ring->virtual_start = |
1206 | ioremap_wc(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset, | 1206 | ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset, |
1207 | ring->size); | 1207 | ring->size); |
1208 | if (ring->virtual_start == NULL) { | 1208 | if (ring->virtual_start == NULL) { |
1209 | DRM_ERROR("Failed to map ringbuffer.\n"); | 1209 | DRM_ERROR("Failed to map ringbuffer.\n"); |
@@ -1223,8 +1223,6 @@ static int intel_init_ring_buffer(struct drm_device *dev, | |||
1223 | if (IS_I830(ring->dev) || IS_845G(ring->dev)) | 1223 | if (IS_I830(ring->dev) || IS_845G(ring->dev)) |
1224 | ring->effective_size -= 128; | 1224 | ring->effective_size -= 128; |
1225 | 1225 | ||
1226 | intel_ring_init_seqno(ring, dev_priv->last_seqno); | ||
1227 | |||
1228 | return 0; | 1226 | return 0; |
1229 | 1227 | ||
1230 | err_unmap: | 1228 | err_unmap: |
@@ -1371,7 +1369,8 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n) | |||
1371 | 1369 | ||
1372 | msleep(1); | 1370 | msleep(1); |
1373 | 1371 | ||
1374 | ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible); | 1372 | ret = i915_gem_check_wedge(&dev_priv->gpu_error, |
1373 | dev_priv->mm.interruptible); | ||
1375 | if (ret) | 1374 | if (ret) |
1376 | return ret; | 1375 | return ret; |
1377 | } while (!time_after(jiffies, end)); | 1376 | } while (!time_after(jiffies, end)); |
@@ -1460,7 +1459,8 @@ int intel_ring_begin(struct intel_ring_buffer *ring, | |||
1460 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | 1459 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
1461 | int ret; | 1460 | int ret; |
1462 | 1461 | ||
1463 | ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible); | 1462 | ret = i915_gem_check_wedge(&dev_priv->gpu_error, |
1463 | dev_priv->mm.interruptible); | ||
1464 | if (ret) | 1464 | if (ret) |
1465 | return ret; | 1465 | return ret; |
1466 | 1466 | ||
@@ -1491,7 +1491,7 @@ void intel_ring_advance(struct intel_ring_buffer *ring) | |||
1491 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | 1491 | struct drm_i915_private *dev_priv = ring->dev->dev_private; |
1492 | 1492 | ||
1493 | ring->tail &= ring->size - 1; | 1493 | ring->tail &= ring->size - 1; |
1494 | if (dev_priv->stop_rings & intel_ring_flag(ring)) | 1494 | if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring)) |
1495 | return; | 1495 | return; |
1496 | ring->write_tail(ring, ring->tail); | 1496 | ring->write_tail(ring, ring->tail); |
1497 | } | 1497 | } |
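Both wait paths now hand i915_gem_check_wedge() the new i915_gpu_error sub-struct rather than all of dev_priv, in line with the hang/reset state clarification mentioned in the pull text. A rough sketch of what such a check looks like under that split; the helper names follow the pattern of this series but are an assumption here, not quoted from it:

```c
#include <linux/errno.h>

/* Assumed shape of the consolidated check (illustrative, not verbatim):
 * a reset in flight lets interruptible waiters back off with -EAGAIN
 * and retry once the reset completes; a terminally wedged GPU is a
 * permanent -EIO. */
static int check_wedge(struct i915_gpu_error *error, bool interruptible)
{
	if (i915_reset_in_progress(error))
		return interruptible ? -EAGAIN : -EIO;

	if (i915_terminally_wedged(error))
		return -EIO;

	return 0;
}
```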
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 153377bed66a..f01063a2323a 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -103,6 +103,7 @@ struct intel_sdvo { | |||
103 | * It is only valid when using TMDS encoding and 8 bit per color mode. | 103 | * It is only valid when using TMDS encoding and 8 bit per color mode. |
104 | */ | 104 | */ |
105 | uint32_t color_range; | 105 | uint32_t color_range; |
106 | bool color_range_auto; | ||
106 | 107 | ||
107 | /** | 108 | /** |
108 | * This is set if we're going to treat the device as TV-out. | 109 | * This is set if we're going to treat the device as TV-out. |
@@ -125,6 +126,7 @@ struct intel_sdvo { | |||
125 | bool is_hdmi; | 126 | bool is_hdmi; |
126 | bool has_hdmi_monitor; | 127 | bool has_hdmi_monitor; |
127 | bool has_hdmi_audio; | 128 | bool has_hdmi_audio; |
129 | bool rgb_quant_range_selectable; | ||
128 | 130 | ||
129 | /** | 131 | /** |
130 | * This is set if we detect output of sdvo device as LVDS and | 132 | * This is set if we detect output of sdvo device as LVDS and |
@@ -946,7 +948,8 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo, | |||
946 | &tx_rate, 1); | 948 | &tx_rate, 1); |
947 | } | 949 | } |
948 | 950 | ||
949 | static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo) | 951 | static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo, |
952 | const struct drm_display_mode *adjusted_mode) | ||
950 | { | 953 | { |
951 | struct dip_infoframe avi_if = { | 954 | struct dip_infoframe avi_if = { |
952 | .type = DIP_TYPE_AVI, | 955 | .type = DIP_TYPE_AVI, |
@@ -955,6 +958,13 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo) | |||
955 | }; | 958 | }; |
956 | uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)]; | 959 | uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)]; |
957 | 960 | ||
961 | if (intel_sdvo->rgb_quant_range_selectable) { | ||
962 | if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE) | ||
963 | avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED; | ||
964 | else | ||
965 | avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL; | ||
966 | } | ||
967 | |||
958 | intel_dip_infoframe_csum(&avi_if); | 968 | intel_dip_infoframe_csum(&avi_if); |
959 | 969 | ||
960 | /* sdvo spec says that the ecc is handled by the hw, and it looks like | 970 | /* sdvo spec says that the ecc is handled by the hw, and it looks like |
@@ -1064,6 +1074,18 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | |||
1064 | multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode); | 1074 | multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode); |
1065 | intel_mode_set_pixel_multiplier(adjusted_mode, multiplier); | 1075 | intel_mode_set_pixel_multiplier(adjusted_mode, multiplier); |
1066 | 1076 | ||
1077 | if (intel_sdvo->color_range_auto) { | ||
1078 | /* See CEA-861-E - 5.1 Default Encoding Parameters */ | ||
1079 | if (intel_sdvo->has_hdmi_monitor && | ||
1080 | drm_mode_cea_vic(adjusted_mode) > 1) | ||
1081 | intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235; | ||
1082 | else | ||
1083 | intel_sdvo->color_range = 0; | ||
1084 | } | ||
1085 | |||
1086 | if (intel_sdvo->color_range) | ||
1087 | adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE; | ||
1088 | |||
1067 | return true; | 1089 | return true; |
1068 | } | 1090 | } |
1069 | 1091 | ||
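The "Automatic" policy added above implements the CEA-861-E section 5.1 default cited in the comment: on an HDMI sink, every CEA mode except VIC 1 (640x480, the "VGA" mode) defaults to limited-range RGB (16..235); VIC 1 and all non-CEA modes default to full range. Reduced to a predicate (vic == 0 meaning "not a CEA mode", per the drm_mode_cea_vic() lookup used above):

```c
#include <stdbool.h>
#include <stdint.h>

/* Model of the auto color-range rule from CEA-861-E 5.1. */
static bool default_to_limited_range(bool hdmi_sink, uint8_t vic)
{
	return hdmi_sink && vic > 1;
}
```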
@@ -1121,7 +1143,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1121 | intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI); | 1143 | intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI); |
1122 | intel_sdvo_set_colorimetry(intel_sdvo, | 1144 | intel_sdvo_set_colorimetry(intel_sdvo, |
1123 | SDVO_COLORIMETRY_RGB256); | 1145 | SDVO_COLORIMETRY_RGB256); |
1124 | intel_sdvo_set_avi_infoframe(intel_sdvo); | 1146 | intel_sdvo_set_avi_infoframe(intel_sdvo, adjusted_mode); |
1125 | } else | 1147 | } else |
1126 | intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI); | 1148 | intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI); |
1127 | 1149 | ||
@@ -1153,7 +1175,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1153 | /* The real mode polarity is set by the SDVO commands, using | 1175 | /* The real mode polarity is set by the SDVO commands, using |
1154 | * struct intel_sdvo_dtd. */ | 1176 | * struct intel_sdvo_dtd. */ |
1155 | sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH; | 1177 | sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH; |
1156 | if (intel_sdvo->is_hdmi) | 1178 | if (!HAS_PCH_SPLIT(dev) && intel_sdvo->is_hdmi) |
1157 | sdvox |= intel_sdvo->color_range; | 1179 | sdvox |= intel_sdvo->color_range; |
1158 | if (INTEL_INFO(dev)->gen < 5) | 1180 | if (INTEL_INFO(dev)->gen < 5) |
1159 | sdvox |= SDVO_BORDER_ENABLE; | 1181 | sdvox |= SDVO_BORDER_ENABLE; |
@@ -1513,6 +1535,8 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector) | |||
1513 | if (intel_sdvo->is_hdmi) { | 1535 | if (intel_sdvo->is_hdmi) { |
1514 | intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid); | 1536 | intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid); |
1515 | intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid); | 1537 | intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid); |
1538 | intel_sdvo->rgb_quant_range_selectable = | ||
1539 | drm_rgb_quant_range_selectable(edid); | ||
1516 | } | 1540 | } |
1517 | } else | 1541 | } else |
1518 | status = connector_status_disconnected; | 1542 | status = connector_status_disconnected; |
@@ -1564,6 +1588,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) | |||
1564 | 1588 | ||
1565 | intel_sdvo->has_hdmi_monitor = false; | 1589 | intel_sdvo->has_hdmi_monitor = false; |
1566 | intel_sdvo->has_hdmi_audio = false; | 1590 | intel_sdvo->has_hdmi_audio = false; |
1591 | intel_sdvo->rgb_quant_range_selectable = false; | ||
1567 | 1592 | ||
1568 | if ((intel_sdvo_connector->output_flag & response) == 0) | 1593 | if ((intel_sdvo_connector->output_flag & response) == 0) |
1569 | ret = connector_status_disconnected; | 1594 | ret = connector_status_disconnected; |
@@ -1897,10 +1922,21 @@ intel_sdvo_set_property(struct drm_connector *connector, | |||
1897 | } | 1922 | } |
1898 | 1923 | ||
1899 | if (property == dev_priv->broadcast_rgb_property) { | 1924 | if (property == dev_priv->broadcast_rgb_property) { |
1900 | if (val == !!intel_sdvo->color_range) | 1925 | switch (val) { |
1901 | return 0; | 1926 | case INTEL_BROADCAST_RGB_AUTO: |
1902 | 1927 | intel_sdvo->color_range_auto = true; | |
1903 | intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0; | 1928 | break; |
1929 | case INTEL_BROADCAST_RGB_FULL: | ||
1930 | intel_sdvo->color_range_auto = false; | ||
1931 | intel_sdvo->color_range = 0; | ||
1932 | break; | ||
1933 | case INTEL_BROADCAST_RGB_LIMITED: | ||
1934 | intel_sdvo->color_range_auto = false; | ||
1935 | intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235; | ||
1936 | break; | ||
1937 | default: | ||
1938 | return -EINVAL; | ||
1939 | } | ||
1904 | goto done; | 1940 | goto done; |
1905 | } | 1941 | } |
1906 | 1942 | ||
@@ -2197,13 +2233,16 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector, | |||
2197 | } | 2233 | } |
2198 | 2234 | ||
2199 | static void | 2235 | static void |
2200 | intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector) | 2236 | intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo, |
2237 | struct intel_sdvo_connector *connector) | ||
2201 | { | 2238 | { |
2202 | struct drm_device *dev = connector->base.base.dev; | 2239 | struct drm_device *dev = connector->base.base.dev; |
2203 | 2240 | ||
2204 | intel_attach_force_audio_property(&connector->base.base); | 2241 | intel_attach_force_audio_property(&connector->base.base); |
2205 | if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) | 2242 | if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) { |
2206 | intel_attach_broadcast_rgb_property(&connector->base.base); | 2243 | intel_attach_broadcast_rgb_property(&connector->base.base); |
2244 | intel_sdvo->color_range_auto = true; | ||
2245 | } | ||
2207 | } | 2246 | } |
2208 | 2247 | ||
2209 | static bool | 2248 | static bool |
@@ -2251,7 +2290,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) | |||
2251 | 2290 | ||
2252 | intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); | 2291 | intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); |
2253 | if (intel_sdvo->is_hdmi) | 2292 | if (intel_sdvo->is_hdmi) |
2254 | intel_sdvo_add_hdmi_properties(intel_sdvo_connector); | 2293 | intel_sdvo_add_hdmi_properties(intel_sdvo, intel_sdvo_connector); |
2255 | 2294 | ||
2256 | return true; | 2295 | return true; |
2257 | } | 2296 | } |
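From userspace, the reworked set_property path is driven through the standard connector-property interface. A sketch with libdrm; discovering prop_id (by iterating the connector's properties and matching the "Broadcast RGB" name) is omitted, and the value 2 mapping to "Limited 16:235" is assumed from the enum table order:

```c
#include <stdint.h>
#include <xf86drmMode.h>

/* Sketch: force limited-range output on one connector. Returns 0 on
 * success, negative errno on failure; an out-of-range value now gets
 * -EINVAL from the kernel switch above. */
static int set_broadcast_rgb_limited(int fd, uint32_t connector_id,
				     uint32_t prop_id)
{
	return drmModeConnectorSetProperty(fd, connector_id, prop_id, 2);
}
```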
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 66b2732f175a..e90c8dcc028d 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h | |||
@@ -1063,6 +1063,7 @@ extern u8 *drm_find_cea_extension(struct edid *edid); | |||
1063 | extern u8 drm_match_cea_mode(struct drm_display_mode *to_match); | 1063 | extern u8 drm_match_cea_mode(struct drm_display_mode *to_match); |
1064 | extern bool drm_detect_hdmi_monitor(struct edid *edid); | 1064 | extern bool drm_detect_hdmi_monitor(struct edid *edid); |
1065 | extern bool drm_detect_monitor_audio(struct edid *edid); | 1065 | extern bool drm_detect_monitor_audio(struct edid *edid); |
1066 | extern bool drm_rgb_quant_range_selectable(struct edid *edid); | ||
1066 | extern int drm_mode_page_flip_ioctl(struct drm_device *dev, | 1067 | extern int drm_mode_page_flip_ioctl(struct drm_device *dev, |
1067 | void *data, struct drm_file *file_priv); | 1068 | void *data, struct drm_file *file_priv); |
1068 | extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, | 1069 | extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, |
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h index 3e3a166a2690..cf105557fea9 100644 --- a/include/drm/intel-gtt.h +++ b/include/drm/intel-gtt.h | |||
@@ -3,24 +3,7 @@ | |||
3 | #ifndef _DRM_INTEL_GTT_H | 3 | #ifndef _DRM_INTEL_GTT_H |
4 | #define _DRM_INTEL_GTT_H | 4 | #define _DRM_INTEL_GTT_H |
5 | 5 | ||
6 | struct intel_gtt { | 6 | void intel_gtt_get(size_t *gtt_total, size_t *stolen_size); |
7 | /* Size of memory reserved for graphics by the BIOS */ | ||
8 | unsigned int stolen_size; | ||
9 | /* Total number of gtt entries. */ | ||
10 | unsigned int gtt_total_entries; | ||
11 | /* Part of the gtt that is mappable by the cpu, for those chips where | ||
12 | * this is not the full gtt. */ | ||
13 | unsigned int gtt_mappable_entries; | ||
14 | /* Whether i915 needs to use the dmar apis or not. */ | ||
15 | unsigned int needs_dmar : 1; | ||
16 | /* Whether we idle the gpu before mapping/unmapping */ | ||
17 | unsigned int do_idle_maps : 1; | ||
18 | /* Share the scratch page dma with ppgtts. */ | ||
19 | dma_addr_t scratch_page_dma; | ||
20 | struct page *scratch_page; | ||
21 | /* needed for ioremap in drm/i915 */ | ||
22 | phys_addr_t gma_bus_addr; | ||
23 | } *intel_gtt_get(void); | ||
24 | 7 | ||
25 | int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, | 8 | int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, |
26 | struct agp_bridge_data *bridge); | 9 | struct agp_bridge_data *bridge); |
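With struct intel_gtt gone, the shared header shrinks to a plain query function with two out-parameters. A sketch of the new call shape; per the removed comments, stolen_size is the memory reserved for graphics by the BIOS, and gtt_total is taken here to be the total GTT size:

```c
#include <drm/intel-gtt.h>

/* Sketch: callers now ask intel-gtt for the two sizes they need instead
 * of holding a pointer into the (removed) shared struct. */
static void probe_gtt_geometry(void)
{
	size_t gtt_total, stolen_size;

	intel_gtt_get(&gtt_total, &stolen_size);
	/* gtt_total:   total GTT size (assumed bytes);
	 * stolen_size: memory reserved for graphics by the BIOS. */
}
```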
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index c4d2e9c74002..07d59419fe6b 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h | |||
@@ -308,6 +308,8 @@ typedef struct drm_i915_irq_wait { | |||
308 | #define I915_PARAM_RSVD_FOR_FUTURE_USE 22 | 308 | #define I915_PARAM_RSVD_FOR_FUTURE_USE 22 |
309 | #define I915_PARAM_HAS_SECURE_BATCHES 23 | 309 | #define I915_PARAM_HAS_SECURE_BATCHES 23 |
310 | #define I915_PARAM_HAS_PINNED_BATCHES 24 | 310 | #define I915_PARAM_HAS_PINNED_BATCHES 24 |
311 | #define I915_PARAM_HAS_EXEC_NO_RELOC 25 | ||
312 | #define I915_PARAM_HAS_EXEC_HANDLE_LUT 26 | ||
311 | 313 | ||
312 | typedef struct drm_i915_getparam { | 314 | typedef struct drm_i915_getparam { |
313 | int param; | 315 | int param; |
@@ -628,7 +630,11 @@ struct drm_i915_gem_exec_object2 { | |||
628 | __u64 offset; | 630 | __u64 offset; |
629 | 631 | ||
630 | #define EXEC_OBJECT_NEEDS_FENCE (1<<0) | 632 | #define EXEC_OBJECT_NEEDS_FENCE (1<<0) |
633 | #define EXEC_OBJECT_NEEDS_GTT (1<<1) | ||
634 | #define EXEC_OBJECT_WRITE (1<<2) | ||
635 | #define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_WRITE<<1) | ||
631 | __u64 flags; | 636 | __u64 flags; |
637 | |||
632 | __u64 rsvd1; | 638 | __u64 rsvd1; |
633 | __u64 rsvd2; | 639 | __u64 rsvd2; |
634 | }; | 640 | }; |
@@ -687,6 +693,20 @@ struct drm_i915_gem_execbuffer2 { | |||
687 | */ | 693 | */ |
688 | #define I915_EXEC_IS_PINNED (1<<10) | 694 | #define I915_EXEC_IS_PINNED (1<<10) |
689 | 695 | ||
696 | /** Provide a hint to the kernel that the command stream and auxiliary | ||
697 | * state buffers already hold the correct presumed addresses and so the | ||
698 | * relocation process may be skipped if no buffers need to be moved in | ||
699 | * preparation for the execbuffer. | ||
700 | */ | ||
701 | #define I915_EXEC_NO_RELOC (1<<11) | ||
702 | |||
703 | /** Use the reloc.handle as an index into the exec object array rather | ||
704 | * than as the per-file handle. | ||
705 | */ | ||
706 | #define I915_EXEC_HANDLE_LUT (1<<12) | ||
707 | |||
708 | #define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_HANDLE_LUT<<1) | ||
709 | |||
690 | #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) | 710 | #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) |
691 | #define i915_execbuffer2_set_context_id(eb2, context) \ | 711 | #define i915_execbuffer2_set_context_id(eb2, context) \ |
692 | (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK | 712 | (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK |
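Taken together, the new flags let userspace opt into the faster execbuf paths: with I915_EXEC_NO_RELOC the kernel trusts each exec_object2.offset as the presumed address and skips relocation processing when nothing moved, and with I915_EXEC_HANDLE_LUT relocation targets index into the exec list instead of being looked up as per-file GEM handles. A sketch of filling the ioctl struct accordingly (buffer setup and the DRM_IOCTL_I915_GEM_EXECBUFFER2 call itself are elided):

```c
#include <string.h>
#include <stdint.h>
#include <drm/i915_drm.h>

/* Sketch: an execbuf on the render ring that opts into both fast paths.
 * Userspace must keep objs[i].offset up to date for NO_RELOC to help,
 * and flag written buffers so domain tracking survives without
 * relocation entries. */
static void fill_execbuf(struct drm_i915_gem_execbuffer2 *eb,
			 struct drm_i915_gem_exec_object2 *objs,
			 uint32_t count, uint32_t batch_len)
{
	memset(eb, 0, sizeof(*eb));
	eb->buffers_ptr = (uintptr_t)objs; /* batch is last in the array */
	eb->buffer_count = count;
	eb->batch_len = batch_len;
	eb->flags = I915_EXEC_RENDER | I915_EXEC_NO_RELOC |
		    I915_EXEC_HANDLE_LUT;
	/* e.g. objs[i].flags |= EXEC_OBJECT_WRITE; for written targets */
}
```

Availability of the two flags is advertised through the matching getparams, I915_PARAM_HAS_EXEC_NO_RELOC and I915_PARAM_HAS_EXEC_HANDLE_LUT, added above.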