aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/char
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2013-02-25 19:46:44 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2013-02-25 19:46:44 -0500
commitfffddfd6c8e0c10c42c6e2cc54ba880fcc36ebbb (patch)
tree71bc5e597124dbaf7550f1e089d675718b3ed5c0 /drivers/char
parent69086a78bdc973ec0b722be790b146e84ba8a8c4 (diff)
parentbe88298b0a3f771a4802f20c5e66af74bfd1dff1 (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm merge from Dave Airlie: "Highlights: - TI LCD controller KMS driver - TI OMAP KMS driver merged from staging - drop gma500 stub driver - the fbcon locking fixes - the vgacon dirty like zebra fix. - open firmware videomode and hdmi common code helpers - major locking rework for kms object handling - pageflip/cursor won't block on polling anymore! - fbcon helper and prime helper cleanups - i915: all over the map, haswell power well enhancements, valleyview macro horrors cleaned up, killing lots of legacy GTT code, - radeon: CS ioctl unification, deprecated UMS support, gpu reset rework, VM fixes - nouveau: reworked thermal code, external dp/tmds encoder support (anx9805), fences sleep instead of polling, - exynos: all over the driver fixes." Lovely conflict in radeon/evergreen_cs.c between commit de0babd60d8d ("drm/radeon: enforce use of radeon_get_ib_value when reading user cmd") and the new changes that modified that evergreen_dma_cs_parse() function. * 'drm-next' of git://people.freedesktop.org/~airlied/linux: (508 commits) drm/tilcdc: only build on arm drm/i915: Revert hdmi HDP pin checks drm/tegra: Add list of framebuffers to debugfs drm/tegra: Fix color expansion drm/tegra: Split DC_CMD_STATE_CONTROL register write drm/tegra: Implement page-flipping support drm/tegra: Implement VBLANK support drm/tegra: Implement .mode_set_base() drm/tegra: Add plane support drm/tegra: Remove bogus tegra_framebuffer structure drm: Add consistency check for page-flipping drm/radeon: Use generic HDMI infoframe helpers drm/tegra: Use generic HDMI infoframe helpers drm: Add EDID helper documentation drm: Add HDMI infoframe helpers video: Add generic HDMI infoframe helpers drm: Add some missing forward declarations drm: Move mode tables to drm_edid.c drm: Remove duplicate drm_mode_cea_vic() gma500: Fix n, m1 and m2 clock limits for sdvo and lvds ...
Diffstat (limited to 'drivers/char')
-rw-r--r--drivers/char/agp/intel-gtt.c128
1 file changed, 73 insertions, 55 deletions
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index dbd901e94ea6..b8e2014cb9cb 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -60,7 +60,6 @@ struct intel_gtt_driver {
60}; 60};
61 61
62static struct _intel_private { 62static struct _intel_private {
63 struct intel_gtt base;
64 const struct intel_gtt_driver *driver; 63 const struct intel_gtt_driver *driver;
65 struct pci_dev *pcidev; /* device one */ 64 struct pci_dev *pcidev; /* device one */
66 struct pci_dev *bridge_dev; 65 struct pci_dev *bridge_dev;
@@ -75,7 +74,18 @@ static struct _intel_private {
75 struct resource ifp_resource; 74 struct resource ifp_resource;
76 int resource_valid; 75 int resource_valid;
77 struct page *scratch_page; 76 struct page *scratch_page;
77 phys_addr_t scratch_page_dma;
78 int refcount; 78 int refcount;
79 /* Whether i915 needs to use the dmar apis or not. */
80 unsigned int needs_dmar : 1;
81 phys_addr_t gma_bus_addr;
82 /* Size of memory reserved for graphics by the BIOS */
83 unsigned int stolen_size;
84 /* Total number of gtt entries. */
85 unsigned int gtt_total_entries;
86 /* Part of the gtt that is mappable by the cpu, for those chips where
87 * this is not the full gtt. */
88 unsigned int gtt_mappable_entries;
79} intel_private; 89} intel_private;
80 90
81#define INTEL_GTT_GEN intel_private.driver->gen 91#define INTEL_GTT_GEN intel_private.driver->gen
@@ -291,15 +301,15 @@ static int intel_gtt_setup_scratch_page(void)
291 get_page(page); 301 get_page(page);
292 set_pages_uc(page, 1); 302 set_pages_uc(page, 1);
293 303
294 if (intel_private.base.needs_dmar) { 304 if (intel_private.needs_dmar) {
295 dma_addr = pci_map_page(intel_private.pcidev, page, 0, 305 dma_addr = pci_map_page(intel_private.pcidev, page, 0,
296 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 306 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
297 if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) 307 if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
298 return -EINVAL; 308 return -EINVAL;
299 309
300 intel_private.base.scratch_page_dma = dma_addr; 310 intel_private.scratch_page_dma = dma_addr;
301 } else 311 } else
302 intel_private.base.scratch_page_dma = page_to_phys(page); 312 intel_private.scratch_page_dma = page_to_phys(page);
303 313
304 intel_private.scratch_page = page; 314 intel_private.scratch_page = page;
305 315
@@ -506,7 +516,7 @@ static unsigned int intel_gtt_total_entries(void)
506 /* On previous hardware, the GTT size was just what was 516 /* On previous hardware, the GTT size was just what was
507 * required to map the aperture. 517 * required to map the aperture.
508 */ 518 */
509 return intel_private.base.gtt_mappable_entries; 519 return intel_private.gtt_mappable_entries;
510 } 520 }
511} 521}
512 522
@@ -546,7 +556,7 @@ static unsigned int intel_gtt_mappable_entries(void)
546static void intel_gtt_teardown_scratch_page(void) 556static void intel_gtt_teardown_scratch_page(void)
547{ 557{
548 set_pages_wb(intel_private.scratch_page, 1); 558 set_pages_wb(intel_private.scratch_page, 1);
549 pci_unmap_page(intel_private.pcidev, intel_private.base.scratch_page_dma, 559 pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
550 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 560 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
551 put_page(intel_private.scratch_page); 561 put_page(intel_private.scratch_page);
552 __free_page(intel_private.scratch_page); 562 __free_page(intel_private.scratch_page);
@@ -562,6 +572,40 @@ static void intel_gtt_cleanup(void)
562 intel_gtt_teardown_scratch_page(); 572 intel_gtt_teardown_scratch_page();
563} 573}
564 574
575/* Certain Gen5 chipsets require idling the GPU before
576 * unmapping anything from the GTT when VT-d is enabled.
577 */
578static inline int needs_ilk_vtd_wa(void)
579{
580#ifdef CONFIG_INTEL_IOMMU
581 const unsigned short gpu_devid = intel_private.pcidev->device;
582
583 /* Query intel_iommu to see if we need the workaround. Presumably that
584 * was loaded first.
585 */
586 if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
587 gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
588 intel_iommu_gfx_mapped)
589 return 1;
590#endif
591 return 0;
592}
593
594static bool intel_gtt_can_wc(void)
595{
596 if (INTEL_GTT_GEN <= 2)
597 return false;
598
599 if (INTEL_GTT_GEN >= 6)
600 return false;
601
602 /* Reports of major corruption with ILK vt'd enabled */
603 if (needs_ilk_vtd_wa())
604 return false;
605
606 return true;
607}
608
565static int intel_gtt_init(void) 609static int intel_gtt_init(void)
566{ 610{
567 u32 gma_addr; 611 u32 gma_addr;
@@ -572,8 +616,8 @@ static int intel_gtt_init(void)
572 if (ret != 0) 616 if (ret != 0)
573 return ret; 617 return ret;
574 618
575 intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries(); 619 intel_private.gtt_mappable_entries = intel_gtt_mappable_entries();
576 intel_private.base.gtt_total_entries = intel_gtt_total_entries(); 620 intel_private.gtt_total_entries = intel_gtt_total_entries();
577 621
578 /* save the PGETBL reg for resume */ 622 /* save the PGETBL reg for resume */
579 intel_private.PGETBL_save = 623 intel_private.PGETBL_save =
@@ -585,13 +629,13 @@ static int intel_gtt_init(void)
585 629
586 dev_info(&intel_private.bridge_dev->dev, 630 dev_info(&intel_private.bridge_dev->dev,
587 "detected gtt size: %dK total, %dK mappable\n", 631 "detected gtt size: %dK total, %dK mappable\n",
588 intel_private.base.gtt_total_entries * 4, 632 intel_private.gtt_total_entries * 4,
589 intel_private.base.gtt_mappable_entries * 4); 633 intel_private.gtt_mappable_entries * 4);
590 634
591 gtt_map_size = intel_private.base.gtt_total_entries * 4; 635 gtt_map_size = intel_private.gtt_total_entries * 4;
592 636
593 intel_private.gtt = NULL; 637 intel_private.gtt = NULL;
594 if (INTEL_GTT_GEN < 6 && INTEL_GTT_GEN > 2) 638 if (intel_gtt_can_wc())
595 intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr, 639 intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr,
596 gtt_map_size); 640 gtt_map_size);
597 if (intel_private.gtt == NULL) 641 if (intel_private.gtt == NULL)
@@ -602,13 +646,12 @@ static int intel_gtt_init(void)
602 iounmap(intel_private.registers); 646 iounmap(intel_private.registers);
603 return -ENOMEM; 647 return -ENOMEM;
604 } 648 }
605 intel_private.base.gtt = intel_private.gtt;
606 649
607 global_cache_flush(); /* FIXME: ? */ 650 global_cache_flush(); /* FIXME: ? */
608 651
609 intel_private.base.stolen_size = intel_gtt_stolen_size(); 652 intel_private.stolen_size = intel_gtt_stolen_size();
610 653
611 intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2; 654 intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
612 655
613 ret = intel_gtt_setup_scratch_page(); 656 ret = intel_gtt_setup_scratch_page();
614 if (ret != 0) { 657 if (ret != 0) {
@@ -623,7 +666,7 @@ static int intel_gtt_init(void)
623 pci_read_config_dword(intel_private.pcidev, I915_GMADDR, 666 pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
624 &gma_addr); 667 &gma_addr);
625 668
626 intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK); 669 intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
627 670
628 return 0; 671 return 0;
629} 672}
@@ -634,8 +677,7 @@ static int intel_fake_agp_fetch_size(void)
634 unsigned int aper_size; 677 unsigned int aper_size;
635 int i; 678 int i;
636 679
637 aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT) 680 aper_size = (intel_private.gtt_mappable_entries << PAGE_SHIFT) / MB(1);
638 / MB(1);
639 681
640 for (i = 0; i < num_sizes; i++) { 682 for (i = 0; i < num_sizes; i++) {
641 if (aper_size == intel_fake_agp_sizes[i].size) { 683 if (aper_size == intel_fake_agp_sizes[i].size) {
@@ -779,7 +821,7 @@ static int intel_fake_agp_configure(void)
779 return -EIO; 821 return -EIO;
780 822
781 intel_private.clear_fake_agp = true; 823 intel_private.clear_fake_agp = true;
782 agp_bridge->gart_bus_addr = intel_private.base.gma_bus_addr; 824 agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
783 825
784 return 0; 826 return 0;
785} 827}
@@ -841,12 +883,9 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
841{ 883{
842 int ret = -EINVAL; 884 int ret = -EINVAL;
843 885
844 if (intel_private.base.do_idle_maps)
845 return -ENODEV;
846
847 if (intel_private.clear_fake_agp) { 886 if (intel_private.clear_fake_agp) {
848 int start = intel_private.base.stolen_size / PAGE_SIZE; 887 int start = intel_private.stolen_size / PAGE_SIZE;
849 int end = intel_private.base.gtt_mappable_entries; 888 int end = intel_private.gtt_mappable_entries;
850 intel_gtt_clear_range(start, end - start); 889 intel_gtt_clear_range(start, end - start);
851 intel_private.clear_fake_agp = false; 890 intel_private.clear_fake_agp = false;
852 } 891 }
@@ -857,7 +896,7 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
857 if (mem->page_count == 0) 896 if (mem->page_count == 0)
858 goto out; 897 goto out;
859 898
860 if (pg_start + mem->page_count > intel_private.base.gtt_total_entries) 899 if (pg_start + mem->page_count > intel_private.gtt_total_entries)
861 goto out_err; 900 goto out_err;
862 901
863 if (type != mem->type) 902 if (type != mem->type)
@@ -869,7 +908,7 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
869 if (!mem->is_flushed) 908 if (!mem->is_flushed)
870 global_cache_flush(); 909 global_cache_flush();
871 910
872 if (intel_private.base.needs_dmar) { 911 if (intel_private.needs_dmar) {
873 struct sg_table st; 912 struct sg_table st;
874 913
875 ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st); 914 ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
@@ -895,7 +934,7 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
895 unsigned int i; 934 unsigned int i;
896 935
897 for (i = first_entry; i < (first_entry + num_entries); i++) { 936 for (i = first_entry; i < (first_entry + num_entries); i++) {
898 intel_private.driver->write_entry(intel_private.base.scratch_page_dma, 937 intel_private.driver->write_entry(intel_private.scratch_page_dma,
899 i, 0); 938 i, 0);
900 } 939 }
901 readl(intel_private.gtt+i-1); 940 readl(intel_private.gtt+i-1);
@@ -908,12 +947,9 @@ static int intel_fake_agp_remove_entries(struct agp_memory *mem,
908 if (mem->page_count == 0) 947 if (mem->page_count == 0)
909 return 0; 948 return 0;
910 949
911 if (intel_private.base.do_idle_maps)
912 return -ENODEV;
913
914 intel_gtt_clear_range(pg_start, mem->page_count); 950 intel_gtt_clear_range(pg_start, mem->page_count);
915 951
916 if (intel_private.base.needs_dmar) { 952 if (intel_private.needs_dmar) {
917 intel_gtt_unmap_memory(mem->sg_list, mem->num_sg); 953 intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
918 mem->sg_list = NULL; 954 mem->sg_list = NULL;
919 mem->num_sg = 0; 955 mem->num_sg = 0;
@@ -1070,25 +1106,6 @@ static void i965_write_entry(dma_addr_t addr,
1070 writel(addr | pte_flags, intel_private.gtt + entry); 1106 writel(addr | pte_flags, intel_private.gtt + entry);
1071} 1107}
1072 1108
1073/* Certain Gen5 chipsets require idling the GPU before
1074 * unmapping anything from the GTT when VT-d is enabled.
1075 */
1076static inline int needs_idle_maps(void)
1077{
1078#ifdef CONFIG_INTEL_IOMMU
1079 const unsigned short gpu_devid = intel_private.pcidev->device;
1080
1081 /* Query intel_iommu to see if we need the workaround. Presumably that
1082 * was loaded first.
1083 */
1084 if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
1085 gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
1086 intel_iommu_gfx_mapped)
1087 return 1;
1088#endif
1089 return 0;
1090}
1091
1092static int i9xx_setup(void) 1109static int i9xx_setup(void)
1093{ 1110{
1094 u32 reg_addr, gtt_addr; 1111 u32 reg_addr, gtt_addr;
@@ -1116,9 +1133,6 @@ static int i9xx_setup(void)
1116 break; 1133 break;
1117 } 1134 }
1118 1135
1119 if (needs_idle_maps())
1120 intel_private.base.do_idle_maps = 1;
1121
1122 intel_i9xx_setup_flush(); 1136 intel_i9xx_setup_flush();
1123 1137
1124 return 0; 1138 return 0;
@@ -1390,9 +1404,13 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
1390} 1404}
1391EXPORT_SYMBOL(intel_gmch_probe); 1405EXPORT_SYMBOL(intel_gmch_probe);
1392 1406
1393struct intel_gtt *intel_gtt_get(void) 1407void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
1408 phys_addr_t *mappable_base, unsigned long *mappable_end)
1394{ 1409{
1395 return &intel_private.base; 1410 *gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
1411 *stolen_size = intel_private.stolen_size;
1412 *mappable_base = intel_private.gma_bus_addr;
1413 *mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT;
1396} 1414}
1397EXPORT_SYMBOL(intel_gtt_get); 1415EXPORT_SYMBOL(intel_gtt_get);
1398 1416