author		Chris Wilson <chris@chris-wilson.co.uk>	2012-06-01 10:20:22 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2012-09-20 08:22:57 -0400
commit		9da3da660d8c19a54f6e93361d147509be3fff84 (patch)
tree		3479ef13a733975a02dd679e3fc9ae0872e3be25 /drivers/char
parent		f60d7f0c1d55a935475ab394955cafddefaa6533 (diff)
drm/i915: Replace the array of pages with a scatterlist
Rather than have multiple data structures for describing our page layout
in conjunction with the array of pages, we can migrate all users over to
a scatterlist.
One major advantage this offers, beyond unifying the page tracking
structures, is that we replace the vmalloc'ed array (which can be up to
a megabyte in size) with a chain of individual pages, which helps reduce
memory pressure.
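
As a rough illustration of that migration (a sketch only, loosely mirroring
intel_gtt_map_memory() in the diff below; the helper name is hypothetical),
an array of struct page pointers can be packed into an sg_table like so:

#include <linux/scatterlist.h>

/*
 * Sketch: build an sg_table from an array of pages. sg_alloc_table()
 * allocates the scatterlist as a chain of page-sized blocks rather
 * than one large vmalloc'ed array.
 */
static int example_pages_to_sg(struct page **pages, unsigned int num_entries,
			       struct sg_table *st)
{
	struct scatterlist *sg;
	unsigned int i;

	if (sg_alloc_table(st, num_entries, GFP_KERNEL))
		return -ENOMEM;

	for_each_sg(st->sgl, sg, num_entries, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	return 0;
}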
The disadvantage is that we then do not have a simple array to iterate,
or to access randomly. The common case for this is the relocation
processing, which will typically fit within a single scatterlist page
and so be almost the same cost as the simple array. For iterating over
the array, the extra function call could be optimised away, but in
reality it is an insignificant cost compared to either binding the pages
or performing the pwrite/pread.
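
For concreteness, a minimal sketch of the two iteration styles (the
surrounding function is hypothetical; only for_each_sg()/sg_page() are the
real kernel helpers):

/*
 * Sketch: a plain page array allows direct indexed (random) access,
 * while a chained scatterlist is walked with for_each_sg(), which
 * advances through sg_next() on every step.
 */
static void example_iteration(struct page **pages, unsigned int n,
			      struct sg_table *st)
{
	struct scatterlist *sg;
	struct page *page;
	unsigned int i;

	page = pages[n / 2];		/* array: O(1) random access */

	for_each_sg(st->sgl, sg, st->nents, i)
		page = sg_page(sg);	/* list: one sg_next() per entry */

	(void)page;
}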
v2: Fix drm_clflush_sg() to not invoke wbinvd as well! And fix the
trivial compile error from rebasing.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/char')
-rw-r--r--	drivers/char/agp/intel-gtt.c	51
1 file changed, 22 insertions(+), 29 deletions(-)
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 58e32f7c3229..7fa655ac24d8 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -84,40 +84,33 @@ static struct _intel_private {
 #define IS_IRONLAKE	intel_private.driver->is_ironlake
 #define HAS_PGTBL_EN	intel_private.driver->has_pgtbl_enable
 
-int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
-			 struct scatterlist **sg_list, int *num_sg)
+static int intel_gtt_map_memory(struct page **pages,
+				unsigned int num_entries,
+				struct sg_table *st)
 {
-	struct sg_table st;
 	struct scatterlist *sg;
 	int i;
 
-	if (*sg_list)
-		return 0; /* already mapped (for e.g. resume */
-
 	DBG("try mapping %lu pages\n", (unsigned long)num_entries);
 
-	if (sg_alloc_table(&st, num_entries, GFP_KERNEL))
+	if (sg_alloc_table(st, num_entries, GFP_KERNEL))
 		goto err;
 
-	*sg_list = sg = st.sgl;
-
-	for (i = 0 ; i < num_entries; i++, sg = sg_next(sg))
+	for_each_sg(st->sgl, sg, num_entries, i)
 		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 
-	*num_sg = pci_map_sg(intel_private.pcidev, *sg_list,
-			     num_entries, PCI_DMA_BIDIRECTIONAL);
-	if (unlikely(!*num_sg))
+	if (!pci_map_sg(intel_private.pcidev,
+			st->sgl, st->nents, PCI_DMA_BIDIRECTIONAL))
 		goto err;
 
 	return 0;
 
 err:
-	sg_free_table(&st);
+	sg_free_table(st);
 	return -ENOMEM;
 }
-EXPORT_SYMBOL(intel_gtt_map_memory);
 
-void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
+static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
 {
 	struct sg_table st;
 	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
@@ -130,7 +123,6 @@ void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
 
 	sg_free_table(&st);
 }
-EXPORT_SYMBOL(intel_gtt_unmap_memory);
 
 static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
 {
@@ -879,8 +871,7 @@ static bool i830_check_flags(unsigned int flags)
 	return false;
 }
 
-void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
-				 unsigned int sg_len,
+void intel_gtt_insert_sg_entries(struct sg_table *st,
 				 unsigned int pg_start,
 				 unsigned int flags)
 {
@@ -892,12 +883,11 @@ void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
 
 	/* sg may merge pages, but we have to separate
 	 * per-page addr for GTT */
-	for_each_sg(sg_list, sg, sg_len, i) {
+	for_each_sg(st->sgl, sg, st->nents, i) {
 		len = sg_dma_len(sg) >> PAGE_SHIFT;
 		for (m = 0; m < len; m++) {
 			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-			intel_private.driver->write_entry(addr,
-							  j, flags);
+			intel_private.driver->write_entry(addr, j, flags);
 			j++;
 		}
 	}
@@ -905,8 +895,10 @@ void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
 }
 EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
 
-void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
-			    struct page **pages, unsigned int flags)
+static void intel_gtt_insert_pages(unsigned int first_entry,
+				   unsigned int num_entries,
+				   struct page **pages,
+				   unsigned int flags)
 {
 	int i, j;
 
@@ -917,7 +909,6 @@ void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
 	}
 	readl(intel_private.gtt+j-1);
 }
-EXPORT_SYMBOL(intel_gtt_insert_pages);
 
 static int intel_fake_agp_insert_entries(struct agp_memory *mem,
 					 off_t pg_start, int type)
@@ -953,13 +944,15 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
 	global_cache_flush();
 
 	if (intel_private.base.needs_dmar) {
-		ret = intel_gtt_map_memory(mem->pages, mem->page_count,
-					   &mem->sg_list, &mem->num_sg);
+		struct sg_table st;
+
+		ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
 		if (ret != 0)
 			return ret;
 
-		intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
-					    pg_start, type);
+		intel_gtt_insert_sg_entries(&st, pg_start, type);
+		mem->sg_list = st.sgl;
+		mem->num_sg = st.nents;
 	} else
 		intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
 				       type);