diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-15 12:18:07 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-15 12:18:07 -0400 |
commit | 66a4fe0cb80a9fde8cb173289afb863fd279466a (patch) | |
tree | e1180ba7cbd123fb2f0bf510a955704725465776 /drivers/char/agp/intel-agp.c | |
parent | 18240904960a39e582ced8ba8ececb10b8c22dd3 (diff) | |
parent | 121264827656f5f06328b17983c796af17dc5949 (diff) |
Merge branch 'agp-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/agp-2.6
* 'agp-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/agp-2.6:
agp/intel: remove restore in resume
agp: fix uninorth build
intel-agp: Set dma mask for i915
agp: kill phys_to_gart() and gart_to_phys()
intel-agp: fix sglist allocation to avoid vmalloc()
intel-agp: Move repeated sglist free into separate function
agp: Switch agp_{un,}map_page() to take struct page * argument
agp: tidy up handling of scratch pages w.r.t. DMA API
intel_agp: Use PCI DMA API correctly on chipsets new enough to have IOMMU
agp: Add generic support for graphics dma remapping
agp: Switch mask_memory() method to take address argument again, not page
Diffstat (limited to 'drivers/char/agp/intel-agp.c')
-rw-r--r-- | drivers/char/agp/intel-agp.c | 182 |
1 file changed, 158 insertions, 24 deletions
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index c58557790585..1540e693d91e 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c | |||
@@ -10,6 +10,16 @@ | |||
10 | #include <linux/agp_backend.h> | 10 | #include <linux/agp_backend.h> |
11 | #include "agp.h" | 11 | #include "agp.h" |
12 | 12 | ||
13 | /* | ||
14 | * If we have Intel graphics, we're not going to have anything other than | ||
15 | * an Intel IOMMU. So make the correct use of the PCI DMA API contingent | ||
16 | * on the Intel IOMMU support (CONFIG_DMAR). | ||
17 | * Only newer chipsets need to bother with this, of course. | ||
18 | */ | ||
19 | #ifdef CONFIG_DMAR | ||
20 | #define USE_PCI_DMA_API 1 | ||
21 | #endif | ||
22 | |||
13 | #define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588 | 23 | #define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588 |
14 | #define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a | 24 | #define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a |
15 | #define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970 | 25 | #define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970 |
@@ -172,6 +182,123 @@ static struct _intel_private { | |||
172 | int resource_valid; | 182 | int resource_valid; |
173 | } intel_private; | 183 | } intel_private; |
174 | 184 | ||
185 | #ifdef USE_PCI_DMA_API | ||
186 | static int intel_agp_map_page(struct page *page, dma_addr_t *ret) | ||
187 | { | ||
188 | *ret = pci_map_page(intel_private.pcidev, page, 0, | ||
189 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
190 | if (pci_dma_mapping_error(intel_private.pcidev, *ret)) | ||
191 | return -EINVAL; | ||
192 | return 0; | ||
193 | } | ||
194 | |||
195 | static void intel_agp_unmap_page(struct page *page, dma_addr_t dma) | ||
196 | { | ||
197 | pci_unmap_page(intel_private.pcidev, dma, | ||
198 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
199 | } | ||
200 | |||
201 | static void intel_agp_free_sglist(struct agp_memory *mem) | ||
202 | { | ||
203 | struct sg_table st; | ||
204 | |||
205 | st.sgl = mem->sg_list; | ||
206 | st.orig_nents = st.nents = mem->page_count; | ||
207 | |||
208 | sg_free_table(&st); | ||
209 | |||
210 | mem->sg_list = NULL; | ||
211 | mem->num_sg = 0; | ||
212 | } | ||
213 | |||
214 | static int intel_agp_map_memory(struct agp_memory *mem) | ||
215 | { | ||
216 | struct sg_table st; | ||
217 | struct scatterlist *sg; | ||
218 | int i; | ||
219 | |||
220 | DBG("try mapping %lu pages\n", (unsigned long)mem->page_count); | ||
221 | |||
222 | if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL)) | ||
223 | return -ENOMEM; | ||
224 | |||
225 | mem->sg_list = sg = st.sgl; | ||
226 | |||
227 | for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg)) | ||
228 | sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0); | ||
229 | |||
230 | mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list, | ||
231 | mem->page_count, PCI_DMA_BIDIRECTIONAL); | ||
232 | if (unlikely(!mem->num_sg)) { | ||
233 | intel_agp_free_sglist(mem); | ||
234 | return -ENOMEM; | ||
235 | } | ||
236 | return 0; | ||
237 | } | ||
238 | |||
239 | static void intel_agp_unmap_memory(struct agp_memory *mem) | ||
240 | { | ||
241 | DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count); | ||
242 | |||
243 | pci_unmap_sg(intel_private.pcidev, mem->sg_list, | ||
244 | mem->page_count, PCI_DMA_BIDIRECTIONAL); | ||
245 | intel_agp_free_sglist(mem); | ||
246 | } | ||
247 | |||
248 | static void intel_agp_insert_sg_entries(struct agp_memory *mem, | ||
249 | off_t pg_start, int mask_type) | ||
250 | { | ||
251 | struct scatterlist *sg; | ||
252 | int i, j; | ||
253 | |||
254 | j = pg_start; | ||
255 | |||
256 | WARN_ON(!mem->num_sg); | ||
257 | |||
258 | if (mem->num_sg == mem->page_count) { | ||
259 | for_each_sg(mem->sg_list, sg, mem->page_count, i) { | ||
260 | writel(agp_bridge->driver->mask_memory(agp_bridge, | ||
261 | sg_dma_address(sg), mask_type), | ||
262 | intel_private.gtt+j); | ||
263 | j++; | ||
264 | } | ||
265 | } else { | ||
266 | /* sg may merge pages, but we have to separate | ||
267 | * per-page addr for GTT */ | ||
268 | unsigned int len, m; | ||
269 | |||
270 | for_each_sg(mem->sg_list, sg, mem->num_sg, i) { | ||
271 | len = sg_dma_len(sg) / PAGE_SIZE; | ||
272 | for (m = 0; m < len; m++) { | ||
273 | writel(agp_bridge->driver->mask_memory(agp_bridge, | ||
274 | sg_dma_address(sg) + m * PAGE_SIZE, | ||
275 | mask_type), | ||
276 | intel_private.gtt+j); | ||
277 | j++; | ||
278 | } | ||
279 | } | ||
280 | } | ||
281 | readl(intel_private.gtt+j-1); | ||
282 | } | ||
283 | |||
284 | #else | ||
285 | |||
286 | static void intel_agp_insert_sg_entries(struct agp_memory *mem, | ||
287 | off_t pg_start, int mask_type) | ||
288 | { | ||
289 | int i, j; | ||
290 | |||
291 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { | ||
292 | writel(agp_bridge->driver->mask_memory(agp_bridge, | ||
293 | page_to_phys(mem->pages[i]), mask_type), | ||
294 | intel_private.gtt+j); | ||
295 | } | ||
296 | |||
297 | readl(intel_private.gtt+j-1); | ||
298 | } | ||
299 | |||
300 | #endif | ||
301 | |||
175 | static int intel_i810_fetch_size(void) | 302 | static int intel_i810_fetch_size(void) |
176 | { | 303 | { |
177 | u32 smram_miscc; | 304 | u32 smram_miscc; |
@@ -345,8 +472,7 @@ static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start, | |||
345 | global_cache_flush(); | 472 | global_cache_flush(); |
346 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { | 473 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { |
347 | writel(agp_bridge->driver->mask_memory(agp_bridge, | 474 | writel(agp_bridge->driver->mask_memory(agp_bridge, |
348 | mem->pages[i], | 475 | page_to_phys(mem->pages[i]), mask_type), |
349 | mask_type), | ||
350 | intel_private.registers+I810_PTE_BASE+(j*4)); | 476 | intel_private.registers+I810_PTE_BASE+(j*4)); |
351 | } | 477 | } |
352 | readl(intel_private.registers+I810_PTE_BASE+((j-1)*4)); | 478 | readl(intel_private.registers+I810_PTE_BASE+((j-1)*4)); |
@@ -463,9 +589,8 @@ static void intel_i810_free_by_type(struct agp_memory *curr) | |||
463 | } | 589 | } |
464 | 590 | ||
465 | static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge, | 591 | static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge, |
466 | struct page *page, int type) | 592 | dma_addr_t addr, int type) |
467 | { | 593 | { |
468 | unsigned long addr = phys_to_gart(page_to_phys(page)); | ||
469 | /* Type checking must be done elsewhere */ | 594 | /* Type checking must be done elsewhere */ |
470 | return addr | bridge->driver->masks[type].mask; | 595 | return addr | bridge->driver->masks[type].mask; |
471 | } | 596 | } |
@@ -853,7 +978,7 @@ static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start, | |||
853 | 978 | ||
854 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { | 979 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { |
855 | writel(agp_bridge->driver->mask_memory(agp_bridge, | 980 | writel(agp_bridge->driver->mask_memory(agp_bridge, |
856 | mem->pages[i], mask_type), | 981 | page_to_phys(mem->pages[i]), mask_type), |
857 | intel_private.registers+I810_PTE_BASE+(j*4)); | 982 | intel_private.registers+I810_PTE_BASE+(j*4)); |
858 | } | 983 | } |
859 | readl(intel_private.registers+I810_PTE_BASE+((j-1)*4)); | 984 | readl(intel_private.registers+I810_PTE_BASE+((j-1)*4)); |
@@ -1017,6 +1142,12 @@ static int intel_i915_configure(void) | |||
1017 | 1142 | ||
1018 | intel_i9xx_setup_flush(); | 1143 | intel_i9xx_setup_flush(); |
1019 | 1144 | ||
1145 | #ifdef USE_PCI_DMA_API | ||
1146 | if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36))) | ||
1147 | dev_err(&intel_private.pcidev->dev, | ||
1148 | "set gfx device dma mask 36bit failed!\n"); | ||
1149 | #endif | ||
1150 | |||
1020 | return 0; | 1151 | return 0; |
1021 | } | 1152 | } |
1022 | 1153 | ||
@@ -1041,7 +1172,7 @@ static void intel_i915_chipset_flush(struct agp_bridge_data *bridge) | |||
1041 | static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start, | 1172 | static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start, |
1042 | int type) | 1173 | int type) |
1043 | { | 1174 | { |
1044 | int i, j, num_entries; | 1175 | int num_entries; |
1045 | void *temp; | 1176 | void *temp; |
1046 | int ret = -EINVAL; | 1177 | int ret = -EINVAL; |
1047 | int mask_type; | 1178 | int mask_type; |
@@ -1065,7 +1196,7 @@ static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start, | |||
1065 | if ((pg_start + mem->page_count) > num_entries) | 1196 | if ((pg_start + mem->page_count) > num_entries) |
1066 | goto out_err; | 1197 | goto out_err; |
1067 | 1198 | ||
1068 | /* The i915 can't check the GTT for entries since its read only, | 1199 | /* The i915 can't check the GTT for entries since it's read only; |
1069 | * depend on the caller to make the correct offset decisions. | 1200 | * depend on the caller to make the correct offset decisions. |
1070 | */ | 1201 | */ |
1071 | 1202 | ||
@@ -1081,12 +1212,7 @@ static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start, | |||
1081 | if (!mem->is_flushed) | 1212 | if (!mem->is_flushed) |
1082 | global_cache_flush(); | 1213 | global_cache_flush(); |
1083 | 1214 | ||
1084 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { | 1215 | intel_agp_insert_sg_entries(mem, pg_start, mask_type); |
1085 | writel(agp_bridge->driver->mask_memory(agp_bridge, | ||
1086 | mem->pages[i], mask_type), intel_private.gtt+j); | ||
1087 | } | ||
1088 | |||
1089 | readl(intel_private.gtt+j-1); | ||
1090 | agp_bridge->driver->tlb_flush(mem); | 1216 | agp_bridge->driver->tlb_flush(mem); |
1091 | 1217 | ||
1092 | out: | 1218 | out: |
@@ -1198,9 +1324,8 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) | |||
1198 | * this conditional. | 1324 | * this conditional. |
1199 | */ | 1325 | */ |
1200 | static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge, | 1326 | static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge, |
1201 | struct page *page, int type) | 1327 | dma_addr_t addr, int type) |
1202 | { | 1328 | { |
1203 | dma_addr_t addr = phys_to_gart(page_to_phys(page)); | ||
1204 | /* Shift high bits down */ | 1329 | /* Shift high bits down */ |
1205 | addr |= (addr >> 28) & 0xf0; | 1330 | addr |= (addr >> 28) & 0xf0; |
1206 | 1331 | ||
@@ -2006,6 +2131,12 @@ static const struct agp_bridge_driver intel_915_driver = { | |||
2006 | .agp_destroy_pages = agp_generic_destroy_pages, | 2131 | .agp_destroy_pages = agp_generic_destroy_pages, |
2007 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | 2132 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, |
2008 | .chipset_flush = intel_i915_chipset_flush, | 2133 | .chipset_flush = intel_i915_chipset_flush, |
2134 | #ifdef USE_PCI_DMA_API | ||
2135 | .agp_map_page = intel_agp_map_page, | ||
2136 | .agp_unmap_page = intel_agp_unmap_page, | ||
2137 | .agp_map_memory = intel_agp_map_memory, | ||
2138 | .agp_unmap_memory = intel_agp_unmap_memory, | ||
2139 | #endif | ||
2009 | }; | 2140 | }; |
2010 | 2141 | ||
2011 | static const struct agp_bridge_driver intel_i965_driver = { | 2142 | static const struct agp_bridge_driver intel_i965_driver = { |
@@ -2034,6 +2165,12 @@ static const struct agp_bridge_driver intel_i965_driver = { | |||
2034 | .agp_destroy_pages = agp_generic_destroy_pages, | 2165 | .agp_destroy_pages = agp_generic_destroy_pages, |
2035 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | 2166 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, |
2036 | .chipset_flush = intel_i915_chipset_flush, | 2167 | .chipset_flush = intel_i915_chipset_flush, |
2168 | #ifdef USE_PCI_DMA_API | ||
2169 | .agp_map_page = intel_agp_map_page, | ||
2170 | .agp_unmap_page = intel_agp_unmap_page, | ||
2171 | .agp_map_memory = intel_agp_map_memory, | ||
2172 | .agp_unmap_memory = intel_agp_unmap_memory, | ||
2173 | #endif | ||
2037 | }; | 2174 | }; |
2038 | 2175 | ||
2039 | static const struct agp_bridge_driver intel_7505_driver = { | 2176 | static const struct agp_bridge_driver intel_7505_driver = { |
@@ -2088,6 +2225,12 @@ static const struct agp_bridge_driver intel_g33_driver = { | |||
2088 | .agp_destroy_pages = agp_generic_destroy_pages, | 2225 | .agp_destroy_pages = agp_generic_destroy_pages, |
2089 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | 2226 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, |
2090 | .chipset_flush = intel_i915_chipset_flush, | 2227 | .chipset_flush = intel_i915_chipset_flush, |
2228 | #ifdef USE_PCI_DMA_API | ||
2229 | .agp_map_page = intel_agp_map_page, | ||
2230 | .agp_unmap_page = intel_agp_unmap_page, | ||
2231 | .agp_map_memory = intel_agp_map_memory, | ||
2232 | .agp_unmap_memory = intel_agp_unmap_memory, | ||
2233 | #endif | ||
2091 | }; | 2234 | }; |
2092 | 2235 | ||
2093 | static int find_gmch(u16 device) | 2236 | static int find_gmch(u16 device) |
@@ -2313,15 +2456,6 @@ static int agp_intel_resume(struct pci_dev *pdev) | |||
2313 | struct agp_bridge_data *bridge = pci_get_drvdata(pdev); | 2456 | struct agp_bridge_data *bridge = pci_get_drvdata(pdev); |
2314 | int ret_val; | 2457 | int ret_val; |
2315 | 2458 | ||
2316 | pci_restore_state(pdev); | ||
2317 | |||
2318 | /* We should restore our graphics device's config space, | ||
2319 | * as host bridge (00:00) resumes before graphics device (02:00), | ||
2320 | * then our access to its pci space can work right. | ||
2321 | */ | ||
2322 | if (intel_private.pcidev) | ||
2323 | pci_restore_state(intel_private.pcidev); | ||
2324 | |||
2325 | if (bridge->driver == &intel_generic_driver) | 2459 | if (bridge->driver == &intel_generic_driver) |
2326 | intel_configure(); | 2460 | intel_configure(); |
2327 | else if (bridge->driver == &intel_850_driver) | 2461 | else if (bridge->driver == &intel_850_driver) |