diff options
author | venkatesh.pallipadi@intel.com <venkatesh.pallipadi@intel.com> | 2008-03-18 20:00:19 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-04-17 11:41:19 -0400 |
commit | 03d72aa18f15df9987fe5837284e15b9ccf6e3f8 (patch) | |
tree | 4acfdd5a8a652cf19906a26a183f5d18f3777a83 /arch/x86/pci/i386.c | |
parent | 1219333dfdd488e85f08cf07881b8bc63cf92f21 (diff) |
x86: PAT use reserve free memtype in pci_mmap_page_range
Add reserve_memtype and free_memtype wrappers for pci_mmap_page_range. Free
is called on unmap, but identity map continues to be mapped as per
pci_mmap_page_range request, until next request for the same region calls
ioremap_change_attr(), which will go through without conflict. This way of
mapping is identical to one used in ioremap/iounmap.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/pci/i386.c')
-rw-r--r-- | arch/x86/pci/i386.c | 68 |
1 files changed, 60 insertions, 8 deletions
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index 103b9dff1213..4ebf52f6b1fd 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c | |||
@@ -30,6 +30,9 @@ | |||
30 | #include <linux/init.h> | 30 | #include <linux/init.h> |
31 | #include <linux/ioport.h> | 31 | #include <linux/ioport.h> |
32 | #include <linux/errno.h> | 32 | #include <linux/errno.h> |
33 | #include <linux/bootmem.h> | ||
34 | |||
35 | #include <asm/pat.h> | ||
33 | 36 | ||
34 | #include "pci.h" | 37 | #include "pci.h" |
35 | 38 | ||
@@ -297,10 +300,34 @@ void pcibios_set_master(struct pci_dev *dev) | |||
297 | pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); | 300 | pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); |
298 | } | 301 | } |
299 | 302 | ||
303 | static void pci_unmap_page_range(struct vm_area_struct *vma) | ||
304 | { | ||
305 | u64 addr = (u64)vma->vm_pgoff << PAGE_SHIFT; | ||
306 | free_memtype(addr, addr + vma->vm_end - vma->vm_start); | ||
307 | } | ||
308 | |||
309 | static void pci_track_mmap_page_range(struct vm_area_struct *vma) | ||
310 | { | ||
311 | u64 addr = (u64)vma->vm_pgoff << PAGE_SHIFT; | ||
312 | unsigned long flags = pgprot_val(vma->vm_page_prot) | ||
313 | & _PAGE_CACHE_MASK; | ||
314 | |||
315 | reserve_memtype(addr, addr + vma->vm_end - vma->vm_start, flags, NULL); | ||
316 | } | ||
317 | |||
318 | static struct vm_operations_struct pci_mmap_ops = { | ||
319 | .open = pci_track_mmap_page_range, | ||
320 | .close = pci_unmap_page_range, | ||
321 | }; | ||
322 | |||
300 | int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | 323 | int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, |
301 | enum pci_mmap_state mmap_state, int write_combine) | 324 | enum pci_mmap_state mmap_state, int write_combine) |
302 | { | 325 | { |
303 | unsigned long prot; | 326 | unsigned long prot; |
327 | u64 addr = vma->vm_pgoff << PAGE_SHIFT; | ||
328 | unsigned long len = vma->vm_end - vma->vm_start; | ||
329 | unsigned long flags; | ||
330 | unsigned long new_flags; | ||
304 | 331 | ||
305 | /* I/O space cannot be accessed via normal processor loads and | 332 | /* I/O space cannot be accessed via normal processor loads and |
306 | * stores on this platform. | 333 | * stores on this platform. |
@@ -308,21 +335,46 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | |||
308 | if (mmap_state == pci_mmap_io) | 335 | if (mmap_state == pci_mmap_io) |
309 | return -EINVAL; | 336 | return -EINVAL; |
310 | 337 | ||
311 | /* Leave vm_pgoff as-is, the PCI space address is the physical | ||
312 | * address on this platform. | ||
313 | */ | ||
314 | prot = pgprot_val(vma->vm_page_prot); | 338 | prot = pgprot_val(vma->vm_page_prot); |
315 | if (boot_cpu_data.x86 > 3) | 339 | if (pat_wc_enabled && write_combine) |
316 | prot |= _PAGE_PCD | _PAGE_PWT; | 340 | prot |= _PAGE_CACHE_WC; |
341 | else if (boot_cpu_data.x86 > 3) | ||
342 | prot |= _PAGE_CACHE_UC; | ||
343 | |||
317 | vma->vm_page_prot = __pgprot(prot); | 344 | vma->vm_page_prot = __pgprot(prot); |
318 | 345 | ||
319 | /* Write-combine setting is ignored, it is changed via the mtrr | 346 | flags = pgprot_val(vma->vm_page_prot) & _PAGE_CACHE_MASK; |
320 | * interfaces on this platform. | 347 | if (reserve_memtype(addr, addr + len, flags, &new_flags)) { |
321 | */ | 348 | /* |
349 | * Do not fallback to certain memory types with certain | ||
350 | * requested type: | ||
351 | * - request is uncached, return cannot be write-back | ||
352 | * - request is uncached, return cannot be write-combine | ||
353 | * - request is write-combine, return cannot be write-back | ||
354 | */ | ||
355 | if ((flags == _PAGE_CACHE_UC && | ||
356 | (new_flags == _PAGE_CACHE_WB || | ||
357 | new_flags == _PAGE_CACHE_WC)) || | ||
358 | (flags == _PAGE_CACHE_WC && | ||
359 | new_flags == _PAGE_CACHE_WB)) { | ||
360 | free_memtype(addr, addr+len); | ||
361 | return -EINVAL; | ||
362 | } | ||
363 | flags = new_flags; | ||
364 | } | ||
365 | |||
366 | if (vma->vm_pgoff <= max_pfn_mapped && | ||
367 | ioremap_change_attr((unsigned long)__va(addr), len, flags)) { | ||
368 | free_memtype(addr, addr + len); | ||
369 | return -EINVAL; | ||
370 | } | ||
371 | |||
322 | if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, | 372 | if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, |
323 | vma->vm_end - vma->vm_start, | 373 | vma->vm_end - vma->vm_start, |
324 | vma->vm_page_prot)) | 374 | vma->vm_page_prot)) |
325 | return -EAGAIN; | 375 | return -EAGAIN; |
326 | 376 | ||
377 | vma->vm_ops = &pci_mmap_ops; | ||
378 | |||
327 | return 0; | 379 | return 0; |
328 | } | 380 | } |