author     Suresh Siddha <suresh.b.siddha@intel.com>        2009-04-03 17:21:52 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-04-03 17:43:29 -0400
commit     5a3ae276057840f0e60664c12fc3ef80aa59d1d4
tree       e51666426f6b75a4ebdc4049d0356f69e8331e6f /arch/x86/pci/i386.c
parent     78609a812e9afa87202631d128018361f68c44a9
x86, PAT: Remove duplicate memtype reserve in pci mmap
The pci mmap code has been doing its own memtype reserve for a while now.
Recently we added memtype tracking in remap_pfn_range, and the pci code
indirectly calls remap_pfn_range, so we don't need separate tracking in
the pci code anymore. Which means a patch that removes ~50 lines of
code :-).
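
For context, the now-redundant reservation happens automatically further
down the call chain. A rough sketch (reconstructed from the tree of that
era; treat the generic/arch hook names as an assumption, not part of this
patch):

	/*
	 * pci_mmap_page_range()            arch/x86/pci/i386.c
	 *   -> io_remap_pfn_range()        remap_pfn_range() on x86
	 *     -> remap_pfn_range()         mm/memory.c
	 *       -> track_pfn_vma_new()     arch/x86/mm/pat.c
	 *         -> reserve_memtype()     the same reservation the
	 *                                  removed pci code did by hand
	 */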
Also, we recently found that the pci tracking does not work the way we
expect in some cases. Specifically, a user-level X mmap of pci space,
with some recent versions of X, runs into vm_page_prot getting reset.
The pci tracking uses vm_page_prot to pass the protection type from
parent to child during fork:
a) Parent does a pci mmap
b) We look at PAT and get either UC_MINUS or WC mapping for parent
c) Store that mapping type in vma vm_page_prot for future use
d) This thread does a fork
e) Fork results in mmap_ops->open being called for the child process
f) We get the vm_page_prot from vma and reserve that type for the child process
But between c) and e) above, the vma's vm_page_prot gets reset to zero.
This results in the PAT reserve failing at fork time, as reported here:
http://marc.info/?l=linux-kernel&m=123858163103240&w=2
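
The dependence is visible in the ->open hook this patch removes (see the
diff below). Here is an annotated copy, with the suspected failure mode
spelled out in a comment (the WB-vs-UC_MINUS/WC conflict is our reading
of the report above, not something stated in it):

	static void pci_track_mmap_page_range(struct vm_area_struct *vma)
	{
		u64 addr = (u64)vma->vm_pgoff << PAGE_SHIFT;
		/*
		 * If vm_page_prot was zeroed between the parent's mmap
		 * and this fork-time call, the cache bits read back as
		 * 0, i.e. WB. Asking reserve_memtype() for WB over a
		 * range the parent already holds as UC_MINUS or WC then
		 * fails the conflict check -- and the return value is
		 * not even looked at here.
		 */
		unsigned long flags = pgprot_val(vma->vm_page_prot)
							& _PAGE_CACHE_MASK;

		reserve_memtype(addr, addr + vma->vm_end - vma->vm_start,
				flags, NULL);
	}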
This cleanup makes the above problem go away, as we no longer depend on
vm_page_prot in our PAT code.
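
Roughly, the replacement works because fork-time tracking now reads the
protection back from the page tables instead of trusting the vma. A
hypothetical sketch along the lines of the era's arch/x86/mm/pat.c
(exact signatures and helpers are an assumption here):

	/* Called from copy_page_range() for pfn mappings at fork. */
	int track_pfn_vma_copy(struct vm_area_struct *vma)
	{
		resource_size_t paddr;
		unsigned long prot;
		unsigned long vma_size = vma->vm_end - vma->vm_start;

		/* Recover the physical range and the live PTE protection. */
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr))
			return -EINVAL;

		/* Re-reserve with the type the parent's PTEs actually carry. */
		return reserve_pfn_range(paddr, vma_size, __pgprot(prot), 1);
	}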
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/x86/pci/i386.c')
-rw-r--r--	arch/x86/pci/i386.c	46
1 file changed, 0 insertions(+), 46 deletions(-)
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index f234a37bd428..f1817f71e009 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -258,24 +258,7 @@ void pcibios_set_master(struct pci_dev *dev)
 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
 }
 
-static void pci_unmap_page_range(struct vm_area_struct *vma)
-{
-	u64 addr = (u64)vma->vm_pgoff << PAGE_SHIFT;
-	free_memtype(addr, addr + vma->vm_end - vma->vm_start);
-}
-
-static void pci_track_mmap_page_range(struct vm_area_struct *vma)
-{
-	u64 addr = (u64)vma->vm_pgoff << PAGE_SHIFT;
-	unsigned long flags = pgprot_val(vma->vm_page_prot)
-						& _PAGE_CACHE_MASK;
-
-	reserve_memtype(addr, addr + vma->vm_end - vma->vm_start, flags, NULL);
-}
-
 static struct vm_operations_struct pci_mmap_ops = {
-	.open  = pci_track_mmap_page_range,
-	.close = pci_unmap_page_range,
 	.access = generic_access_phys,
 };
 
@@ -283,11 +266,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 			enum pci_mmap_state mmap_state, int write_combine)
 {
 	unsigned long prot;
-	u64 addr = vma->vm_pgoff << PAGE_SHIFT;
-	unsigned long len = vma->vm_end - vma->vm_start;
-	unsigned long flags;
-	unsigned long new_flags;
-	int retval;
 
 	/* I/O space cannot be accessed via normal processor loads and
 	 * stores on this platform.
@@ -308,30 +286,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 
 	vma->vm_page_prot = __pgprot(prot);
 
-	flags = pgprot_val(vma->vm_page_prot) & _PAGE_CACHE_MASK;
-	retval = reserve_memtype(addr, addr + len, flags, &new_flags);
-	if (retval)
-		return retval;
-
-	if (flags != new_flags) {
-		if (!is_new_memtype_allowed(flags, new_flags)) {
-			free_memtype(addr, addr+len);
-			return -EINVAL;
-		}
-		flags = new_flags;
-		vma->vm_page_prot = __pgprot(
-			(pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK) |
-			flags);
-	}
-
-	if (((vma->vm_pgoff < max_low_pfn_mapped) ||
-	     (vma->vm_pgoff >= (1UL<<(32 - PAGE_SHIFT)) &&
-	      vma->vm_pgoff < max_pfn_mapped)) &&
-	    ioremap_change_attr((unsigned long)__va(addr), len, flags)) {
-		free_memtype(addr, addr + len);
-		return -EINVAL;
-	}
-
 	if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 			       vma->vm_end - vma->vm_start,
 			       vma->vm_page_prot))