author    Rik van Riel <riel@redhat.com>                    2008-07-24 00:27:05 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2008-07-24 13:47:15 -0400
commit    28b2ee20c7cba812b6f2ccf6d722cf86d00a84dc (patch)
tree      e8f1efd05c38c1cb26ca3ee051a454eb685fd122 /mm
parent    0d71d10a4252a3938e6b70189bc776171c02e076 (diff)
access_process_vm device memory infrastructure
In order to be able to debug things like the X server and programs using
the PPC Cell SPUs, the debugger needs to be able to access device memory
through ptrace and /proc/pid/mem.

This patch adds the generic_access_phys access function and puts the hooks
in place to allow access_process_vm to access device or PPC Cell SPU memory.

[riel@redhat.com: Add documentation for the vm_ops->access function]
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Dave Airlie <airlied@linux.ie>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
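[Editor's illustration] The vm_ops->access hook that this patch checks for can be
wired straight to the new generic_access_phys() by any driver that maps device
memory with remap_pfn_range(). Below is a minimal sketch of such a driver's mmap
path; the device name, register base, and file-operations glue are hypothetical
assumptions, and only the .access wiring comes from this patch:

/*
 * Hypothetical driver sketch: expose a device register window to user
 * space with remap_pfn_range() and point ->access at the new
 * generic_access_phys() so that access_process_vm() -- and hence ptrace
 * and /proc/pid/mem -- can reach the mapping.
 */
#include <linux/mm.h>
#include <linux/fs.h>

#define MYDEV_PHYS_BASE 0xfd000000UL	/* assumed device register base */

static struct vm_operations_struct mydev_vm_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys,	/* the hook this patch adds support for */
#endif
};

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &mydev_vm_ops;

	/* remap_pfn_range() marks the VMA VM_IO | VM_PFNMAP, which is
	 * exactly the case generic_access_phys() handles. */
	return remap_pfn_range(vma, vma->vm_start,
			       MYDEV_PHYS_BASE >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}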
Diffstat (limited to 'mm')
-rw-r--r--  mm/memory.c  131
1 file changed, 113 insertions(+), 18 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 46dbed4b7446..87350321e66f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2751,6 +2751,86 @@ int in_gate_area_no_task(unsigned long addr)
 
 #endif /* __HAVE_ARCH_GATE_AREA */
 
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+static resource_size_t follow_phys(struct vm_area_struct *vma,
+			unsigned long address, unsigned int flags,
+			unsigned long *prot)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *ptep, pte;
+	spinlock_t *ptl;
+	resource_size_t phys_addr = 0;
+	struct mm_struct *mm = vma->vm_mm;
+
+	VM_BUG_ON(!(vma->vm_flags & (VM_IO | VM_PFNMAP)));
+
+	pgd = pgd_offset(mm, address);
+	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+		goto no_page_table;
+
+	pud = pud_offset(pgd, address);
+	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
+		goto no_page_table;
+
+	pmd = pmd_offset(pud, address);
+	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+		goto no_page_table;
+
+	/* We cannot handle huge page PFN maps. Luckily they don't exist. */
+	if (pmd_huge(*pmd))
+		goto no_page_table;
+
+	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+	if (!ptep)
+		goto out;
+
+	pte = *ptep;
+	if (!pte_present(pte))
+		goto unlock;
+	if ((flags & FOLL_WRITE) && !pte_write(pte))
+		goto unlock;
+	phys_addr = pte_pfn(pte);
+	phys_addr <<= PAGE_SHIFT; /* Shift here to avoid overflow on PAE */
+
+	*prot = pgprot_val(pte_pgprot(pte));
+
+unlock:
+	pte_unmap_unlock(ptep, ptl);
+out:
+	return phys_addr;
+no_page_table:
+	return 0;
+}
+
+int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+			void *buf, int len, int write)
+{
+	resource_size_t phys_addr;
+	unsigned long prot = 0;
+	void *maddr;
+	int offset = addr & (PAGE_SIZE-1);
+
+	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+		return -EINVAL;
+
+	phys_addr = follow_phys(vma, addr, write, &prot);
+
+	if (!phys_addr)
+		return -EINVAL;
+
+	maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
+	if (write)
+		memcpy_toio(maddr + offset, buf, len);
+	else
+		memcpy_fromio(buf, maddr + offset, len);
+	iounmap(maddr);
+
+	return len;
+}
+#endif
+
 /*
  * Access another process' address space.
  * Source/target buffer must be kernel space,
@@ -2760,7 +2840,6 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
 {
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;
-	struct page *page;
 	void *old_buf = buf;
 
 	mm = get_task_mm(tsk);
@@ -2772,28 +2851,44 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
 	while (len) {
 		int bytes, ret, offset;
 		void *maddr;
+		struct page *page = NULL;
 
 		ret = get_user_pages(tsk, mm, addr, 1,
 				write, 1, &page, &vma);
-		if (ret <= 0)
-			break;
-
-		bytes = len;
-		offset = addr & (PAGE_SIZE-1);
-		if (bytes > PAGE_SIZE-offset)
-			bytes = PAGE_SIZE-offset;
-
-		maddr = kmap(page);
-		if (write) {
-			copy_to_user_page(vma, page, addr,
-					  maddr + offset, buf, bytes);
-			set_page_dirty_lock(page);
+		if (ret <= 0) {
+			/*
+			 * Check if this is a VM_IO | VM_PFNMAP VMA, which
+			 * we can access using slightly different code.
+			 */
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+			vma = find_vma(mm, addr);
+			if (!vma)
+				break;
+			if (vma->vm_ops && vma->vm_ops->access)
+				ret = vma->vm_ops->access(vma, addr, buf,
+							  len, write);
+			if (ret <= 0)
+#endif
+				break;
+			bytes = ret;
 		} else {
-			copy_from_user_page(vma, page, addr,
-					    buf, maddr + offset, bytes);
+			bytes = len;
+			offset = addr & (PAGE_SIZE-1);
+			if (bytes > PAGE_SIZE-offset)
+				bytes = PAGE_SIZE-offset;
+
+			maddr = kmap(page);
+			if (write) {
+				copy_to_user_page(vma, page, addr,
+						  maddr + offset, buf, bytes);
+				set_page_dirty_lock(page);
+			} else {
+				copy_from_user_page(vma, page, addr,
+						    buf, maddr + offset, bytes);
+			}
+			kunmap(page);
+			page_cache_release(page);
 		}
-		kunmap(page);
-		page_cache_release(page);
 		len -= bytes;
 		buf += bytes;
 		addr += bytes;
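
[Editor's illustration] From user space, the visible effect of this patch is that
a debugger can now read a tracee's device mappings through /proc/pid/mem, which
previously failed because get_user_pages() cannot pin VM_IO | VM_PFNMAP pages. A
hedged sketch follows; the pid and target address are command-line placeholders,
and everything here is ordinary ptrace and /proc usage, not an interface added by
the patch:

/*
 * Hypothetical debugger-side sketch: attach to a tracee and read a few
 * bytes from an address inside one of its device mappings via
 * /proc/<pid>/mem, which lands in access_process_vm() in the kernel.
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

int main(int argc, char **argv)
{
	pid_t pid;
	unsigned long addr;
	char path[64];
	unsigned char buf[16];
	int fd;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <pid> <hex-addr>\n", argv[0]);
		return 1;
	}
	pid = atoi(argv[1]);
	addr = strtoul(argv[2], NULL, 0);

	/* Attach and wait for the tracee to stop; /proc/<pid>/mem reads
	 * are only permitted on a ptrace-attached, stopped task. */
	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) < 0) {
		perror("ptrace");
		return 1;
	}
	waitpid(pid, NULL, 0);

	snprintf(path, sizeof(path), "/proc/%d/mem", pid);
	fd = open(path, O_RDONLY);
	if (fd < 0 || pread(fd, buf, sizeof(buf), addr) != sizeof(buf))
		perror("read device mapping"); /* fails on pre-patch kernels */
	else
		printf("first byte at 0x%lx: 0x%02x\n", addr, buf[0]);

	if (fd >= 0)
		close(fd);
	ptrace(PTRACE_DETACH, pid, NULL, NULL);
	return 0;
}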