diff options
author | Paul Mundt <lethal@linux-sh.org> | 2009-06-25 15:31:57 -0400 |
---|---|---|
committer | Paul Mundt <lethal@linux-sh.org> | 2009-06-25 15:31:57 -0400 |
commit | dfc2f91ac29f5ef50e74bf15a1a6b6aa6b952e62 (patch) | |
tree | 0b3eb3ae6b7b21226cf8e1410aa6dd6d0e07a110 /mm | |
parent | 1fbcf37128cc19bd67d9a736fb634dc444e907d7 (diff) |
nommu: provide follow_pfn().
With the introduction of follow_pfn() as an exported symbol, modules have
begun making use of it. Unfortunately this was not reflected on nommu at
the time, so the in-tree users have subsequently all blown up with link
errors there.
This provides a simple follow_pfn() that just returns addr >> PAGE_SHIFT,
which will do the right thing on nommu. There is no need to do range
checking within the vma, as the find_vma() case will already take care of
this.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/nommu.c | 21 |
1 file changed, 21 insertions(+), 0 deletions(-)
diff --git a/mm/nommu.c b/mm/nommu.c index 2fd2ad5da98e..598bc871487a 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -240,6 +240,27 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | |||
240 | } | 240 | } |
241 | EXPORT_SYMBOL(get_user_pages); | 241 | EXPORT_SYMBOL(get_user_pages); |
242 | 242 | ||
243 | /** | ||
244 | * follow_pfn - look up PFN at a user virtual address | ||
245 | * @vma: memory mapping | ||
246 | * @address: user virtual address | ||
247 | * @pfn: location to store found PFN | ||
248 | * | ||
249 | * Only IO mappings and raw PFN mappings are allowed. | ||
250 | * | ||
251 | * Returns zero and the pfn at @pfn on success, -ve otherwise. | ||
252 | */ | ||
253 | int follow_pfn(struct vm_area_struct *vma, unsigned long address, | ||
254 | unsigned long *pfn) | ||
255 | { | ||
256 | if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) | ||
257 | return -EINVAL; | ||
258 | |||
259 | *pfn = address >> PAGE_SHIFT; | ||
260 | return 0; | ||
261 | } | ||
262 | EXPORT_SYMBOL(follow_pfn); | ||
263 | |||
243 | DEFINE_RWLOCK(vmlist_lock); | 264 | DEFINE_RWLOCK(vmlist_lock); |
244 | struct vm_struct *vmlist; | 265 | struct vm_struct *vmlist; |
245 | 266 | ||