author	Nick Piggin <npiggin@suse.de>	2008-04-28 05:13:01 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-28 11:58:23 -0400
commit	423bad600443c590f34ed7ce357591f76f48f137 (patch)
tree	79487f811bf1097f2592c4d20f688d1b1ec41e25 /mm/memory.c
parent	7e675137a8e1a4d45822746456dd389b65745bf6 (diff)
mm: add vm_insert_mixed
vm_insert_mixed will insert either a raw pfn or a refcounted struct page into
the page tables, depending on whether vm_normal_page() will return the page
or not. With the introduction of the new pte bit, this is now too tricky for
drivers to be doing themselves.

filemap_xip uses this in a subsequent patch.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Jared Hulbert <jaredeh@gmail.com>
Cc: Carsten Otte <cotte@de.ibm.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
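As a usage illustration (not part of this patch): a minimal sketch of how a driver ->fault handler might call the new vm_insert_mixed(), written against the 2.6.26-era fault API. The vma is assumed to have been marked VM_MIXEDMAP in the driver's mmap handler (vm_insert_mixed BUG_ONs otherwise), and mydev_offset_to_pfn() is a hypothetical driver-specific helper; only vm_insert_mixed() itself comes from this patch.

	/*
	 * Hypothetical example, not part of this patch: a ->fault handler
	 * for a mapping whose mmap handler set VM_MIXEDMAP.
	 * mydev_offset_to_pfn() is an assumed driver-specific helper.
	 */
	static int mydev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		unsigned long addr = (unsigned long)vmf->virtual_address;
		unsigned long pfn = mydev_offset_to_pfn(vma, vmf->pgoff);
		int err;

		/*
		 * vm_insert_mixed() picks the insertion strategy itself:
		 * a refcounted struct page where vm_normal_page() would
		 * return one, a raw pte_special pfn otherwise.
		 */
		err = vm_insert_mixed(vma, addr, pfn);
		if (err == -ENOMEM)
			return VM_FAULT_OOM;
		if (err && err != -EBUSY)	/* -EBUSY: raced, pte already set */
			return VM_FAULT_SIGBUS;

		return VM_FAULT_NOPAGE;
	}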
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	86
1 file changed, 60 insertions(+), 26 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index c5e88bcd8ec3..bbab1e37055e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1176,8 +1176,10 @@ pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
  * old drivers should use this, and they needed to mark their
  * pages reserved for the old functions anyway.
  */
-static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *page, pgprot_t prot)
+static int insert_page(struct vm_area_struct *vma, unsigned long addr,
+			struct page *page, pgprot_t prot)
 {
+	struct mm_struct *mm = vma->vm_mm;
 	int retval;
 	pte_t *pte;
 	spinlock_t *ptl;
@@ -1237,17 +1239,46 @@ out:
  *
  * The page does not need to be reserved.
  */
-int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page)
+int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
+			struct page *page)
 {
 	if (addr < vma->vm_start || addr >= vma->vm_end)
 		return -EFAULT;
 	if (!page_count(page))
 		return -EINVAL;
 	vma->vm_flags |= VM_INSERTPAGE;
-	return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot);
+	return insert_page(vma, addr, page, vma->vm_page_prot);
 }
 EXPORT_SYMBOL(vm_insert_page);
 
+static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+			unsigned long pfn, pgprot_t prot)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	int retval;
+	pte_t *pte, entry;
+	spinlock_t *ptl;
+
+	retval = -ENOMEM;
+	pte = get_locked_pte(mm, addr, &ptl);
+	if (!pte)
+		goto out;
+	retval = -EBUSY;
+	if (!pte_none(*pte))
+		goto out_unlock;
+
+	/* Ok, finally just insert the thing.. */
+	entry = pte_mkspecial(pfn_pte(pfn, prot));
+	set_pte_at(mm, addr, pte, entry);
+	update_mmu_cache(vma, addr, entry); /* XXX: why not for insert_page? */
+
+	retval = 0;
+out_unlock:
+	pte_unmap_unlock(pte, ptl);
+out:
+	return retval;
+}
+
 /**
  * vm_insert_pfn - insert single pfn into user vma
  * @vma: user vma to map to
@@ -1261,13 +1292,8 @@ EXPORT_SYMBOL(vm_insert_page);
  * in that case the handler should return NULL.
  */
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn)
 {
-	struct mm_struct *mm = vma->vm_mm;
-	int retval;
-	pte_t *pte, entry;
-	spinlock_t *ptl;
-
 	/*
 	 * Technically, architectures with pte_special can avoid all these
 	 * restrictions (same for remap_pfn_range). However we would like
@@ -1280,27 +1306,35 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
 	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
 
-	retval = -ENOMEM;
-	pte = get_locked_pte(mm, addr, &ptl);
-	if (!pte)
-		goto out;
-	retval = -EBUSY;
-	if (!pte_none(*pte))
-		goto out_unlock;
-
-	/* Ok, finally just insert the thing.. */
-	entry = pte_mkspecial(pfn_pte(pfn, vma->vm_page_prot));
-	set_pte_at(mm, addr, pte, entry);
-	update_mmu_cache(vma, addr, entry);
-
-	retval = 0;
-out_unlock:
-	pte_unmap_unlock(pte, ptl);
-
-out:
-	return retval;
+	if (addr < vma->vm_start || addr >= vma->vm_end)
+		return -EFAULT;
+	return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
 }
 EXPORT_SYMBOL(vm_insert_pfn);
 
+int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+			unsigned long pfn)
+{
+	BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
+
+	if (addr < vma->vm_start || addr >= vma->vm_end)
+		return -EFAULT;
+
+	/*
+	 * If we don't have pte special, then we have to use the pfn_valid()
+	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
+	 * refcount the page if pfn_valid is true (hence insert_page rather
+	 * than insert_pfn).
+	 */
+	if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
+		struct page *page;
+
+		page = pfn_to_page(pfn);
+		return insert_page(vma, addr, page, vma->vm_page_prot);
+	}
+	return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+}
+EXPORT_SYMBOL(vm_insert_mixed);
+
 /*
  * maps a range of physical memory into the requested pages. the old