aboutsummaryrefslogtreecommitdiffstats
path: root/mm/memory.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@g5.osdl.org>2005-11-29 16:01:56 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2005-11-29 16:01:56 -0500
commit238f58d898df941aa9d1cb390fb27ff4febe8965 (patch)
tree4a897b3a47e7d209e3ff8601febd3dde16f3803e /mm/memory.c
parenteca351336acb2fa943611e0846562ce3997ef53b (diff)
Support strange discontiguous PFN remappings
These get created by some drivers that don't generally even want a pfn remapping at all, but would really mostly prefer to just map pages they've allocated individually instead. For now, create a helper function that turns such an incomplete PFN remapping call into a loop that does that explicit mapping. In the long run we almost certainly want to export a totally different interface for that, though.

Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r--mm/memory.c92
1 file changed, 92 insertions(+), 0 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 74839b3a3999..990e7dc666f8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1147,6 +1147,95 @@ int zeromap_page_range(struct vm_area_struct *vma,
1147} 1147}
1148 1148
1149/* 1149/*
1150 * This is the old fallback for page remapping.
1151 *
1152 * For historical reasons, it only allows reserved pages. Only
1153 * old drivers should use this, and they needed to mark their
1154 * pages reserved for the old functions anyway.
1155 */
1156static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *page, pgprot_t prot)
1157{
1158 int retval;
1159 pgd_t * pgd;
1160 pud_t * pud;
1161 pmd_t * pmd;
1162 pte_t * pte;
1163 spinlock_t *ptl;
1164
1165 retval = -EINVAL;
1166 if (PageAnon(page) || !PageReserved(page))
1167 goto out;
1168 retval = -ENOMEM;
1169 flush_dcache_page(page);
1170 pgd = pgd_offset(mm, addr);
1171 pud = pud_alloc(mm, pgd, addr);
1172 if (!pud)
1173 goto out;
1174 pmd = pmd_alloc(mm, pud, addr);
1175 if (!pmd)
1176 goto out;
1177 pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
1178 if (!pte)
1179 goto out;
1180 retval = -EBUSY;
1181 if (!pte_none(*pte))
1182 goto out_unlock;
1183
1184 /* Ok, finally just insert the thing.. */
1185 get_page(page);
1186 inc_mm_counter(mm, file_rss);
1187 page_add_file_rmap(page);
1188 set_pte_at(mm, addr, pte, mk_pte(page, prot));
1189
1190 retval = 0;
1191out_unlock:
1192 pte_unmap_unlock(pte, ptl);
1193out:
1194 return retval;
1195}
1196
1197/*
1198 * Somebody does a pfn remapping that doesn't actually work as a vma.
1199 *
1200 * Do it as individual pages instead, and warn about it. It's bad form,
1201 * and very inefficient.
1202 */
1203static int incomplete_pfn_remap(struct vm_area_struct *vma,
1204 unsigned long start, unsigned long end,
1205 unsigned long pfn, pgprot_t prot)
1206{
1207 static int warn = 10;
1208 struct page *page;
1209 int retval;
1210
1211 if (!(vma->vm_flags & VM_INCOMPLETE)) {
1212 if (warn) {
1213 warn--;
1214 printk("%s does an incomplete pfn remapping", current->comm);
1215 dump_stack();
1216 }
1217 }
1218 vma->vm_flags |= VM_INCOMPLETE | VM_IO | VM_RESERVED;
1219
1220 if (start < vma->vm_start || end > vma->vm_end)
1221 return -EINVAL;
1222
1223 if (!pfn_valid(pfn))
1224 return -EINVAL;
1225
1226 retval = 0;
1227 page = pfn_to_page(pfn);
1228 while (start < end) {
1229 retval = insert_page(vma->vm_mm, start, page, prot);
1230 if (retval < 0)
1231 break;
1232 start += PAGE_SIZE;
1233 page++;
1234 }
1235 return retval;
1236}
1237
1238/*
1150 * maps a range of physical memory into the requested pages. the old 1239 * maps a range of physical memory into the requested pages. the old
1151 * mappings are removed. any references to nonexistent pages results 1240 * mappings are removed. any references to nonexistent pages results
1152 * in null mappings (currently treated as "copy-on-access") 1241 * in null mappings (currently treated as "copy-on-access")
@@ -1220,6 +1309,9 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1220 struct mm_struct *mm = vma->vm_mm; 1309 struct mm_struct *mm = vma->vm_mm;
1221 int err; 1310 int err;
1222 1311
1312 if (addr != vma->vm_start || end != vma->vm_end)
1313 return incomplete_pfn_remap(vma, addr, end, pfn, prot);
1314
1223 /* 1315 /*
1224 * Physically remapped pages are special. Tell the 1316 * Physically remapped pages are special. Tell the
1225 * rest of the world about it: 1317 * rest of the world about it: