author    Rusty Russell <rusty@rustcorp.com.au>  2013-04-22 00:40:39 -0400
committer Rusty Russell <rusty@rustcorp.com.au>  2013-04-22 02:01:39 -0400
commit    e1d12606f756bdb8328a66a2873dca6c46bcb4e5 (patch)
tree      25464220a1fe74504cd5a8d0801913594e25539d /drivers
parent    93a2cdff98243df06bafd3c4f3b31b38f0d0fe3e (diff)
lguest: make check_gpte et. al return bool.
This is a bit neater: we can immediately return if a PTE/PGD/PMD entry is invalid (which also kills the guest). It means we don't risk using invalid entries as we reshuffle the code.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
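For readers skimming the patch, the following is a minimal, self-contained sketch of the pattern it introduces: a validation helper that returns bool instead of void, so the caller can stop as soon as an entry is known to be bad. The struct guest, check_entry() and map_page() names are simplified stand-ins invented for illustration, not the lguest code itself (which uses struct lg_cpu, pte_t and the real kill_guest()).

/* Sketch only: simplified stand-in types, not the kernel's lguest code. */
#include <stdbool.h>
#include <stdio.h>

struct guest {
        unsigned long pfn_limit;   /* first page frame the guest may not map */
};

/* Hypothetical stand-in for kill_guest(): just report the error here. */
static void kill_guest(struct guest *g, const char *msg)
{
        (void)g;
        fprintf(stderr, "killing guest: %s\n", msg);
}

/* Before the patch this kind of helper returned void; returning bool
 * lets the caller bail out immediately on an invalid entry. */
static bool check_entry(struct guest *g, unsigned long pfn)
{
        if (pfn >= g->pfn_limit) {
                kill_guest(g, "bad page table entry");
                return false;
        }
        return true;
}

/* Caller in the style of demand_page(): stop at the first bad entry
 * instead of continuing to use it. */
static bool map_page(struct guest *g, unsigned long pfn)
{
        if (!check_entry(g, pfn))
                return false;
        printf("mapping pfn %lu\n", pfn);
        return true;
}

int main(void)
{
        struct guest g = { .pfn_limit = 1024 };

        map_page(&g, 42);    /* fine: below pfn_limit */
        map_page(&g, 4096);  /* rejected: pfn beyond pfn_limit */
        return 0;
}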
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/lguest/page_tables.c | 33
1 file changed, 23 insertions(+), 10 deletions(-)
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index 758466299b0d..f074f34acb86 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -259,26 +259,35 @@ static void release_pte(pte_t pte)
 }
 /*:*/
 
-static void check_gpte(struct lg_cpu *cpu, pte_t gpte)
+static bool check_gpte(struct lg_cpu *cpu, pte_t gpte)
 {
         if ((pte_flags(gpte) & _PAGE_PSE) ||
-            pte_pfn(gpte) >= cpu->lg->pfn_limit)
+            pte_pfn(gpte) >= cpu->lg->pfn_limit) {
                 kill_guest(cpu, "bad page table entry");
+                return false;
+        }
+        return true;
 }
 
-static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
+static bool check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
 {
         if ((pgd_flags(gpgd) & ~CHECK_GPGD_MASK) ||
-            (pgd_pfn(gpgd) >= cpu->lg->pfn_limit))
+            (pgd_pfn(gpgd) >= cpu->lg->pfn_limit)) {
                 kill_guest(cpu, "bad page directory entry");
+                return false;
+        }
+        return true;
 }
 
 #ifdef CONFIG_X86_PAE
-static void check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
+static bool check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
 {
         if ((pmd_flags(gpmd) & ~_PAGE_TABLE) ||
-            (pmd_pfn(gpmd) >= cpu->lg->pfn_limit))
+            (pmd_pfn(gpmd) >= cpu->lg->pfn_limit)) {
                 kill_guest(cpu, "bad page middle directory entry");
+                return false;
+        }
+        return true;
 }
 #endif
 
@@ -336,7 +345,8 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
                 return false;
         }
         /* We check that the Guest pgd is OK. */
-        check_gpgd(cpu, gpgd);
+        if (!check_gpgd(cpu, gpgd))
+                return false;
         /*
          * And we copy the flags to the shadow PGD entry. The page
          * number in the shadow PGD is the page we just allocated.
@@ -372,7 +382,8 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
         }
 
         /* We check that the Guest pmd is OK. */
-        check_gpmd(cpu, gpmd);
+        if (!check_gpmd(cpu, gpmd))
+                return false;
 
         /*
          * And we copy the flags to the shadow PMD entry. The page
@@ -421,7 +432,8 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
          * Check that the Guest PTE flags are OK, and the page number is below
          * the pfn_limit (ie. not mapping the Launcher binary).
          */
-        check_gpte(cpu, gpte);
+        if (!check_gpte(cpu, gpte))
+                return false;
 
         /* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
         gpte = pte_mkyoung(gpte);
@@ -857,7 +869,8 @@ static void do_set_pte(struct lg_cpu *cpu, int idx,
          * micro-benchmark.
          */
         if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
-                check_gpte(cpu, gpte);
+                if (!check_gpte(cpu, gpte))
+                        return;
                 set_pte(spte,
                         gpte_to_spte(cpu, gpte,
                                      pte_flags(gpte) & _PAGE_DIRTY));