author	Matias Zabaljauregui <zabaljauregui@gmail.com>	2009-03-18 12:38:35 -0400
committer	Rusty Russell <rusty@rustcorp.com.au>	2009-03-30 07:25:25 -0400
commit	df1693abc42e34bbc4351e179dbe66c28a94efb8 (patch)
tree	b0cec44a3ace1fbc8377c73428daf64848b48907 /drivers/lguest/page_tables.c
parent	4cd8b5e2a159f18a1507f1187b44a1acbfa6341b (diff)
lguest: use bool instead of int
Impact: clean up

Rusty told me, some time ago, that he had become a fan of "bool". So, here are some replacements.

Signed-off-by: Matias Zabaljauregui <zabaljauregui at gmail.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Diffstat (limited to 'drivers/lguest/page_tables.c')
-rw-r--r--	drivers/lguest/page_tables.c	18
1 file changed, 9 insertions, 9 deletions
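The commit message above describes the pattern applied throughout this file: predicate-style functions that used to return 0/1 as an int now return false/true as a bool. A minimal sketch of that pattern, using a hypothetical function and flag that are not part of the lguest code:

#include <stdbool.h>	/* kernel code gets bool via <linux/types.h> */

#define FLAG_PRESENT 0x1	/* hypothetical flag, for illustration only */

/* Before: the predicate returns 0 or 1 as a plain int. */
static int entry_present_old(unsigned long flags)
{
	if (flags & FLAG_PRESENT)
		return 1;
	return 0;
}

/* After: same logic, but the bool return type makes the true/false
 * contract explicit, matching the demand_page() change below. */
static bool entry_present(unsigned long flags)
{
	return flags & FLAG_PRESENT;
}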
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index 82ff484bd8c8..a059cf9980f7 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -199,7 +199,7 @@ static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
  *
  * If we fixed up the fault (ie. we mapped the address), this routine returns
  * true. Otherwise, it was a real fault and we need to tell the Guest. */
-int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
+bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
 {
 	pgd_t gpgd;
 	pgd_t *spgd;
@@ -211,7 +211,7 @@ int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
 	gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
 	/* Toplevel not present? We can't map it in. */
 	if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
-		return 0;
+		return false;
 
 	/* Now look at the matching shadow entry. */
 	spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
@@ -222,7 +222,7 @@ int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
 		 * simple for this corner case. */
 		if (!ptepage) {
 			kill_guest(cpu, "out of memory allocating pte page");
-			return 0;
+			return false;
 		}
 		/* We check that the Guest pgd is OK. */
 		check_gpgd(cpu, gpgd);
@@ -238,16 +238,16 @@ int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
 
 	/* If this page isn't in the Guest page tables, we can't page it in. */
 	if (!(pte_flags(gpte) & _PAGE_PRESENT))
-		return 0;
+		return false;
 
 	/* Check they're not trying to write to a page the Guest wants
 	 * read-only (bit 2 of errcode == write). */
 	if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))
-		return 0;
+		return false;
 
 	/* User access to a kernel-only page? (bit 3 == user access) */
 	if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
-		return 0;
+		return false;
 
 	/* Check that the Guest PTE flags are OK, and the page number is below
 	 * the pfn_limit (ie. not mapping the Launcher binary). */
@@ -283,7 +283,7 @@ int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
 	 * manipulated, the result returned and the code complete. A small
 	 * delay and a trace of alliteration are the only indications the Guest
 	 * has that a page fault occurred at all. */
-	return 1;
+	return true;
 }
 
 /*H:360
@@ -296,7 +296,7 @@ int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
  *
  * This is a quick version which answers the question: is this virtual address
  * mapped by the shadow page tables, and is it writable? */
-static int page_writable(struct lg_cpu *cpu, unsigned long vaddr)
+static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
 {
 	pgd_t *spgd;
 	unsigned long flags;
@@ -304,7 +304,7 @@ static int page_writable(struct lg_cpu *cpu, unsigned long vaddr)
 	/* Look at the current top level entry: is it present? */
 	spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
 	if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
-		return 0;
+		return false;
 
 	/* Check the flags on the pte entry itself: it must be present and
 	 * writable. */