author     Thomas Gleixner <tglx@linutronix.de>    2008-01-30 07:34:08 -0500
committer  Ingo Molnar <mingo@elte.hu>             2008-01-30 07:34:08 -0500
commit     56744546b3e5379177a70e7306c6283f727e4732 (patch)
tree       6c31d1ee005adfa566d4cdd4aac3f11942fc01c6 /arch/x86
parent     ff31452b6ea5032f26f16140d45dc6596260cd9c (diff)
x86: cpa use the new set_clr function
Convert cpa_set and cpa_clear to call the new set_clr function. Separate
out the debug helpers.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86')
-rw-r--r--   arch/x86/mm/pageattr.c   126
1 file changed, 20 insertions(+), 106 deletions(-)
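
For orientation before the diff: the change_page_attr_set()/change_page_attr_clear() wrappers introduced here are what the higher-level page-attribute helpers in pageattr.c (set_pages_ro(), set_pages_rw() and friends, visible as context in the second hunk) are built on. A minimal sketch of how such a helper can sit on top of the clear wrapper; the helper body, the page_address() usage and the _PAGE_RW bit are assumed from the usual x86 code of this era, not quoted from this patch:

/* Illustrative sketch, not the literal code in this tree. */
int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	/* Clear the writable bit on numpages pages of the kernel linear map. */
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
}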
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index c54832b75069..7823adab96e4 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -321,116 +321,16 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages,
 	return ret;
 }
 
-/**
- * change_page_attr_set - Change page table attributes in the linear mapping.
- * @addr: Virtual address in linear mapping.
- * @numpages: Number of pages to change
- * @prot: Protection/caching type bits to set (PAGE_*)
- *
- * Returns 0 on success, otherwise a negated errno.
- *
- * This should be used when a page is mapped with a different caching policy
- * than write-back somewhere - some CPUs do not like it when mappings with
- * different caching policies exist. This changes the page attributes of the
- * in kernel linear mapping too.
- *
- * The caller needs to ensure that there are no conflicting mappings elsewhere
- * (e.g. in user space) * This function only deals with the kernel linear map.
- *
- * This function is different from change_page_attr() in that only selected bits
- * are impacted, all other bits remain as is.
- */
-static int __change_page_attr_set(unsigned long addr, int numpages,
-				  pgprot_t prot)
+static inline int change_page_attr_set(unsigned long addr, int numpages,
+					pgprot_t mask)
 {
-	pgprot_t current_prot, new_prot;
-	int level;
-	pte_t *pte;
-	int i, ret;
-
-	for (i = 0; i < numpages ; i++) {
-
-		pte = lookup_address(addr, &level);
-		if (!pte)
-			return -EINVAL;
-
-		current_prot = pte_pgprot(*pte);
-
-		pgprot_val(new_prot) =
-			pgprot_val(current_prot) | pgprot_val(prot);
-
-		ret = change_page_attr_addr(addr, new_prot);
-		if (ret)
-			return ret;
-		addr += PAGE_SIZE;
-	}
-
-	return 0;
+	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
 }
 
-static int change_page_attr_set(unsigned long addr, int numpages, pgprot_t prot)
+static inline int change_page_attr_clear(unsigned long addr, int numpages,
+					  pgprot_t mask)
 {
-	int ret = __change_page_attr_set(addr, numpages, prot);
-
-	global_flush_tlb();
-	return ret;
-
-}
-
-/**
- * change_page_attr_clear - Change page table attributes in the linear mapping.
- * @addr: Virtual address in linear mapping.
- * @numpages: Number of pages to change
- * @prot: Protection/caching type bits to clear (PAGE_*)
- *
- * Returns 0 on success, otherwise a negated errno.
- *
- * This should be used when a page is mapped with a different caching policy
- * than write-back somewhere - some CPUs do not like it when mappings with
- * different caching policies exist. This changes the page attributes of the
- * in kernel linear mapping too.
- *
- * The caller needs to ensure that there are no conflicting mappings elsewhere
- * (e.g. in user space) * This function only deals with the kernel linear map.
- *
- * This function is different from change_page_attr() in that only selected bits
- * are impacted, all other bits remain as is.
- */
-static int __change_page_attr_clear(unsigned long addr, int numpages,
-				    pgprot_t prot)
-{
-	pgprot_t current_prot, new_prot;
-	int level;
-	pte_t *pte;
-	int i, ret;
-
-	for (i = 0; i < numpages; i++) {
-
-		pte = lookup_address(addr, &level);
-		if (!pte)
-			return -EINVAL;
-
-		current_prot = pte_pgprot(*pte);
-
-		pgprot_val(new_prot) =
-			pgprot_val(current_prot) & ~pgprot_val(prot);
-
-		ret = change_page_attr_addr(addr, new_prot);
-		if (ret)
-			return ret;
-		addr += PAGE_SIZE;
-	}
-
-	return 0;
-}
-
-static int change_page_attr_clear(unsigned long addr, int numpages,
-				  pgprot_t prot)
-{
-	int ret = __change_page_attr_clear(addr, numpages, prot);
-
-	global_flush_tlb();
-	return ret;
+	return __change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
 
 }
 
@@ -522,6 +422,20 @@ int set_pages_rw(struct page *page, int numpages)
 }
 
 
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_CPA_DEBUG)
+static inline int __change_page_attr_set(unsigned long addr, int numpages,
+					 pgprot_t mask)
+{
+	return __change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
+}
+
+static inline int __change_page_attr_clear(unsigned long addr, int numpages,
+					   pgprot_t mask)
+{
+	return __change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
+}
+#endif
+
 #ifdef CONFIG_DEBUG_PAGEALLOC
 
 static int __set_pages_p(struct page *page, int numpages)
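
A note on the second hunk: the underscore variants __change_page_attr_set()/__change_page_attr_clear() are kept only for the DEBUG_PAGEALLOC/CPA_DEBUG code and go through __change_page_attr_set_clr() directly, whereas the change_page_attr_set()/change_page_attr_clear() wrappers they mirror replaced code that ended with a global_flush_tlb(). A hedged sketch of how the debug-pagealloc path (whose __set_pages_p() appears as context above) could use them; the exact protection bits and body are assumptions, not taken from this patch:

/* Illustrative sketch: a plausible debug-pagealloc user of the underscore
 * helpers, which avoid the global TLB flush so kernel_map_pages() stays cheap.
 */
static int __set_pages_p(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	/* Mark the pages present and writable; no global flush here. */
	return __change_page_attr_set(addr, numpages,
				      __pgprot(_PAGE_PRESENT | _PAGE_RW));
}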