 arch/x86/xen/mmu.c       |  2 +-
 include/linux/mm.h       |  6 +++---
 include/linux/mm_types.h | 10 ++++++----
 include/linux/sched.h    |  6 +++---
 4 files changed, 13 insertions(+), 11 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index aa37469da696..2e1b64088490 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -646,7 +646,7 @@ static spinlock_t *lock_pte(struct page *page)
 {
 	spinlock_t *ptl = NULL;
 
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#if USE_SPLIT_PTLOCKS
 	ptl = __pte_lockptr(page);
 	spin_lock(ptl);
 #endif
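
The rename works because the preprocessor expands USE_SPLIT_PTLOCKS to its defining expression at every #if site, so each test still reduces to the same NR_CPUS comparison as before. A minimal standalone sketch, with made-up values for NR_CPUS and CONFIG_SPLIT_PTLOCK_CPUS:

/* Sketch only: the two config values below are illustrative, not real. */
#define NR_CPUS 8
#define CONFIG_SPLIT_PTLOCK_CPUS 4

/* One definition replaces the comparison repeated across four files. */
#define USE_SPLIT_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)

#if USE_SPLIT_PTLOCKS	/* the preprocessor evaluates 8 >= 4 here */
int split_ptlocks_enabled = 1;	/* per-page PTE lock path compiled in */
#endif
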
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 72a15dc26bbf..4194bf8e4f6c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -919,7 +919,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 }
 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
 
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#if USE_SPLIT_PTLOCKS
 /*
  * We tuck a spinlock to guard each pagetable page into its struct page,
  * at page->private, with BUILD_BUG_ON to make sure that this will not
@@ -932,14 +932,14 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 } while (0)
 #define pte_lock_deinit(page)	((page)->mapping = NULL)
 #define pte_lockptr(mm, pmd)	({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
-#else
+#else	/* !USE_SPLIT_PTLOCKS */
 /*
  * We use mm->page_table_lock to guard all pagetable pages of the mm.
  */
 #define pte_lock_init(page)	do {} while (0)
 #define pte_lock_deinit(page)	do {} while (0)
 #define pte_lockptr(mm, pmd)	({(void)(pmd); &(mm)->page_table_lock;})
-#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#endif /* USE_SPLIT_PTLOCKS */
 
 static inline void pgtable_page_ctor(struct page *page)
 {
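
Callers are insulated from which branch gets compiled: pte_lockptr() hands back either the per-page lock or the mm-wide page_table_lock, and the locking discipline is identical either way. A sketch of a typical caller, assuming kernel context (the helper function itself is hypothetical):

/* Hypothetical helper: touch PTEs under whichever lock applies. */
static void with_pte_lock(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl = pte_lockptr(mm, pmd);	/* per-page ptl or mm lock */

	spin_lock(ptl);
	/* ... examine or update PTEs on the page mapped by *pmd ... */
	spin_unlock(ptl);
}
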
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index bf334138c7c1..9d49fa36bbef 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -21,11 +21,13 @@
 
 struct address_space;
 
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#define USE_SPLIT_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
+
+#if USE_SPLIT_PTLOCKS
 typedef atomic_long_t mm_counter_t;
-#else  /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#else  /* !USE_SPLIT_PTLOCKS */
 typedef unsigned long mm_counter_t;
-#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#endif /* !USE_SPLIT_PTLOCKS */
 
 /*
  * Each physical page in the system has a struct page associated with
@@ -65,7 +67,7 @@ struct page {
 	 * see PAGE_MAPPING_ANON below.
 	 */
 	};
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#if USE_SPLIT_PTLOCKS
 	spinlock_t ptl;
 #endif
 	struct kmem_cache *slab;	/* SLUB: Pointer to slab */
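
Defining USE_SPLIT_PTLOCKS here, next to struct page and mm_counter_t, keeps the policy in one place; mm.h and sched.h only consume it. Conceptually the per-page lock borrows space struct page already has, sharing a union slot with other per-page state (layout simplified below; the real struct page has more members):

/* Conceptual sketch of the overlay, not the kernel's exact layout. */
struct page_sketch {
	union {
		unsigned long private;		/* ordinary use of the slot */
#if USE_SPLIT_PTLOCKS
		spinlock_t ptl;			/* when the page holds PTEs */
#endif
		struct kmem_cache *slab;	/* SLUB bookkeeping */
	};
};
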
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3d9120c5ad15..272c35309df2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -352,7 +352,7 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 extern void arch_unmap_area(struct mm_struct *, unsigned long);
 extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#if USE_SPLIT_PTLOCKS
 /*
  * The mm counters are not protected by its page_table_lock,
  * so must be incremented atomically.
@@ -363,7 +363,7 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 #define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
 #define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
 
-#else  /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#else  /* !USE_SPLIT_PTLOCKS */
 /*
  * The mm counters are protected by its page_table_lock,
  * so can be incremented directly.
@@ -374,7 +374,7 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 #define inc_mm_counter(mm, member) (mm)->_##member++
 #define dec_mm_counter(mm, member) (mm)->_##member--
 
-#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#endif /* !USE_SPLIT_PTLOCKS */
 
 #define get_mm_rss(mm) \
 	(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
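
With split PTE locks, two CPUs can fault pages into the same mm under different per-page locks, so nothing serializes the RSS counters; that is why mm_counter_t becomes atomic_long_t and the accessor macros switch to atomic ops. A sketch of a typical update site, assuming kernel context (the helper name is hypothetical):

/* Hypothetical fault-path helper showing the counter macros in use. */
static void account_new_anon_page(struct mm_struct *mm)
{
	/* Expands to atomic_long_inc(&mm->_anon_rss) under
	 * USE_SPLIT_PTLOCKS, or to mm->_anon_rss++ otherwise. */
	inc_mm_counter(mm, anon_rss);
}
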