author    KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>    2009-01-06 17:39:16 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>      2009-01-06 18:59:00 -0500
commit    64cdd548ffe26849d4cd113ac640f60606063b14 (patch)
tree      9a7a8af9022621d2da3648757b724fcd32b20168
parent    1b0bd118862cd9fe9ac2872137a1b8107e83ff9d (diff)
mm: cleanup: remove #ifdef CONFIG_MIGRATION
#ifdef in *.c files decreases source readability a bit; removing it is better.
This patch doesn't have any functional change.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
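For context, the cleanup replaces preprocessor conditionals in .c files with a 0/1 constant that an ordinary if can test, relying on the compiler to discard the dead branch. Below is a minimal, standalone C sketch of that idiom; CONFIG_FOO, FOO_ENABLED, do_foo_thing() and handle() are made-up names used only for illustration, not part of this patch:

/*
 * Sketch of the #ifdef-to-constant pattern. The header maps the config
 * symbol to a plain 0/1 macro; the .c code then uses a normal `if`, and
 * the compiler's dead-code elimination drops the branch when the
 * constant is 0. (Hypothetical names throughout.)
 */
#include <stdio.h>

#ifdef CONFIG_FOO
#define FOO_ENABLED 1
#else
#define FOO_ENABLED 0
#endif

static void do_foo_thing(void)
{
	printf("foo path taken\n");
}

static void handle(int want_foo)
{
	/* When FOO_ENABLED is 0 this condition is constant-false and the
	 * call below is compiled away, just like the old #ifdef block. */
	if (FOO_ENABLED && want_foo)
		do_foo_thing();
	else
		printf("fallback path\n");
}

int main(void)
{
	handle(1);
	return 0;
}

Unlike an #ifdef block, the disabled branch is still parsed and type-checked, so the helpers used inside it still need visible declarations when the option is off; in this patch the !CONFIG_MIGRATION stubs in the headers serve that purpose.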
-rw-r--r--  include/linux/migrate.h |  4
-rw-r--r--  mm/mprotect.c           |  6
-rw-r--r--  mm/rmap.c               | 10
3 files changed, 9 insertions, 11 deletions
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 3f34005068d4..527602cdea1c 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -7,6 +7,8 @@
 typedef struct page *new_page_t(struct page *, unsigned long private, int **);
 
 #ifdef CONFIG_MIGRATION
+#define PAGE_MIGRATION 1
+
 extern int putback_lru_pages(struct list_head *l);
 extern int migrate_page(struct address_space *,
 			struct page *, struct page *);
@@ -20,6 +22,8 @@ extern int migrate_vmas(struct mm_struct *mm,
 		const nodemask_t *from, const nodemask_t *to,
 		unsigned long flags);
 #else
+#define PAGE_MIGRATION 0
+
 static inline int putback_lru_pages(struct list_head *l) { return 0; }
 static inline int migrate_pages(struct list_head *l, new_page_t x,
 		unsigned long private) { return -ENOSYS; }
diff --git a/mm/mprotect.c b/mm/mprotect.c
index cfb4c4852062..d0f6e7ce09f1 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -22,6 +22,7 @@
 #include <linux/swap.h>
 #include <linux/swapops.h>
 #include <linux/mmu_notifier.h>
+#include <linux/migrate.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
@@ -59,8 +60,7 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
 				ptent = pte_mkwrite(ptent);
 
 			ptep_modify_prot_commit(mm, addr, pte, ptent);
-#ifdef CONFIG_MIGRATION
-		} else if (!pte_file(oldpte)) {
+		} else if (PAGE_MIGRATION && !pte_file(oldpte)) {
 			swp_entry_t entry = pte_to_swp_entry(oldpte);
 
 			if (is_write_migration_entry(entry)) {
@@ -72,9 +72,7 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
 				set_pte_at(mm, addr, pte,
 					swp_entry_to_pte(entry));
 			}
-#endif
 		}
-
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	arch_leave_lazy_mmu_mode();
 	pte_unmap_unlock(pte - 1, ptl);
diff --git a/mm/rmap.c b/mm/rmap.c
index 10993942d6c9..53c56dacd725 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -50,6 +50,7 @@
 #include <linux/kallsyms.h>
 #include <linux/memcontrol.h>
 #include <linux/mmu_notifier.h>
+#include <linux/migrate.h>
 
 #include <asm/tlbflush.h>
 
@@ -818,8 +819,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 				spin_unlock(&mmlist_lock);
 			}
 			dec_mm_counter(mm, anon_rss);
-#ifdef CONFIG_MIGRATION
-		} else {
+		} else if (PAGE_MIGRATION) {
 			/*
 			 * Store the pfn of the page in a special migration
 			 * pte. do_swap_page() will wait until the migration
@@ -827,19 +827,15 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			 */
 			BUG_ON(!migration);
 			entry = make_migration_entry(page, pte_write(pteval));
-#endif
 		}
 		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
 		BUG_ON(pte_file(*pte));
-	} else
-#ifdef CONFIG_MIGRATION
-	if (migration) {
+	} else if (PAGE_MIGRATION && migration) {
 		/* Establish migration entry for a file page */
 		swp_entry_t entry;
 		entry = make_migration_entry(page, pte_write(pteval));
 		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
 	} else
-#endif
 		dec_mm_counter(mm, file_rss);
 
 