author     Hugh Dickins <hughd@google.com>                  2012-01-12 20:20:01 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-01-12 23:13:10 -0500
commit     4111304dab198c687bc60f2e235a9f7ee92c47c8 (patch)
tree       c98fbae214f73f8475bcdc54c8116dea82cd7d14
parent     4d06f382c733f99ec67df006255e87525ac1efd3 (diff)
mm: enum lru_list lru
Mostly we use "enum lru_list lru": change those few "l"s to "lru"s.
Signed-off-by: Hugh Dickins <hughd@google.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/mm_inline.h | 26
-rw-r--r--  include/linux/mmzone.h    | 16
-rw-r--r--  mm/page_alloc.c           |  6
-rw-r--r--  mm/vmscan.c               | 22
4 files changed, 35 insertions(+), 35 deletions(-)
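
The change is purely mechanical: every "enum lru_list" variable spelled "l" becomes "lru", with no behavioural difference. As a rough user-space sketch of the convention the patch settles on (illustration only, not kernel code; the list names and the macro shape mirror the hunks below):

#include <stdio.h>

/* Illustration only: a user-space stand-in for the kernel's enum lru_list. */
enum lru_list {
        LRU_INACTIVE_ANON,
        LRU_ACTIVE_ANON,
        LRU_INACTIVE_FILE,
        LRU_ACTIVE_FILE,
        LRU_UNEVICTABLE,
        NR_LRU_LISTS
};

/* Same shape as the kernel macro after this patch: the variable is "lru". */
#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

int main(void)
{
        enum lru_list lru;      /* the spelling the whole tree now uses */

        for_each_lru(lru)
                printf("lru list %d\n", (int)lru);
        return 0;
}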
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 4e3478e71926..8f84d2e53d0f 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -22,21 +22,21 @@ static inline int page_is_file_cache(struct page *page)
 }
 
 static inline void
-add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
+add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
 {
         struct lruvec *lruvec;
 
-        lruvec = mem_cgroup_lru_add_list(zone, page, l);
-        list_add(&page->lru, &lruvec->lists[l]);
-        __mod_zone_page_state(zone, NR_LRU_BASE + l, hpage_nr_pages(page));
+        lruvec = mem_cgroup_lru_add_list(zone, page, lru);
+        list_add(&page->lru, &lruvec->lists[lru]);
+        __mod_zone_page_state(zone, NR_LRU_BASE + lru, hpage_nr_pages(page));
 }
 
 static inline void
-del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
+del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
 {
-        mem_cgroup_lru_del_list(page, l);
+        mem_cgroup_lru_del_list(page, lru);
         list_del(&page->lru);
-        __mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
+        __mod_zone_page_state(zone, NR_LRU_BASE + lru, -hpage_nr_pages(page));
 }
 
 /**
@@ -57,21 +57,21 @@ static inline enum lru_list page_lru_base_type(struct page *page)
 static inline void
 del_page_from_lru(struct zone *zone, struct page *page)
 {
-        enum lru_list l;
+        enum lru_list lru;
 
         if (PageUnevictable(page)) {
                 __ClearPageUnevictable(page);
-                l = LRU_UNEVICTABLE;
+                lru = LRU_UNEVICTABLE;
         } else {
-                l = page_lru_base_type(page);
+                lru = page_lru_base_type(page);
                 if (PageActive(page)) {
                         __ClearPageActive(page);
-                        l += LRU_ACTIVE;
+                        lru += LRU_ACTIVE;
                 }
         }
-        mem_cgroup_lru_del_list(page, l);
+        mem_cgroup_lru_del_list(page, lru);
         list_del(&page->lru);
-        __mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
+        __mod_zone_page_state(zone, NR_LRU_BASE + lru, -hpage_nr_pages(page));
 }
 
 /**
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 2038b90ca6e3..650ba2fb3301 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -140,23 +140,23 @@ enum lru_list {
         NR_LRU_LISTS
 };
 
-#define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)
+#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)
 
-#define for_each_evictable_lru(l) for (l = 0; l <= LRU_ACTIVE_FILE; l++)
+#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
 
-static inline int is_file_lru(enum lru_list l)
+static inline int is_file_lru(enum lru_list lru)
 {
-        return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE);
+        return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
 }
 
-static inline int is_active_lru(enum lru_list l)
+static inline int is_active_lru(enum lru_list lru)
 {
-        return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE);
+        return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
 }
 
-static inline int is_unevictable_lru(enum lru_list l)
+static inline int is_unevictable_lru(enum lru_list lru)
 {
-        return (l == LRU_UNEVICTABLE);
+        return (lru == LRU_UNEVICTABLE);
 }
 
 struct lruvec {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cb5723c491f0..0027d8f4a1bb 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4262,7 +4262,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
         for (j = 0; j < MAX_NR_ZONES; j++) {
                 struct zone *zone = pgdat->node_zones + j;
                 unsigned long size, realsize, memmap_pages;
-                enum lru_list l;
+                enum lru_list lru;
 
                 size = zone_spanned_pages_in_node(nid, j, zones_size);
                 realsize = size - zone_absent_pages_in_node(nid, j,
@@ -4312,8 +4312,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                 zone->zone_pgdat = pgdat;
 
                 zone_pcp_init(zone);
-                for_each_lru(l)
-                        INIT_LIST_HEAD(&zone->lruvec.lists[l]);
+                for_each_lru(lru)
+                        INIT_LIST_HEAD(&zone->lruvec.lists[lru]);
                 zone->reclaim_stat.recent_rotated[0] = 0;
                 zone->reclaim_stat.recent_rotated[1] = 0;
                 zone->reclaim_stat.recent_scanned[0] = 0;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7724fb8e7498..01466bf783fd 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1920,7 +1920,7 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
         unsigned long ap, fp;
         struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
         u64 fraction[2], denominator;
-        enum lru_list l;
+        enum lru_list lru;
         int noswap = 0;
         bool force_scan = false;
 
@@ -2010,18 +2010,18 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
         fraction[1] = fp;
         denominator = ap + fp + 1;
 out:
-        for_each_evictable_lru(l) {
-                int file = is_file_lru(l);
+        for_each_evictable_lru(lru) {
+                int file = is_file_lru(lru);
                 unsigned long scan;
 
-                scan = zone_nr_lru_pages(mz, l);
+                scan = zone_nr_lru_pages(mz, lru);
                 if (priority || noswap) {
                         scan >>= priority;
                         if (!scan && force_scan)
                                 scan = SWAP_CLUSTER_MAX;
                         scan = div64_u64(scan * fraction[file], denominator);
                 }
-                nr[l] = scan;
+                nr[lru] = scan;
         }
 }
 
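For orientation, the arithmetic in the get_scan_count() hunk above scales each list's size down by the reclaim priority and then splits it between anon and file using the precomputed fraction[]/denominator ratio. A rough user-space sketch of just that calculation (illustration only; the force_scan/SWAP_CLUSTER_MAX floor is left out and the sample numbers are made up):

#include <stdio.h>
#include <stdint.h>

/* scan >>= priority; scan = div64_u64(scan * fraction[file], denominator); */
static uint64_t scan_target(uint64_t lru_pages, int priority,
                            uint64_t fraction, uint64_t denominator)
{
        uint64_t scan = lru_pages >> priority;
        return scan * fraction / denominator;
}

int main(void)
{
        /* e.g. 1048576 pages on a file LRU, priority 12, file share 300/400 */
        printf("%llu pages to scan this pass\n",
               (unsigned long long)scan_target(1048576, 12, 300, 400));
        return 0;
}
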
@@ -2097,7 +2097,7 @@ static void shrink_mem_cgroup_zone(int priority, struct mem_cgroup_zone *mz,
 {
         unsigned long nr[NR_LRU_LISTS];
         unsigned long nr_to_scan;
-        enum lru_list l;
+        enum lru_list lru;
         unsigned long nr_reclaimed, nr_scanned;
         unsigned long nr_to_reclaim = sc->nr_to_reclaim;
         struct blk_plug plug;
@@ -2110,13 +2110,13 @@ restart:
         blk_start_plug(&plug);
         while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
                                         nr[LRU_INACTIVE_FILE]) {
-                for_each_evictable_lru(l) {
-                        if (nr[l]) {
+                for_each_evictable_lru(lru) {
+                        if (nr[lru]) {
                                 nr_to_scan = min_t(unsigned long,
-                                                   nr[l], SWAP_CLUSTER_MAX);
-                                nr[l] -= nr_to_scan;
+                                                   nr[lru], SWAP_CLUSTER_MAX);
+                                nr[lru] -= nr_to_scan;
 
-                                nr_reclaimed += shrink_list(l, nr_to_scan,
+                                nr_reclaimed += shrink_list(lru, nr_to_scan,
                                                             mz, sc, priority);
                         }
                 }
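
The shrink_mem_cgroup_zone() loop above then drains those per-list targets round-robin, taking at most SWAP_CLUSTER_MAX pages from one list per pass so that no single list monopolises reclaim. A stripped-down user-space sketch of that batching (illustration only; BATCH merely stands in for SWAP_CLUSTER_MAX and the targets are made up):

#include <stdio.h>

enum { NLISTS = 4, BATCH = 32 };        /* BATCH stands in for SWAP_CLUSTER_MAX */

int main(void)
{
        unsigned long nr[NLISTS] = { 100, 40, 7, 0 };   /* per-list scan targets */
        int lru, more = 1;

        while (more) {
                more = 0;
                for (lru = 0; lru < NLISTS; lru++) {
                        if (nr[lru]) {
                                unsigned long step =
                                        nr[lru] < BATCH ? nr[lru] : BATCH;

                                nr[lru] -= step;
                                more = 1;
                                printf("shrink list %d by %lu pages\n", lru, step);
                        }
                }
        }
        return 0;
}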