Diffstat (limited to 'mm')
-rw-r--r--  mm/bootmem.c      24
-rw-r--r--  mm/compaction.c   98
-rw-r--r--  mm/huge_memory.c  15
-rw-r--r--  mm/internal.h      1
-rw-r--r--  mm/memblock.c      3
-rw-r--r--  mm/migrate.c      14
-rw-r--r--  mm/mmap.c          2
-rw-r--r--  mm/page_alloc.c   37
8 files changed, 71 insertions(+), 123 deletions(-)
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 1324cd74faec..b93376c39b61 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -185,10 +185,23 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 
 	while (start < end) {
 		unsigned long *map, idx, vec;
+		unsigned shift;
 
 		map = bdata->node_bootmem_map;
 		idx = start - bdata->node_min_pfn;
+		shift = idx & (BITS_PER_LONG - 1);
+		/*
+		 * vec holds at most BITS_PER_LONG map bits,
+		 * bit 0 corresponds to start.
+		 */
 		vec = ~map[idx / BITS_PER_LONG];
+
+		if (shift) {
+			vec >>= shift;
+			if (end - start >= BITS_PER_LONG)
+				vec |= ~map[idx / BITS_PER_LONG + 1] <<
+					(BITS_PER_LONG - shift);
+		}
 		/*
 		 * If we have a properly aligned and fully unreserved
 		 * BITS_PER_LONG block of pages in front of us, free
@@ -201,19 +214,18 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 				count += BITS_PER_LONG;
 				start += BITS_PER_LONG;
 			} else {
-				unsigned long off = 0;
+				unsigned long cur = start;
 
-				vec >>= start & (BITS_PER_LONG - 1);
-				while (vec) {
+				start = ALIGN(start + 1, BITS_PER_LONG);
+				while (vec && cur != start) {
 					if (vec & 1) {
-						page = pfn_to_page(start + off);
+						page = pfn_to_page(cur);
 						__free_pages_bootmem(page, 0);
 						count++;
 					}
 					vec >>= 1;
-					off++;
+					++cur;
 				}
-				start = ALIGN(start + 1, BITS_PER_LONG);
 			}
 		}
 
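
For reference, the new shift handling above stitches a BITS_PER_LONG-wide window of free-page bits together from two adjacent bitmap words whenever start is not word aligned. A minimal userspace sketch of the same bit arithmetic (standalone illustration with made-up map contents, assuming 64-bit longs; not kernel code):

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))

/* Build a window of "free" bits starting at an arbitrary bitmap index;
 * a set bit in map[] means the page is reserved, so the map is inverted. */
static unsigned long free_bits_at(const unsigned long *map, unsigned long idx,
				  unsigned long remaining)
{
	unsigned shift = idx & (BITS_PER_LONG - 1);
	unsigned long vec = ~map[idx / BITS_PER_LONG];

	if (shift) {
		vec >>= shift;
		/* pull in the low bits of the next word behind them */
		if (remaining >= BITS_PER_LONG)
			vec |= ~map[idx / BITS_PER_LONG + 1] <<
				(BITS_PER_LONG - shift);
	}
	return vec;		/* bit 0 now corresponds to page "idx" */
}

int main(void)
{
	/* pages 0-59 reserved, 60-63 free in word 0; all of word 1 free */
	unsigned long map[2] = { ~(0xfUL << 60), 0UL };

	/* a window starting at page 60 crosses the word boundary */
	printf("%#lx\n", free_bits_at(map, 60, 68));	/* all bits set */
	return 0;
}
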
diff --git a/mm/compaction.c b/mm/compaction.c
index 6b807e466497..c62bd063d766 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -816,6 +816,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 static int compact_finished(struct zone *zone,
 			    struct compact_control *cc)
 {
+	unsigned int order;
 	unsigned long watermark;
 
 	if (fatal_signal_pending(current))
@@ -850,22 +851,16 @@ static int compact_finished(struct zone *zone,
 		return COMPACT_CONTINUE;
 
 	/* Direct compactor: Is a suitable page free? */
-	if (cc->page) {
-		/* Was a suitable page captured? */
-		if (*cc->page)
+	for (order = cc->order; order < MAX_ORDER; order++) {
+		struct free_area *area = &zone->free_area[order];
+
+		/* Job done if page is free of the right migratetype */
+		if (!list_empty(&area->free_list[cc->migratetype]))
+			return COMPACT_PARTIAL;
+
+		/* Job done if allocation would set block type */
+		if (cc->order >= pageblock_order && area->nr_free)
 			return COMPACT_PARTIAL;
-	} else {
-		unsigned int order;
-		for (order = cc->order; order < MAX_ORDER; order++) {
-			struct free_area *area = &zone->free_area[cc->order];
-			/* Job done if page is free of the right migratetype */
-			if (!list_empty(&area->free_list[cc->migratetype]))
-				return COMPACT_PARTIAL;
-
-			/* Job done if allocation would set block type */
-			if (cc->order >= pageblock_order && area->nr_free)
-				return COMPACT_PARTIAL;
-		}
 	}
 
 	return COMPACT_CONTINUE;
@@ -921,60 +916,6 @@ unsigned long compaction_suitable(struct zone *zone, int order)
 	return COMPACT_CONTINUE;
 }
 
-static void compact_capture_page(struct compact_control *cc)
-{
-	unsigned long flags;
-	int mtype, mtype_low, mtype_high;
-
-	if (!cc->page || *cc->page)
-		return;
-
-	/*
-	 * For MIGRATE_MOVABLE allocations we capture a suitable page ASAP
-	 * regardless of the migratetype of the freelist is is captured from.
-	 * This is fine because the order for a high-order MIGRATE_MOVABLE
-	 * allocation is typically at least a pageblock size and overall
-	 * fragmentation is not impaired. Other allocation types must
-	 * capture pages from their own migratelist because otherwise they
-	 * could pollute other pageblocks like MIGRATE_MOVABLE with
-	 * difficult to move pages and making fragmentation worse overall.
-	 */
-	if (cc->migratetype == MIGRATE_MOVABLE) {
-		mtype_low = 0;
-		mtype_high = MIGRATE_PCPTYPES;
-	} else {
-		mtype_low = cc->migratetype;
-		mtype_high = cc->migratetype + 1;
-	}
-
-	/* Speculatively examine the free lists without zone lock */
-	for (mtype = mtype_low; mtype < mtype_high; mtype++) {
-		int order;
-		for (order = cc->order; order < MAX_ORDER; order++) {
-			struct page *page;
-			struct free_area *area;
-			area = &(cc->zone->free_area[order]);
-			if (list_empty(&area->free_list[mtype]))
-				continue;
-
-			/* Take the lock and attempt capture of the page */
-			if (!compact_trylock_irqsave(&cc->zone->lock, &flags, cc))
-				return;
-			if (!list_empty(&area->free_list[mtype])) {
-				page = list_entry(area->free_list[mtype].next,
-							struct page, lru);
-				if (capture_free_page(page, cc->order, mtype)) {
-					spin_unlock_irqrestore(&cc->zone->lock,
-								flags);
-					*cc->page = page;
-					return;
-				}
-			}
-			spin_unlock_irqrestore(&cc->zone->lock, flags);
-		}
-	}
-}
-
 static int compact_zone(struct zone *zone, struct compact_control *cc)
 {
 	int ret;
@@ -1054,9 +995,6 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 				goto out;
 			}
 		}
-
-		/* Capture a page now if it is a suitable size */
-		compact_capture_page(cc);
 	}
 
 out:
@@ -1069,8 +1007,7 @@ out:
 
 static unsigned long compact_zone_order(struct zone *zone,
 				 int order, gfp_t gfp_mask,
-				 bool sync, bool *contended,
-				 struct page **page)
+				 bool sync, bool *contended)
 {
 	unsigned long ret;
 	struct compact_control cc = {
@@ -1080,7 +1017,6 @@ static unsigned long compact_zone_order(struct zone *zone,
 		.migratetype = allocflags_to_migratetype(gfp_mask),
 		.zone = zone,
 		.sync = sync,
-		.page = page,
 	};
 	INIT_LIST_HEAD(&cc.freepages);
 	INIT_LIST_HEAD(&cc.migratepages);
@@ -1110,7 +1046,7 @@ int sysctl_extfrag_threshold = 500;
  */
 unsigned long try_to_compact_pages(struct zonelist *zonelist,
 			int order, gfp_t gfp_mask, nodemask_t *nodemask,
-			bool sync, bool *contended, struct page **page)
+			bool sync, bool *contended)
 {
 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
 	int may_enter_fs = gfp_mask & __GFP_FS;
@@ -1136,7 +1072,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
 		int status;
 
 		status = compact_zone_order(zone, order, gfp_mask, sync,
-						contended, page);
+						contended);
 		rc = max(status, rc);
 
 		/* If a normal allocation would succeed, stop compacting */
@@ -1192,7 +1128,6 @@ int compact_pgdat(pg_data_t *pgdat, int order)
 	struct compact_control cc = {
 		.order = order,
 		.sync = false,
-		.page = NULL,
 	};
 
 	return __compact_pgdat(pgdat, &cc);
@@ -1203,14 +1138,13 @@ static int compact_node(int nid)
 	struct compact_control cc = {
 		.order = -1,
 		.sync = true,
-		.page = NULL,
 	};
 
 	return __compact_pgdat(NODE_DATA(nid), &cc);
 }
 
 /* Compact all nodes in the system */
-static int compact_nodes(void)
+static void compact_nodes(void)
 {
 	int nid;
 
@@ -1219,8 +1153,6 @@ static int compact_nodes(void)
 
 	for_each_online_node(nid)
 		compact_node(nid);
-
-	return COMPACT_COMPLETE;
 }
 
 /* The written value is actually unused, all memory is compacted */
@@ -1231,7 +1163,7 @@ int sysctl_compaction_handler(struct ctl_table *table, int write,
 			void __user *buffer, size_t *length, loff_t *ppos)
 {
 	if (write)
-		return compact_nodes();
+		compact_nodes();
 
 	return 0;
 }
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9e894edc7811..6001ee6347a9 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1819,9 +1819,19 @@ int split_huge_page(struct page *page)
 
 	BUG_ON(is_huge_zero_pfn(page_to_pfn(page)));
 	BUG_ON(!PageAnon(page));
-	anon_vma = page_lock_anon_vma_read(page);
+
+	/*
+	 * The caller does not necessarily hold an mmap_sem that would prevent
+	 * the anon_vma disappearing so we first we take a reference to it
+	 * and then lock the anon_vma for write. This is similar to
+	 * page_lock_anon_vma_read except the write lock is taken to serialise
+	 * against parallel split or collapse operations.
+	 */
+	anon_vma = page_get_anon_vma(page);
 	if (!anon_vma)
 		goto out;
+	anon_vma_lock_write(anon_vma);
+
 	ret = 0;
 	if (!PageCompound(page))
 		goto out_unlock;
@@ -1832,7 +1842,8 @@ int split_huge_page(struct page *page)
 
 	BUG_ON(PageCompound(page));
 out_unlock:
-	page_unlock_anon_vma_read(anon_vma);
+	anon_vma_unlock(anon_vma);
+	put_anon_vma(anon_vma);
 out:
 	return ret;
 }
diff --git a/mm/internal.h b/mm/internal.h
index d597f94cc205..9ba21100ebf3 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -135,7 +135,6 @@ struct compact_control {
 	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
 	struct zone *zone;
 	bool contended;			/* True if a lock was contended */
-	struct page **page;		/* Page captured of requested size */
 };
 
 unsigned long
diff --git a/mm/memblock.c b/mm/memblock.c
index 625905523c2a..88adc8afb610 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -314,7 +314,8 @@ static void __init_memblock memblock_merge_regions(struct memblock_type *type)
 		}
 
 		this->size += next->size;
-		memmove(next, next + 1, (type->cnt - (i + 1)) * sizeof(*next));
+		/* move forward from next + 1, index of which is i + 2 */
+		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
 		type->cnt--;
 	}
 }
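
The memmove count change above is an off-by-one fix: once regions i and i+1 have been merged, the entries that still need to slide down start at index i + 2. A quick worked check of the arithmetic (plain C with hypothetical values, not the memblock structures themselves):

#include <stdio.h>

int main(void)
{
	unsigned long cnt = 5;	/* regions 0..4 before the merge */
	unsigned long i = 1;	/* region 1 absorbs region 2 */

	/* only regions 3 and 4 remain to be moved into slots 2 and 3 */
	printf("old count: %lu\n", cnt - (i + 1));	/* 3: one element too many */
	printf("new count: %lu\n", cnt - (i + 2));	/* 2: correct */
	return 0;
}
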
diff --git a/mm/migrate.c b/mm/migrate.c
index 3b676b0c5c3e..c38778610aa8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1679,9 +1679,21 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	page_xchg_last_nid(new_page, page_last_nid(page));
 
 	isolated = numamigrate_isolate_page(pgdat, page);
-	if (!isolated) {
+
+	/*
+	 * Failing to isolate or a GUP pin prevents migration. The expected
+	 * page count is 2. 1 for anonymous pages without a mapping and 1
+	 * for the callers pin. If the page was isolated, the page will
+	 * need to be put back on the LRU.
+	 */
+	if (!isolated || page_count(page) != 2) {
 		count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
 		put_page(new_page);
+		if (isolated) {
+			putback_lru_page(page);
+			isolated = 0;
+			goto out;
+		}
 		goto out_keep_locked;
 	}
 
diff --git a/mm/mmap.c b/mm/mmap.c
index f54b235f29a9..35730ee9d515 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2886,7 +2886,7 @@ static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
 		 * The LSB of head.next can't change from under us
 		 * because we hold the mm_all_locks_mutex.
 		 */
-		down_write(&anon_vma->root->rwsem);
+		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem);
 		/*
 		 * We can safely modify head.next after taking the
 		 * anon_vma->root->rwsem. If some other vma in this mm shares
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bc6cc0e913bd..df2022ff0c8a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1384,14 +1384,8 @@ void split_page(struct page *page, unsigned int order)
 		set_page_refcounted(page + i);
 }
 
-/*
- * Similar to the split_page family of functions except that the page
- * required at the given order and being isolated now to prevent races
- * with parallel allocators
- */
-int capture_free_page(struct page *page, int alloc_order, int migratetype)
+static int __isolate_free_page(struct page *page, unsigned int order)
 {
-	unsigned int order;
 	unsigned long watermark;
 	struct zone *zone;
 	int mt;
@@ -1399,7 +1393,6 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
 	BUG_ON(!PageBuddy(page));
 
 	zone = page_zone(page);
-	order = page_order(page);
 	mt = get_pageblock_migratetype(page);
 
 	if (mt != MIGRATE_ISOLATE) {
@@ -1408,7 +1401,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
 		if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
 			return 0;
 
-		__mod_zone_freepage_state(zone, -(1UL << alloc_order), mt);
+		__mod_zone_freepage_state(zone, -(1UL << order), mt);
 	}
 
 	/* Remove page from free list */
@@ -1416,11 +1409,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
 	zone->free_area[order].nr_free--;
 	rmv_page_order(page);
 
-	if (alloc_order != order)
-		expand(zone, page, alloc_order, order,
-			&zone->free_area[order], migratetype);
-
-	/* Set the pageblock if the captured page is at least a pageblock */
+	/* Set the pageblock if the isolated page is at least a pageblock */
 	if (order >= pageblock_order - 1) {
 		struct page *endpage = page + (1 << order) - 1;
 		for (; page < endpage; page += pageblock_nr_pages) {
@@ -1431,7 +1420,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
 		}
 	}
 
-	return 1UL << alloc_order;
+	return 1UL << order;
 }
 
 /*
@@ -1449,10 +1438,9 @@ int split_free_page(struct page *page)
 	unsigned int order;
 	int nr_pages;
 
-	BUG_ON(!PageBuddy(page));
 	order = page_order(page);
 
-	nr_pages = capture_free_page(page, order, 0);
+	nr_pages = __isolate_free_page(page, order);
 	if (!nr_pages)
 		return 0;
 
@@ -2136,8 +2124,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	bool *contended_compaction, bool *deferred_compaction,
 	unsigned long *did_some_progress)
 {
-	struct page *page = NULL;
-
 	if (!order)
 		return NULL;
 
@@ -2149,16 +2135,12 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	current->flags |= PF_MEMALLOC;
 	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
 						nodemask, sync_migration,
-						contended_compaction, &page);
+						contended_compaction);
 	current->flags &= ~PF_MEMALLOC;
 
-	/* If compaction captured a page, prep and use it */
-	if (page) {
-		prep_new_page(page, order, gfp_mask);
-		goto got_page;
-	}
-
 	if (*did_some_progress != COMPACT_SKIPPED) {
+		struct page *page;
+
 		/* Page migration frees to the PCP lists but we want merging */
 		drain_pages(get_cpu());
 		put_cpu();
@@ -2168,7 +2150,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 				alloc_flags & ~ALLOC_NO_WATERMARKS,
 				preferred_zone, migratetype);
 		if (page) {
-got_page:
 			preferred_zone->compact_blockskip_flush = false;
 			preferred_zone->compact_considered = 0;
 			preferred_zone->compact_defer_shift = 0;
@@ -5604,7 +5585,7 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
 	pfn &= (PAGES_PER_SECTION-1);
 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
 #else
-	pfn = pfn - zone->zone_start_pfn;
+	pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
 #endif /* CONFIG_SPARSEMEM */
 }
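
The last hunk matters when a zone does not start on a pageblock boundary: without the round_down(), two pfns inside the same pageblock can be mapped to different bitmap indices. A standalone arithmetic sketch (userspace C with made-up values; the pageblock_order and NR_PAGEBLOCK_BITS constants below are assumptions for illustration, not taken from a kernel config):

#include <stdio.h>

#define pageblock_order		9
#define pageblock_nr_pages	(1UL << pageblock_order)
#define NR_PAGEBLOCK_BITS	4

static unsigned long bitidx(unsigned long pfn, unsigned long zone_start_pfn,
			    int use_round_down)
{
	unsigned long base = zone_start_pfn;

	if (use_round_down)
		base &= ~(pageblock_nr_pages - 1);	/* round_down() */

	return ((pfn - base) >> pageblock_order) * NR_PAGEBLOCK_BITS;
}

int main(void)
{
	unsigned long zone_start_pfn = 456;	/* not pageblock aligned */
	unsigned long a = 1024, b = 1535;	/* both in the same 512-page block */

	printf("old: %lu vs %lu\n", bitidx(a, zone_start_pfn, 0),
				    bitidx(b, zone_start_pfn, 0));	/* 4 vs 8 */
	printf("new: %lu vs %lu\n", bitidx(a, zone_start_pfn, 1),
				    bitidx(b, zone_start_pfn, 1));	/* 8 vs 8 */
	return 0;
}
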