path: root/include/linux/memblock.h
author		Philipp Hachtmann <phacht@linux.vnet.ibm.com>	2014-01-29 12:16:01 -0500
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2014-05-20 02:58:39 -0400
commit		f1af9d3af308145478749194346f11efad1134b2
tree		86abcc79427e3b24230c4b2a3fdf0eae524888b7 /include/linux/memblock.h
parent		6ed8bf82fe44752f748bdc19567f79a961cf916c
mm/memblock: Do some refactoring, enhance API
Refactor the memblock code and extend the memblock API to make it more
flexible. With the extended API it is simple to define and work with
additional memory lists.

The static functions memblock_add_region and __memblock_remove are
renamed to memblock_add_range and memblock_remove_range and added to
the memblock API.

The __next_free_mem_range and __next_free_mem_range_rev functions are
replaced with calls to the more generic list walkers __next_mem_range
and __next_mem_range_rev.

To walk an arbitrary memory list, two new macros for_each_mem_range
and for_each_mem_range_rev are added. These new macros are used to
define for_each_free_mem_range and for_each_free_mem_range_reverse.

Signed-off-by: Philipp Hachtmann <phacht@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
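[Editor's illustration, not part of the patch] A minimal sketch of what the extended API enables: a private memory list built with the now-exported memblock_add_range() and walked with the new for_each_mem_range() macro. The names my_regions/my_type, the region count, and the addresses are made up for illustration, and the snippet only compiles in a kernel build context:

	/* Hypothetical example: a private memory list using the extended API. */
	#include <linux/memblock.h>

	#define MY_MAX_REGIONS 8

	static struct memblock_region my_regions[MY_MAX_REGIONS];

	static struct memblock_type my_type = {
		.cnt = 1,		/* memblock types start with one empty region */
		.max = MY_MAX_REGIONS,
		.regions = my_regions,
	};

	static void __init walk_my_list(void)
	{
		phys_addr_t start, end;
		u64 i;

		/* Fill the private list via the newly exported low-level call. */
		memblock_add_range(&my_type, 0x1000000, 0x100000, NUMA_NO_NODE, 0);

		/* Walk my_type with memblock.reserved excluded; a NULL type_b
		 * would walk my_type alone. */
		for_each_mem_range(i, &my_type, &memblock.reserved, NUMA_NO_NODE,
				   &start, &end, NULL)
			pr_info("range: %pa..%pa\n", &start, &end);
	}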
Diffstat (limited to 'include/linux/memblock.h')
-rw-r--r--	include/linux/memblock.h | 75
1 file changed, 61 insertions(+), 14 deletions(-)
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 8a20a51ed42d..f669016874b3 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -71,6 +71,63 @@ int memblock_reserve(phys_addr_t base, phys_addr_t size);
 void memblock_trim_memory(phys_addr_t align);
 int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
 int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
+
+/* Low level functions */
+int memblock_add_range(struct memblock_type *type,
+		       phys_addr_t base, phys_addr_t size,
+		       int nid, unsigned long flags);
+
+int memblock_remove_range(struct memblock_type *type,
+			  phys_addr_t base,
+			  phys_addr_t size);
+
+void __next_mem_range(u64 *idx, int nid, struct memblock_type *type_a,
+		      struct memblock_type *type_b, phys_addr_t *out_start,
+		      phys_addr_t *out_end, int *out_nid);
+
+void __next_mem_range_rev(u64 *idx, int nid, struct memblock_type *type_a,
+			  struct memblock_type *type_b, phys_addr_t *out_start,
+			  phys_addr_t *out_end, int *out_nid);
+
+/**
+ * for_each_mem_range - iterate through memblock areas from type_a and not
+ * included in type_b. Or just type_a if type_b is NULL.
+ * @i: u64 used as loop variable
+ * @type_a: ptr to memblock_type to iterate
+ * @type_b: ptr to memblock_type which excludes from the iteration
+ * @nid: node selector, %NUMA_NO_NODE for all nodes
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ */
+#define for_each_mem_range(i, type_a, type_b, nid,			\
+			   p_start, p_end, p_nid)			\
+	for (i = 0, __next_mem_range(&i, nid, type_a, type_b,		\
+				     p_start, p_end, p_nid);		\
+	     i != (u64)ULLONG_MAX;					\
+	     __next_mem_range(&i, nid, type_a, type_b,			\
+			      p_start, p_end, p_nid))
+
+/**
+ * for_each_mem_range_rev - reverse iterate through memblock areas from
+ * type_a and not included in type_b. Or just type_a if type_b is NULL.
+ * @i: u64 used as loop variable
+ * @type_a: ptr to memblock_type to iterate
+ * @type_b: ptr to memblock_type which excludes from the iteration
+ * @nid: node selector, %NUMA_NO_NODE for all nodes
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ */
+#define for_each_mem_range_rev(i, type_a, type_b, nid,			\
+			       p_start, p_end, p_nid)			\
+	for (i = (u64)ULLONG_MAX,					\
+	     __next_mem_range_rev(&i, nid, type_a, type_b,		\
+				  p_start, p_end, p_nid);		\
+	     i != (u64)ULLONG_MAX;					\
+	     __next_mem_range_rev(&i, nid, type_a, type_b,		\
+				  p_start, p_end, p_nid))
+
 #ifdef CONFIG_MOVABLE_NODE
 static inline bool memblock_is_hotpluggable(struct memblock_region *m)
 {
@@ -113,9 +170,6 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
 	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
-void __next_free_mem_range(u64 *idx, int nid, phys_addr_t *out_start,
-			   phys_addr_t *out_end, int *out_nid);
-
 /**
  * for_each_free_mem_range - iterate through free memblock areas
  * @i: u64 used as loop variable
@@ -128,13 +182,8 @@ void __next_free_mem_range(u64 *idx, int nid, phys_addr_t *out_start,
  * soon as memblock is initialized.
  */
 #define for_each_free_mem_range(i, nid, p_start, p_end, p_nid)	\
-	for (i = 0,						\
-	     __next_free_mem_range(&i, nid, p_start, p_end, p_nid);	\
-	     i != (u64)ULLONG_MAX;					\
-	     __next_free_mem_range(&i, nid, p_start, p_end, p_nid))
-
-void __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t *out_start,
-			       phys_addr_t *out_end, int *out_nid);
+	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
+			   nid, p_start, p_end, p_nid)
 
 /**
  * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
@@ -148,10 +197,8 @@ void __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t *out_start,
  * order. Available as soon as memblock is initialized.
  */
 #define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid)	\
-	for (i = (u64)ULLONG_MAX,					\
-	     __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid);	\
-	     i != (u64)ULLONG_MAX;					\
-	     __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid))
+	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved,	\
+			       nid, p_start, p_end, p_nid)
 
 static inline void memblock_set_region_flags(struct memblock_region *r,
 					     unsigned long flags)
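
[Editor's illustration, not part of the patch] Callers of the old wrappers are unaffected: after this change for_each_free_mem_range() simply expands to a for_each_mem_range() walk over memblock.memory with memblock.reserved excluded. A minimal sketch of unchanged caller code (find_space() is a made-up name):

	static phys_addr_t __init find_space(phys_addr_t size)
	{
		phys_addr_t start, end;
		u64 i;

		/* Iterates free ranges: memblock.memory minus memblock.reserved. */
		for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL)
			if (end - start >= size)
				return start;	/* first free range large enough */

		return 0;	/* no fit found */
	}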