about summary refs log tree commit diff stats
path: root/include/linux/memblock.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/memblock.h')
-rw-r--r--  include/linux/memblock.h  81
1 file changed, 67 insertions(+), 14 deletions(-)
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 8a20a51ed42d..b660e05b63d4 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -18,6 +18,7 @@
18#include <linux/mm.h> 18#include <linux/mm.h>
19 19
20#define INIT_MEMBLOCK_REGIONS 128 20#define INIT_MEMBLOCK_REGIONS 128
21#define INIT_PHYSMEM_REGIONS 4
21 22
22/* Definition of memblock flags. */ 23/* Definition of memblock flags. */
23#define MEMBLOCK_HOTPLUG 0x1 /* hotpluggable region */ 24#define MEMBLOCK_HOTPLUG 0x1 /* hotpluggable region */
@@ -43,6 +44,9 @@ struct memblock {
43 phys_addr_t current_limit; 44 phys_addr_t current_limit;
44 struct memblock_type memory; 45 struct memblock_type memory;
45 struct memblock_type reserved; 46 struct memblock_type reserved;
47#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
48 struct memblock_type physmem;
49#endif
46}; 50};
47 51
48extern struct memblock memblock; 52extern struct memblock memblock;
@@ -71,6 +75,63 @@ int memblock_reserve(phys_addr_t base, phys_addr_t size);
71void memblock_trim_memory(phys_addr_t align); 75void memblock_trim_memory(phys_addr_t align);
72int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size); 76int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
73int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size); 77int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
78
79/* Low level functions */
80int memblock_add_range(struct memblock_type *type,
81 phys_addr_t base, phys_addr_t size,
82 int nid, unsigned long flags);
83
84int memblock_remove_range(struct memblock_type *type,
85 phys_addr_t base,
86 phys_addr_t size);
87
88void __next_mem_range(u64 *idx, int nid, struct memblock_type *type_a,
89 struct memblock_type *type_b, phys_addr_t *out_start,
90 phys_addr_t *out_end, int *out_nid);
91
92void __next_mem_range_rev(u64 *idx, int nid, struct memblock_type *type_a,
93 struct memblock_type *type_b, phys_addr_t *out_start,
94 phys_addr_t *out_end, int *out_nid);
95
96/**
97 * for_each_mem_range - iterate through memblock areas from type_a and not
98 * included in type_b. Or just type_a if type_b is NULL.
99 * @i: u64 used as loop variable
100 * @type_a: ptr to memblock_type to iterate
101 * @type_b: ptr to memblock_type which excludes from the iteration
102 * @nid: node selector, %NUMA_NO_NODE for all nodes
103 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
104 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
105 * @p_nid: ptr to int for nid of the range, can be %NULL
106 */
107#define for_each_mem_range(i, type_a, type_b, nid, \
108 p_start, p_end, p_nid) \
109 for (i = 0, __next_mem_range(&i, nid, type_a, type_b, \
110 p_start, p_end, p_nid); \
111 i != (u64)ULLONG_MAX; \
112 __next_mem_range(&i, nid, type_a, type_b, \
113 p_start, p_end, p_nid))
114
115/**
116 * for_each_mem_range_rev - reverse iterate through memblock areas from
117 * type_a and not included in type_b. Or just type_a if type_b is NULL.
118 * @i: u64 used as loop variable
119 * @type_a: ptr to memblock_type to iterate
120 * @type_b: ptr to memblock_type which excludes from the iteration
121 * @nid: node selector, %NUMA_NO_NODE for all nodes
122 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
123 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
124 * @p_nid: ptr to int for nid of the range, can be %NULL
125 */
126#define for_each_mem_range_rev(i, type_a, type_b, nid, \
127 p_start, p_end, p_nid) \
128 for (i = (u64)ULLONG_MAX, \
129 __next_mem_range_rev(&i, nid, type_a, type_b, \
130 p_start, p_end, p_nid); \
131 i != (u64)ULLONG_MAX; \
132 __next_mem_range_rev(&i, nid, type_a, type_b, \
133 p_start, p_end, p_nid))
134
74#ifdef CONFIG_MOVABLE_NODE 135#ifdef CONFIG_MOVABLE_NODE
75static inline bool memblock_is_hotpluggable(struct memblock_region *m) 136static inline bool memblock_is_hotpluggable(struct memblock_region *m)
76{ 137{
@@ -113,9 +174,6 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
113 i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid)) 174 i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
114#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 175#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
115 176
116void __next_free_mem_range(u64 *idx, int nid, phys_addr_t *out_start,
117 phys_addr_t *out_end, int *out_nid);
118
119/** 177/**
120 * for_each_free_mem_range - iterate through free memblock areas 178 * for_each_free_mem_range - iterate through free memblock areas
121 * @i: u64 used as loop variable 179 * @i: u64 used as loop variable
@@ -128,13 +186,8 @@ void __next_free_mem_range(u64 *idx, int nid, phys_addr_t *out_start,
128 * soon as memblock is initialized. 186 * soon as memblock is initialized.
129 */ 187 */
130#define for_each_free_mem_range(i, nid, p_start, p_end, p_nid) \ 188#define for_each_free_mem_range(i, nid, p_start, p_end, p_nid) \
131 for (i = 0, \ 189 for_each_mem_range(i, &memblock.memory, &memblock.reserved, \
132 __next_free_mem_range(&i, nid, p_start, p_end, p_nid); \ 190 nid, p_start, p_end, p_nid)
133 i != (u64)ULLONG_MAX; \
134 __next_free_mem_range(&i, nid, p_start, p_end, p_nid))
135
136void __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t *out_start,
137 phys_addr_t *out_end, int *out_nid);
138 191
139/** 192/**
140 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas 193 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
@@ -148,10 +201,8 @@ void __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t *out_start,
148 * order. Available as soon as memblock is initialized. 201 * order. Available as soon as memblock is initialized.
149 */ 202 */
150#define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid) \ 203#define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid) \
151 for (i = (u64)ULLONG_MAX, \ 204 for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
152 __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid); \ 205 nid, p_start, p_end, p_nid)
153 i != (u64)ULLONG_MAX; \
154 __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid))
155 206
156static inline void memblock_set_region_flags(struct memblock_region *r, 207static inline void memblock_set_region_flags(struct memblock_region *r,
157 unsigned long flags) 208 unsigned long flags)
@@ -221,6 +272,8 @@ static inline bool memblock_bottom_up(void) { return false; }
221#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) 272#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
222#define MEMBLOCK_ALLOC_ACCESSIBLE 0 273#define MEMBLOCK_ALLOC_ACCESSIBLE 0
223 274
275phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
276 phys_addr_t start, phys_addr_t end);
224phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align, 277phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
225 phys_addr_t max_addr); 278 phys_addr_t max_addr);
226phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align, 279phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,