Diffstat (limited to 'include/linux/memblock.h')
-rw-r--r--	include/linux/memblock.h	168
1 file changed, 123 insertions(+), 45 deletions(-)
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index a59faf2b5edd..62a10c2a11f2 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -2,6 +2,7 @@
 #define _LINUX_MEMBLOCK_H
 #ifdef __KERNEL__
 
+#ifdef CONFIG_HAVE_MEMBLOCK
 /*
  * Logical memory blocks.
  *
@@ -16,73 +17,150 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 
-#define MAX_MEMBLOCK_REGIONS 128
+#include <asm/memblock.h>
 
-struct memblock_property {
-	u64 base;
-	u64 size;
-};
+#define INIT_MEMBLOCK_REGIONS	128
+#define MEMBLOCK_ERROR		0
 
 struct memblock_region {
-	unsigned long cnt;
-	u64 size;
-	struct memblock_property region[MAX_MEMBLOCK_REGIONS+1];
+	phys_addr_t base;
+	phys_addr_t size;
+};
+
+struct memblock_type {
+	unsigned long cnt;	/* number of regions */
+	unsigned long max;	/* size of the allocated array */
+	struct memblock_region *regions;
 };
 
 struct memblock {
-	unsigned long debug;
-	u64 rmo_size;
-	struct memblock_region memory;
-	struct memblock_region reserved;
+	phys_addr_t current_limit;
+	phys_addr_t memory_size;	/* Updated by memblock_analyze() */
+	struct memblock_type memory;
+	struct memblock_type reserved;
 };
 
 extern struct memblock memblock;
+extern int memblock_debug;
+extern int memblock_can_resize;
 
-extern void __init memblock_init(void);
-extern void __init memblock_analyze(void);
-extern long memblock_add(u64 base, u64 size);
-extern long memblock_remove(u64 base, u64 size);
-extern long __init memblock_free(u64 base, u64 size);
-extern long __init memblock_reserve(u64 base, u64 size);
-extern u64 __init memblock_alloc_nid(u64 size, u64 align, int nid,
-		u64 (*nid_range)(u64, u64, int *));
-extern u64 __init memblock_alloc(u64 size, u64 align);
-extern u64 __init memblock_alloc_base(u64 size,
-		u64, u64 max_addr);
-extern u64 __init __memblock_alloc_base(u64 size,
-		u64 align, u64 max_addr);
-extern u64 __init memblock_phys_mem_size(void);
-extern u64 memblock_end_of_DRAM(void);
-extern void __init memblock_enforce_memory_limit(u64 memory_limit);
-extern int __init memblock_is_reserved(u64 addr);
-extern int memblock_is_region_reserved(u64 base, u64 size);
-extern int memblock_find(struct memblock_property *res);
+#define memblock_dbg(fmt, ...) \
+	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
+
+u64 memblock_find_in_range(u64 start, u64 end, u64 size, u64 align);
+int memblock_free_reserved_regions(void);
+int memblock_reserve_reserved_regions(void);
+
+extern void memblock_init(void);
+extern void memblock_analyze(void);
+extern long memblock_add(phys_addr_t base, phys_addr_t size);
+extern long memblock_remove(phys_addr_t base, phys_addr_t size);
+extern long memblock_free(phys_addr_t base, phys_addr_t size);
+extern long memblock_reserve(phys_addr_t base, phys_addr_t size);
+
+/* The NUMA-aware allocator is only available if
+ * CONFIG_ARCH_POPULATES_NODE_MAP is set
+ */
+extern phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align,
+					int nid);
+extern phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
+					int nid);
+
+extern phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
+
+/* Flags for memblock_alloc_base() and __memblock_alloc_base() */
+#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
+#define MEMBLOCK_ALLOC_ACCESSIBLE	0
+
+extern phys_addr_t memblock_alloc_base(phys_addr_t size,
+					phys_addr_t align,
+					phys_addr_t max_addr);
+extern phys_addr_t __memblock_alloc_base(phys_addr_t size,
+					phys_addr_t align,
+					phys_addr_t max_addr);
+extern phys_addr_t memblock_phys_mem_size(void);
+extern phys_addr_t memblock_end_of_DRAM(void);
+extern void memblock_enforce_memory_limit(phys_addr_t memory_limit);
+extern int memblock_is_memory(phys_addr_t addr);
+extern int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
+extern int memblock_is_reserved(phys_addr_t addr);
+extern int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
 
 extern void memblock_dump_all(void);
 
-static inline u64
-memblock_size_bytes(struct memblock_region *type, unsigned long region_nr)
+/* Provided by the architecture */
+extern phys_addr_t memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid);
+extern int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
+					phys_addr_t addr2, phys_addr_t size2);
+
+/**
+ * memblock_set_current_limit - Set the current allocation limit to allow
+ *                              limiting allocations to what is currently
+ *                              accessible during boot
+ * @limit: New limit value (physical address)
+ */
+extern void memblock_set_current_limit(phys_addr_t limit);
+
+
+/*
+ * pfn conversion functions
+ *
+ * While the memory MEMBLOCKs should always be page aligned, the reserved
+ * MEMBLOCKs may not be. These accessors attempt to provide a very clear
+ * idea of what they return for such non-aligned MEMBLOCKs.
+ */
+
+/**
+ * memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region
+ * @reg: memblock_region structure
+ */
+static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
 {
-	return type->region[region_nr].size;
+	return PFN_UP(reg->base);
 }
-static inline u64
-memblock_size_pages(struct memblock_region *type, unsigned long region_nr)
+
+/**
+ * memblock_region_memory_end_pfn - Return the end_pfn of this region
+ * @reg: memblock_region structure
+ */
+static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
 {
-	return memblock_size_bytes(type, region_nr) >> PAGE_SHIFT;
+	return PFN_DOWN(reg->base + reg->size);
 }
-static inline u64
-memblock_start_pfn(struct memblock_region *type, unsigned long region_nr)
+
+/**
+ * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region
+ * @reg: memblock_region structure
+ */
+static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
 {
-	return type->region[region_nr].base >> PAGE_SHIFT;
+	return PFN_DOWN(reg->base);
 }
-static inline u64
-memblock_end_pfn(struct memblock_region *type, unsigned long region_nr)
+
+/**
+ * memblock_region_reserved_end_pfn - Return the end_pfn of this region
+ * @reg: memblock_region structure
+ */
+static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
 {
-	return memblock_start_pfn(type, region_nr) +
-	       memblock_size_pages(type, region_nr);
+	return PFN_UP(reg->base + reg->size);
 }
 
-#include <asm/memblock.h>
+#define for_each_memblock(memblock_type, region)	\
+	for (region = memblock.memblock_type.regions;	\
+	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \
+	     region++)
+
+
+#ifdef ARCH_DISCARD_MEMBLOCK
+#define __init_memblock __init
+#define __initdata_memblock __initdata
+#else
+#define __init_memblock
+#define __initdata_memblock
+#endif
+
+#endif /* CONFIG_HAVE_MEMBLOCK */
 
 #endif /* __KERNEL__ */
 
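The reworked interface above is driven from early arch setup code in a fixed order: memblock_init(), then memblock_add()/memblock_reserve() for each range, then memblock_analyze() before the first allocation. A minimal sketch, assuming a hypothetical setup hook and illustrative addresses; only the memblock calls themselves come from the header above:

	#include <linux/memblock.h>

	/* hypothetical early-boot hook; addresses are illustrative */
	void __init example_arch_memblock_setup(phys_addr_t ram_base,
						phys_addr_t ram_size)
	{
		phys_addr_t scratch;

		memblock_init();			/* set up the region arrays */
		memblock_add(ram_base, ram_size);	/* register usable RAM */
		memblock_reserve(ram_base, 0x100000);	/* e.g. protect a firmware area */
		memblock_analyze();			/* recompute memblock.memory_size */

		/* page-aligned early allocation, bounded by memblock.current_limit */
		scratch = memblock_alloc(16 * PAGE_SIZE, PAGE_SIZE);
		memblock_dbg("scratch at %llx\n", (unsigned long long)scratch);
	}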
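The new for_each_memblock() iterator plus the pfn accessors replace the old index-based memblock_start_pfn()/memblock_size_pages() pattern; note that the memory accessors round inward (PFN_UP on base, PFN_DOWN on end), so only fully contained pages are counted. A sketch of walking every memory region, with a made-up function name:

	static unsigned long __init example_count_present_pages(void)
	{
		struct memblock_region *reg;
		unsigned long pages = 0;

		/* walks memblock.memory.regions[0..cnt) */
		for_each_memblock(memory, reg)
			pages += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

		return pages;
	}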
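memblock_set_current_limit() caps where memblock_alloc() and MEMBLOCK_ALLOC_ACCESSIBLE allocations may land, which matters before the MMU can reach all of RAM; a per-call ceiling can also be passed via the *_alloc_base() variants. A sketch with a hypothetical caller, assuming the behavior of the mm/memblock.c implementation of this era, where __memblock_alloc_base() returns MEMBLOCK_ERROR (0) on failure while memblock_alloc_base() panics instead:

	/* hypothetical early MMU setup; lowmem_top is the mappable ceiling */
	void __init example_mmu_init(phys_addr_t lowmem_top)
	{
		phys_addr_t pgtable;

		/* subsequent memblock_alloc() calls stay below lowmem_top */
		memblock_set_current_limit(lowmem_top);

		/* one page for early page tables, with an explicit upper bound */
		pgtable = __memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, lowmem_top);
		if (pgtable == MEMBLOCK_ERROR)
			panic("cannot allocate early page table");
	}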