Diffstat (limited to 'include/linux/memblock.h')
 include/linux/memblock.h | 175 ++++++++++++++++++++++++++++++++-----------
 1 file changed, 130 insertions(+), 45 deletions(-)
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index a59faf2b5edd..7525e38c434d 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -2,6 +2,9 @@
 #define _LINUX_MEMBLOCK_H
 #ifdef __KERNEL__
 
+#define MEMBLOCK_ERROR	0
+
+#ifdef CONFIG_HAVE_MEMBLOCK
 /*
  * Logical memory blocks.
  *
@@ -16,73 +19,155 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 
-#define MAX_MEMBLOCK_REGIONS 128
+#include <asm/memblock.h>
 
-struct memblock_property {
-	u64 base;
-	u64 size;
-};
+#define INIT_MEMBLOCK_REGIONS	128
 
 struct memblock_region {
-	unsigned long cnt;
-	u64 size;
-	struct memblock_property region[MAX_MEMBLOCK_REGIONS+1];
+	phys_addr_t base;
+	phys_addr_t size;
+};
+
+struct memblock_type {
+	unsigned long cnt;	/* number of regions */
+	unsigned long max;	/* size of the allocated array */
+	struct memblock_region *regions;
 };
 
 struct memblock {
-	unsigned long debug;
-	u64 rmo_size;
-	struct memblock_region memory;
-	struct memblock_region reserved;
+	phys_addr_t current_limit;
+	phys_addr_t memory_size;	/* Updated by memblock_analyze() */
+	struct memblock_type memory;
+	struct memblock_type reserved;
 };
 
 extern struct memblock memblock;
+extern int memblock_debug;
+extern int memblock_can_resize;
 
-extern void __init memblock_init(void);
-extern void __init memblock_analyze(void);
-extern long memblock_add(u64 base, u64 size);
-extern long memblock_remove(u64 base, u64 size);
-extern long __init memblock_free(u64 base, u64 size);
-extern long __init memblock_reserve(u64 base, u64 size);
-extern u64 __init memblock_alloc_nid(u64 size, u64 align, int nid,
-				u64 (*nid_range)(u64, u64, int *));
-extern u64 __init memblock_alloc(u64 size, u64 align);
-extern u64 __init memblock_alloc_base(u64 size,
-		u64, u64 max_addr);
-extern u64 __init __memblock_alloc_base(u64 size,
-		u64 align, u64 max_addr);
-extern u64 __init memblock_phys_mem_size(void);
-extern u64 memblock_end_of_DRAM(void);
-extern void __init memblock_enforce_memory_limit(u64 memory_limit);
-extern int __init memblock_is_reserved(u64 addr);
-extern int memblock_is_region_reserved(u64 base, u64 size);
-extern int memblock_find(struct memblock_property *res);
+#define memblock_dbg(fmt, ...) \
+	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
+
+u64 memblock_find_in_range(u64 start, u64 end, u64 size, u64 align);
+int memblock_free_reserved_regions(void);
+int memblock_reserve_reserved_regions(void);
+
+extern void memblock_init(void);
+extern void memblock_analyze(void);
+extern long memblock_add(phys_addr_t base, phys_addr_t size);
+extern long memblock_remove(phys_addr_t base, phys_addr_t size);
+extern long memblock_free(phys_addr_t base, phys_addr_t size);
+extern long memblock_reserve(phys_addr_t base, phys_addr_t size);
+
+/* The NUMA-aware allocator is only available if
+ * CONFIG_ARCH_POPULATES_NODE_MAP is set
+ */
+extern phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align,
+					int nid);
+extern phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
+					  int nid);
+
+extern phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
+
+/* Flags for memblock_alloc_base() and __memblock_alloc_base() */
+#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
+#define MEMBLOCK_ALLOC_ACCESSIBLE	0
+
+extern phys_addr_t memblock_alloc_base(phys_addr_t size,
+				       phys_addr_t align,
+				       phys_addr_t max_addr);
+extern phys_addr_t __memblock_alloc_base(phys_addr_t size,
+					 phys_addr_t align,
+					 phys_addr_t max_addr);
+extern phys_addr_t memblock_phys_mem_size(void);
+extern phys_addr_t memblock_end_of_DRAM(void);
+extern void memblock_enforce_memory_limit(phys_addr_t memory_limit);
+extern int memblock_is_memory(phys_addr_t addr);
+extern int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
+extern int memblock_is_reserved(phys_addr_t addr);
+extern int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
 
 extern void memblock_dump_all(void);
 
-static inline u64
-memblock_size_bytes(struct memblock_region *type, unsigned long region_nr)
+/* Provided by the architecture */
+extern phys_addr_t memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid);
+extern int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
+					phys_addr_t addr2, phys_addr_t size2);
+
+/**
+ * memblock_set_current_limit - Set the current allocation limit to allow
+ *                              limiting allocations to what is currently
+ *                              accessible during boot
+ * @limit: New limit value (physical address)
+ */
+extern void memblock_set_current_limit(phys_addr_t limit);
+
+
+/*
+ * pfn conversion functions
+ *
+ * While the memory MEMBLOCKs should always be page aligned, the reserved
+ * MEMBLOCKs may not be. These accessors attempt to provide a very clear
+ * idea of what they return for such non-aligned MEMBLOCKs.
+ */
+
+/**
+ * memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region
+ * @reg: memblock_region structure
+ */
+static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
 {
-	return type->region[region_nr].size;
+	return PFN_UP(reg->base);
 }
-static inline u64
-memblock_size_pages(struct memblock_region *type, unsigned long region_nr)
+
+/**
+ * memblock_region_memory_end_pfn - Return the end_pfn of this region
+ * @reg: memblock_region structure
+ */
+static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
 {
-	return memblock_size_bytes(type, region_nr) >> PAGE_SHIFT;
+	return PFN_DOWN(reg->base + reg->size);
 }
-static inline u64
-memblock_start_pfn(struct memblock_region *type, unsigned long region_nr)
+
+/**
+ * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region
+ * @reg: memblock_region structure
+ */
+static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
 {
-	return type->region[region_nr].base >> PAGE_SHIFT;
+	return PFN_DOWN(reg->base);
 }
-static inline u64
-memblock_end_pfn(struct memblock_region *type, unsigned long region_nr)
+
+/**
+ * memblock_region_reserved_end_pfn - Return the end_pfn of this region
+ * @reg: memblock_region structure
+ */
+static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
 {
-	return memblock_start_pfn(type, region_nr) +
-	       memblock_size_pages(type, region_nr);
+	return PFN_UP(reg->base + reg->size);
 }
 
-#include <asm/memblock.h>
+#define for_each_memblock(memblock_type, region)	\
+	for (region = memblock.memblock_type.regions;	\
+	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt);	\
+	     region++)
+
+
+#ifdef ARCH_DISCARD_MEMBLOCK
+#define __init_memblock __init
+#define __initdata_memblock __initdata
+#else
+#define __init_memblock
+#define __initdata_memblock
+#endif
+
+#else
+static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
+{
+	return MEMBLOCK_ERROR;
+}
+
+#endif /* CONFIG_HAVE_MEMBLOCK */
 
 #endif /* __KERNEL__ */
 
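The net effect of the rework is easier to see in use. What follows is a minimal, hypothetical sketch of early-boot code exercising the API introduced above; it is not part of the commit. It assumes CONFIG_HAVE_MEMBLOCK is set (so the real allocator is built rather than the MEMBLOCK_ERROR stub), and the 256MB limit, the one-page allocation, and the function name example_memblock_usage are invented for illustration.

/*
 * Hypothetical early-boot sketch exercising the reworked memblock API
 * shown in the diff above. Illustration only: the 256MB limit, the
 * one-page allocation, and the function name are all made up.
 */
#include <linux/kernel.h>
#include <linux/memblock.h>

static void __init example_memblock_usage(void)
{
	struct memblock_region *reg;
	phys_addr_t pgtable;

	/* Cap allocations at what is currently accessible during boot;
	 * 256MB is an arbitrary example value. */
	memblock_set_current_limit(256UL * 1024 * 1024);

	/* The allocators now return phys_addr_t and signal failure by
	 * returning MEMBLOCK_ERROR (0) rather than aborting internally. */
	pgtable = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (pgtable == MEMBLOCK_ERROR)
		panic("cannot allocate early page table page");

	/* Walk every memory region with the new iterator. The pfn
	 * accessors round inward for memory regions (PFN_UP on base,
	 * PFN_DOWN on end), so a partially covered page is never
	 * reported as usable memory. */
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		pr_info("memory region: pfn %lu-%lu\n", start, end);
	}
}

Note that when CONFIG_HAVE_MEMBLOCK is not set, the inline memblock_alloc() stub in the diff always returns MEMBLOCK_ERROR, so callers written in this style fail at run time instead of breaking the build.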