#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/range.h>

/*
 * Check for already reserved areas.
 *
 * Trim the candidate range [*addrp, *addrp + *sizep) around every
 * reserved region it collides with, and report whether anything changed
 * so the caller can loop until the range is stable.
 */
static inline bool __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align)
{
	struct memblock_region *r;
	u64 addr = *addrp, last;
	u64 size = *sizep;
	bool changed = false;

again:
	last = addr + size;
	for_each_memblock(reserved, r) {
		/* A reserved region starts inside the range: trim the tail */
		if (last > r->base && addr < r->base) {
			size = r->base - addr;
			changed = true;
			goto again;
		}
		/* A reserved region ends inside the range: advance the head */
		if (last > (r->base + r->size) && addr < (r->base + r->size)) {
			addr = round_up(r->base + r->size, align);
			size = last - addr;
			changed = true;
			goto again;
		}
		/*
		 * The range is fully covered by this reserved region.
		 * Nothing has been committed back in this call, so *sizep
		 * still holds the size computed by the caller, which
		 * reaches the end of the memory region; bumping it by one
		 * makes addr + *sizep overshoot ei_last in the caller,
		 * which then bails out with MEMBLOCK_ERROR.
		 */
		if (last <= (r->base + r->size) && addr >= r->base) {
			(*sizep)++;
			return false;
		}
	}

	if (changed) {
		*addrp = addr;
		*sizep = size;
	}
	return changed;
}
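
/*
 * Informal sketch of the three cases above, for a candidate range C and a
 * reserved region R (positions are illustrative, not taken from this file):
 *
 *	C:   [addr ..................... last)
 *	R:            [base .. base+size)      -> tail trimmed back to base
 *	R:   [base .. base+size)               -> head advanced past R
 *	R:   [base ....................... )   -> fully covered, caller bails
 */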

static u64 __init __memblock_x86_find_in_range_size(u64 ei_start, u64 ei_last,
						    u64 start, u64 *sizep,
						    u64 align)
{
	u64 addr, last;

	/* Align the candidate to the memory region, but never below start */
	addr = round_up(ei_start, align);
	if (addr < start)
		addr = round_up(start, align);
	if (addr >= ei_last)
		goto out;
	/* Try the whole remainder of the region, then carve out collisions */
	*sizep = ei_last - addr;
	while (bad_addr_size(&addr, sizep, align) && addr + *sizep <= ei_last)
		;
	last = addr + *sizep;
	if (last > ei_last)
		goto out;

	return addr;

out:
	return MEMBLOCK_ERROR;
}

/*
 * Find the next free range at or after @start; the size of the range
 * found is returned through @sizep.
 */
u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
{
	struct memblock_region *r;

	/* Walk every RAM region and try to carve a free range out of it */
	for_each_memblock(memory, r) {
		u64 ei_start = r->base;
		u64 ei_last = ei_start + r->size;
		u64 addr;

		addr = __memblock_x86_find_in_range_size(ei_start, ei_last,
							 start, sizep, align);
		if (addr != MEMBLOCK_ERROR)
			return addr;
	}

	return MEMBLOCK_ERROR;
}
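
/*
 * Usage sketch (illustrative only, not part of this file): all free ranges
 * can be enumerated by calling memblock_x86_find_in_range_size() repeatedly
 * until it returns MEMBLOCK_ERROR.  The 'addr' and 'size' names below are
 * local to the example.
 *
 *	u64 addr = 0, size;
 *
 *	for (;;) {
 *		addr = memblock_x86_find_in_range_size(addr, &size,
 *						       PAGE_SIZE);
 *		if (addr == MEMBLOCK_ERROR)
 *			break;
 *		... use the free range [addr, addr + size) ...
 *		addr += size;
 *	}
 */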

#ifndef CONFIG_NO_BOOTMEM
/*
 * Hand every early memblock reservation that intersects [start, end) over
 * to the bootmem allocator.
 */
void __init memblock_x86_to_bootmem(u64 start, u64 end)
{
	int count;
	u64 final_start, final_end;
	struct memblock_region *r;

	/* Take out the region array itself */
	memblock_free_reserved_regions();

	count = memblock.reserved.cnt;
	pr_info("(%d early reservations) ==> bootmem [%010llx-%010llx]\n",
		count, start, end - 1);
	for_each_memblock(reserved, r) {
		pr_info("  [%010llx-%010llx] ",
			(u64)r->base, (u64)r->base + r->size - 1);
		/* Clip the reservation to the window we were given */
		final_start = max(start, r->base);
		final_end = min(end, r->base + r->size);
		if (final_start >= final_end) {
			pr_cont("\n");
			continue;
		}
		pr_cont(" ==> [%010llx-%010llx]\n", final_start, final_end - 1);
		reserve_bootmem_generic(final_start, final_end - final_start,
					BOOTMEM_DEFAULT);
	}

	/* Put the region array back */
	memblock_reserve_reserved_regions();
}
#endif
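
/*
 * Usage sketch (illustrative; the bound shown is an example, not taken
 * from this file): once the bootmem bitmap exists, early setup code can
 * replay the low-memory reservations into bootmem:
 *
 *	memblock_x86_to_bootmem(0, max_low_pfn << PAGE_SHIFT);
 */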

void __init memblock_x86_reserve_range(u64 start, u64 end, char *name)
{
	if (start == end)
		return;

	if (WARN_ONCE(start > end,
		      "memblock_x86_reserve_range: wrong range [%#llx, %#llx)\n",
		      start, end))
		return;

	/* 'name' is kept for the debug reporting done in the full kernel */
	memblock_reserve(start, end - start);
}
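
/*
 * Usage sketch (illustrative; the symbols are examples in the style of the
 * x86 setup code, not taken from this file):
 *
 *	memblock_x86_reserve_range(__pa_symbol(&_text),
 *				   __pa_symbol(&__bss_stop),
 *				   "TEXT DATA BSS");
 */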

void __init memblock_x86_free_range(u64 start, u64 end)
{
	if (start == end)
		return;

	if (WARN_ONCE(start > end,
		      "memblock_x86_free_range: wrong range [%#llx, %#llx)\n",
		      start, end))
		return;

	memblock_free(start, end - start);
}