 kernel/resource.c | 68 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 55 insertions(+), 13 deletions(-)
diff --git a/kernel/resource.c b/kernel/resource.c
index 4aef8867fd4b..d7386986e10e 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -21,6 +21,7 @@
 #include <linux/seq_file.h>
 #include <linux/device.h>
 #include <linux/pfn.h>
+#include <linux/mm.h>
 #include <asm/io.h>
 
 
@@ -50,6 +51,14 @@ struct resource_constraint {
 
 static DEFINE_RWLOCK(resource_lock);
 
+/*
+ * For memory hotplug, there is no way to free resource entries allocated
+ * by boot mem after the system is up. So for reusing the resource entry
+ * we need to remember the resource.
+ */
+static struct resource *bootmem_resource_free;
+static DEFINE_SPINLOCK(bootmem_resource_lock);
+
 static void *r_next(struct seq_file *m, void *v, loff_t *pos)
 {
         struct resource *p = v;
@@ -151,6 +160,40 @@ __initcall(ioresources_init);
 
 #endif /* CONFIG_PROC_FS */
 
+static void free_resource(struct resource *res)
+{
+        if (!res)
+                return;
+
+        if (!PageSlab(virt_to_head_page(res))) {
+                spin_lock(&bootmem_resource_lock);
+                res->sibling = bootmem_resource_free;
+                bootmem_resource_free = res;
+                spin_unlock(&bootmem_resource_lock);
+        } else {
+                kfree(res);
+        }
+}
+
+static struct resource *alloc_resource(gfp_t flags)
+{
+        struct resource *res = NULL;
+
+        spin_lock(&bootmem_resource_lock);
+        if (bootmem_resource_free) {
+                res = bootmem_resource_free;
+                bootmem_resource_free = res->sibling;
+        }
+        spin_unlock(&bootmem_resource_lock);
+
+        if (res)
+                memset(res, 0, sizeof(struct resource));
+        else
+                res = kzalloc(sizeof(struct resource), flags);
+
+        return res;
+}
+
 /* Return the conflict entry if you can't request it */
 static struct resource * __request_resource(struct resource *root, struct resource *new)
 {
@@ -771,7 +814,7 @@ static void __init __reserve_region_with_split(struct resource *root,
 {
         struct resource *parent = root;
         struct resource *conflict;
-        struct resource *res = kzalloc(sizeof(*res), GFP_ATOMIC);
+        struct resource *res = alloc_resource(GFP_ATOMIC);
         struct resource *next_res = NULL;
 
         if (!res)
@@ -796,7 +839,7 @@ static void __init __reserve_region_with_split(struct resource *root,
                 /* conflict covered whole area */
                 if (conflict->start <= res->start &&
                                 conflict->end >= res->end) {
-                        kfree(res);
+                        free_resource(res);
                         WARN_ON(next_res);
                         break;
                 }
@@ -806,10 +849,9 @@ static void __init __reserve_region_with_split(struct resource *root,
                         end = res->end;
                         res->end = conflict->start - 1;
                         if (conflict->end < end) {
-                                next_res = kzalloc(sizeof(*next_res),
-                                                GFP_ATOMIC);
+                                next_res = alloc_resource(GFP_ATOMIC);
                                 if (!next_res) {
-                                        kfree(res);
+                                        free_resource(res);
                                         break;
                                 }
                                 next_res->name = name;
@@ -899,7 +941,7 @@ struct resource * __request_region(struct resource *parent,
                            const char *name, int flags)
 {
         DECLARE_WAITQUEUE(wait, current);
-        struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
+        struct resource *res = alloc_resource(GFP_KERNEL);
 
         if (!res)
                 return NULL;
@@ -933,7 +975,7 @@ struct resource * __request_region(struct resource *parent,
                         continue;
                 }
                 /* Uhhuh, that didn't work out.. */
-                kfree(res);
+                free_resource(res);
                 res = NULL;
                 break;
         }
@@ -967,7 +1009,7 @@ int __check_region(struct resource *parent, resource_size_t start,
                 return -EBUSY;
 
         release_resource(res);
-        kfree(res);
+        free_resource(res);
         return 0;
 }
 EXPORT_SYMBOL(__check_region);
@@ -1007,7 +1049,7 @@ void __release_region(struct resource *parent, resource_size_t start,
                         write_unlock(&resource_lock);
                         if (res->flags & IORESOURCE_MUXED)
                                 wake_up(&muxed_resource_wait);
-                        kfree(res);
+                        free_resource(res);
                         return;
                 }
                 p = &res->sibling;
@@ -1055,8 +1097,8 @@ int release_mem_region_adjustable(struct resource *parent,
         if ((start < parent->start) || (end > parent->end))
                 return ret;
 
-        /* The kzalloc() result gets checked later */
-        new_res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+        /* The alloc_resource() result gets checked later */
+        new_res = alloc_resource(GFP_KERNEL);
 
         p = &parent->child;
         write_lock(&resource_lock);
@@ -1083,7 +1125,7 @@ int release_mem_region_adjustable(struct resource *parent,
                 if (res->start == start && res->end == end) {
                         /* free the whole entry */
                         *p = res->sibling;
-                        kfree(res);
+                        free_resource(res);
                         ret = 0;
                 } else if (res->start == start && res->end != end) {
                         /* adjust the start */
@@ -1119,7 +1161,7 @@ int release_mem_region_adjustable(struct resource *parent,
         }
 
         write_unlock(&resource_lock);
-        kfree(new_res);
+        free_resource(new_res);
         return ret;
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
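
The patch boils down to one technique: struct resource entries carved out of boot memory are not slab-backed, so kfree() must never see them. Instead, free_resource() checks PageSlab(virt_to_head_page(res)) and pushes non-slab entries onto a LIFO list (bootmem_resource_free) threaded through the now-unused ->sibling pointer, while alloc_resource() pops from that list before falling back to kzalloc(). The stand-alone sketch below models the same recycling scheme in user space. It is an illustration of the pattern, not kernel code: a pthread mutex stands in for the spinlock, an explicit boot_allocated flag stands in for the PageSlab() test (which has no user-space analogue), and the names struct res, alloc_res() and free_res() are invented for the example.

/*
 * User-space sketch of the free-list recycling the patch adds:
 * entries that cannot be returned to the allocator are pushed onto
 * a singly linked LIFO list through their ->sibling pointer, and
 * the allocator pops from that list before calling the real
 * allocator. Assumptions vs. the kernel code are noted inline.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct res {
        unsigned long start, end;
        struct res *sibling;    /* doubles as the free-list link */
        int boot_allocated;     /* stand-in for "not slab-backed" */
};

static struct res *free_list;   /* models bootmem_resource_free */
static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;

static void free_res(struct res *r)
{
        if (!r)
                return;

        if (r->boot_allocated) {
                /* Can't hand it back to the allocator: remember it. */
                pthread_mutex_lock(&free_lock);
                r->sibling = free_list;
                free_list = r;
                pthread_mutex_unlock(&free_lock);
        } else {
                free(r);
        }
}

static struct res *alloc_res(void)
{
        struct res *r = NULL;

        /* Prefer recycling a remembered boot-time entry. */
        pthread_mutex_lock(&free_lock);
        if (free_list) {
                r = free_list;
                free_list = r->sibling;
        }
        pthread_mutex_unlock(&free_lock);

        if (r) {
                int boot = r->boot_allocated;

                /*
                 * Zero the entry like the kernel's memset(). The kernel
                 * needs no flag restore because "boot allocated" is a
                 * property of the address, not of the struct contents.
                 */
                memset(r, 0, sizeof(*r));
                r->boot_allocated = boot;
        } else {
                r = calloc(1, sizeof(*r));
        }
        return r;
}

int main(void)
{
        static struct res boot_entry = { .boot_allocated = 1 };

        free_res(&boot_entry);          /* goes onto the free list */
        struct res *r = alloc_res();    /* recycles boot_entry */
        printf("recycled: %s\n", r == &boot_entry ? "yes" : "no");
        free_res(r);                    /* back onto the list, not free()d */
        return 0;
}

The design keeps the common path unchanged: allocations still normally come from the slab, and the free list only ever holds the handful of entries created before the slab allocator was up. That matters once CONFIG_MEMORY_HOTREMOVE lets those boot-time entries be released at runtime, since, as the patch's comment notes, there is otherwise no way to free them after the system is up.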