author     Linus Torvalds <torvalds@linux-foundation.org>    2013-02-23 20:50:35 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>    2013-02-23 20:50:35 -0500
commit     5ce1a70e2f00f0bce0cab57f798ca354b9496169 (patch)
tree       6e80200536b7a3576fd71ff2c7135ffe87dc858e /drivers
parent     9d3cae26acb471d5954cfdc25d1438b32060babe (diff)
parent     ef53d16cded7f89b3843b7a96970dab897843ea5 (diff)
Merge branch 'akpm' (more incoming from Andrew)
Merge second patch-bomb from Andrew Morton:
- A little DM fix
- the MM queue
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (154 commits)
ksm: allocate roots when needed
mm: cleanup "swapcache" in do_swap_page
mm,ksm: swapoff might need to copy
mm,ksm: FOLL_MIGRATION do migration_entry_wait
ksm: shrink 32-bit rmap_item back to 32 bytes
ksm: treat unstable nid like in stable tree
ksm: add some comments
tmpfs: fix mempolicy object leaks
tmpfs: fix use-after-free of mempolicy object
mm/fadvise.c: drain all pagevecs if POSIX_FADV_DONTNEED fails to discard all pages
mm: export mmu notifier invalidates
mm: accelerate mm_populate() treatment of THP pages
mm: use long type for page counts in mm_populate() and get_user_pages()
mm: accurately document nr_free_*_pages functions with code comments
HWPOISON: change order of error_states[]'s elements
HWPOISON: fix misjudgement of page_action() for errors on mlocked pages
memcg: stop warning on memcg_propagate_kmem
net: change type of virtio_chan->p9_max_pages
vmscan: change type of vm_total_pages to unsigned long
fs/nfsd: change type of max_delegations, nfsd_drc_max_mem and nfsd_drc_mem_used
...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpi_memhotplug.c                          8
-rw-r--r--  drivers/acpi/numa.c                                    23
-rw-r--r--  drivers/acpi/processor_driver.c                         2
-rw-r--r--  drivers/base/memory.c                                   6
-rw-r--r--  drivers/base/power/runtime.c                           89
-rw-r--r--  drivers/firmware/memmap.c                             196
-rw-r--r--  drivers/md/persistent-data/dm-transaction-manager.c   14
-rw-r--r--  drivers/staging/zcache/zbud.c                           2
-rw-r--r--  drivers/staging/zsmalloc/zsmalloc-main.c                2
-rw-r--r--  drivers/usb/core/hub.c                                 13
10 files changed, 320 insertions(+), 35 deletions(-)
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 034d3e72aa92..da1f82b445e0 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -280,9 +280,11 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
 
 static int acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
 {
-        int result = 0;
+        int result = 0, nid;
         struct acpi_memory_info *info, *n;
 
+        nid = acpi_get_node(mem_device->device->handle);
+
         list_for_each_entry_safe(info, n, &mem_device->res_list, list) {
                 if (info->failed)
                         /* The kernel does not use this memory block */
@@ -295,7 +297,9 @@ static int acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
                          */
                         return -EBUSY;
 
-                result = remove_memory(info->start_addr, info->length);
+                if (nid < 0)
+                        nid = memory_add_physaddr_to_nid(info->start_addr);
+                result = remove_memory(nid, info->start_addr, info->length);
                 if (result)
                         return result;
 
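remove_memory() now takes the NUMA node of the range as its first argument. A minimal sketch of what a caller has to do, assuming it holds an ACPI handle for the memory device (the example_* function name and its parameters are illustrative, not part of this patch):

static int example_remove_range(acpi_handle handle, u64 start, u64 size)
{
        int nid = acpi_get_node(handle);        /* may report no mapping (negative) */

        if (nid < 0)
                nid = memory_add_physaddr_to_nid(start);  /* derive node from the address */

        return remove_memory(nid, start, size);           /* new three-argument form */
}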
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 33e609f63585..59844ee149be 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -282,10 +282,10 @@ acpi_table_parse_srat(enum acpi_srat_type id,
                                handler, max_entries);
 }
 
-int __init acpi_numa_init(void)
-{
-        int cnt = 0;
+static int srat_mem_cnt;
 
+void __init early_parse_srat(void)
+{
         /*
          * Should not limit number with cpu num that is from NR_CPUS or nr_cpus=
          * SRAT cpu entries could have different order with that in MADT.
@@ -295,21 +295,24 @@ int __init acpi_numa_init(void)
         /* SRAT: Static Resource Affinity Table */
         if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
                 acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
                                       acpi_parse_x2apic_affinity, 0);
                 acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
                                       acpi_parse_processor_affinity, 0);
-                cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
+                srat_mem_cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
                                             acpi_parse_memory_affinity,
                                             NR_NODE_MEMBLKS);
         }
+}
 
+int __init acpi_numa_init(void)
+{
         /* SLIT: System Locality Information Table */
         acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);
 
         acpi_numa_arch_fixup();
 
-        if (cnt < 0)
-                return cnt;
+        if (srat_mem_cnt < 0)
+                return srat_mem_cnt;
         else if (!parsed_numa_memblks)
                 return -ENOENT;
         return 0;
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index cbf1f122666b..df34bd04ae62 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -45,6 +45,7 @@
 #include <linux/cpuidle.h>
 #include <linux/slab.h>
 #include <linux/acpi.h>
+#include <linux/memory_hotplug.h>
 
 #include <asm/io.h>
 #include <asm/cpu.h>
@@ -641,6 +642,7 @@ static int acpi_processor_remove(struct acpi_device *device)
 
         per_cpu(processors, pr->id) = NULL;
         per_cpu(processor_device_array, pr->id) = NULL;
+        try_offline_node(cpu_to_node(pr->id));
 
 free:
         free_cpumask_var(pr->throttling.shared_cpu_map);
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 83d0b17ba1c2..a51007b79032 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -693,6 +693,12 @@ int offline_memory_block(struct memory_block *mem)
         return ret;
 }
 
+/* return true if the memory block is offlined, otherwise, return false */
+bool is_memblock_offlined(struct memory_block *mem)
+{
+        return mem->state == MEM_OFFLINE;
+}
+
 /*
  * Initialize the sysfs support for memory devices...
  */
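is_memblock_offlined() gives hot-remove code a cheap way to confirm that every memory block covering a range has already been taken offline before physical removal proceeds. A hedged sketch of that kind of check (the helper below is hypothetical; the real caller lives in the memory hotplug core, not in this file):

static bool example_range_ready_for_removal(struct memory_block *blocks[], int nr)
{
        int i;

        for (i = 0; i < nr; i++)
                if (!is_memblock_offlined(blocks[i]))
                        return false;   /* at least one block is still online */

        return true;                    /* safe to continue with remove_memory() */
}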
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 3148b10dc2e5..1244930e3d7a 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -124,6 +124,76 @@ unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
 
+static int dev_memalloc_noio(struct device *dev, void *data)
+{
+        return dev->power.memalloc_noio;
+}
+
+/*
+ * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
+ * @dev: Device to handle.
+ * @enable: True for setting the flag and False for clearing the flag.
+ *
+ * Set the flag for all devices in the path from the device to the
+ * root device in the device tree if @enable is true, otherwise clear
+ * the flag for devices in the path whose siblings don't set the flag.
+ *
+ * The function should only be called by block device, or network
+ * device driver for solving the deadlock problem during runtime
+ * resume/suspend:
+ *
+ *     If memory allocation with GFP_KERNEL is called inside runtime
+ *     resume/suspend callback of any one of its ancestors(or the
+ *     block device itself), the deadlock may be triggered inside the
+ *     memory allocation since it might not complete until the block
+ *     device becomes active and the involed page I/O finishes. The
+ *     situation is pointed out first by Alan Stern. Network device
+ *     are involved in iSCSI kind of situation.
+ *
+ * The lock of dev_hotplug_mutex is held in the function for handling
+ * hotplug race because pm_runtime_set_memalloc_noio() may be called
+ * in async probe().
+ *
+ * The function should be called between device_add() and device_del()
+ * on the affected device(block/network device).
+ */
+void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
+{
+        static DEFINE_MUTEX(dev_hotplug_mutex);
+
+        mutex_lock(&dev_hotplug_mutex);
+        for (;;) {
+                bool enabled;
+
+                /* hold power lock since bitfield is not SMP-safe. */
+                spin_lock_irq(&dev->power.lock);
+                enabled = dev->power.memalloc_noio;
+                dev->power.memalloc_noio = enable;
+                spin_unlock_irq(&dev->power.lock);
+
+                /*
+                 * not need to enable ancestors any more if the device
+                 * has been enabled.
+                 */
+                if (enabled && enable)
+                        break;
+
+                dev = dev->parent;
+
+                /*
+                 * clear flag of the parent device only if all the
+                 * children don't set the flag because ancestor's
+                 * flag was set by any one of the descendants.
+                 */
+                if (!dev || (!enable &&
+                             device_for_each_child(dev, NULL,
+                                                   dev_memalloc_noio)))
+                        break;
+        }
+        mutex_unlock(&dev_hotplug_mutex);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
+
 /**
  * rpm_check_suspend_allowed - Test whether a device may be suspended.
  * @dev: Device to test.
@@ -278,7 +348,24 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
         if (!cb)
                 return -ENOSYS;
 
-        retval = __rpm_callback(cb, dev);
+        if (dev->power.memalloc_noio) {
+                unsigned int noio_flag;
+
+                /*
+                 * Deadlock might be caused if memory allocation with
+                 * GFP_KERNEL happens inside runtime_suspend and
+                 * runtime_resume callbacks of one block device's
+                 * ancestor or the block device itself. Network
+                 * device might be thought as part of iSCSI block
+                 * device, so network device and its ancestor should
+                 * be marked as memalloc_noio too.
+                 */
+                noio_flag = memalloc_noio_save();
+                retval = __rpm_callback(cb, dev);
+                memalloc_noio_restore(noio_flag);
+        } else {
+                retval = __rpm_callback(cb, dev);
+        }
 
         dev->power.runtime_error = retval;
         return retval != -EACCES ? retval : -EIO;
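Per the comment above, pm_runtime_set_memalloc_noio() is meant to be called by block or network device drivers between device_add() and device_del(). A minimal usage sketch, assuming a hypothetical driver (none of the example_* names exist in the tree):

static int example_add_disk_device(struct device *dev)
{
        int err;

        err = device_add(dev);          /* or add_disk()/register_netdev() */
        if (err)
                return err;

        /*
         * Mark dev and all of its ancestors: their runtime PM callbacks
         * will now run with memalloc_noio_save() in effect.
         */
        pm_runtime_set_memalloc_noio(dev, true);
        return 0;
}

static void example_remove_disk_device(struct device *dev)
{
        /*
         * Clear the flag (and the ancestors', if no sibling still needs it)
         * before tearing the device down.
         */
        pm_runtime_set_memalloc_noio(dev, false);
        device_del(dev);
}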
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index 90723e65b081..0b5b5f619c75 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -21,6 +21,7 @@
 #include <linux/types.h>
 #include <linux/bootmem.h>
 #include <linux/slab.h>
+#include <linux/mm.h>
 
 /*
  * Data types ------------------------------------------------------------------
@@ -52,6 +53,9 @@ static ssize_t start_show(struct firmware_map_entry *entry, char *buf);
 static ssize_t end_show(struct firmware_map_entry *entry, char *buf);
 static ssize_t type_show(struct firmware_map_entry *entry, char *buf);
 
+static struct firmware_map_entry * __meminit
+firmware_map_find_entry(u64 start, u64 end, const char *type);
+
 /*
  * Static data -----------------------------------------------------------------
  */
@@ -79,7 +83,52 @@ static const struct sysfs_ops memmap_attr_ops = {
         .show = memmap_attr_show,
 };
 
-static struct kobj_type memmap_ktype = {
+/* Firmware memory map entries. */
+static LIST_HEAD(map_entries);
+static DEFINE_SPINLOCK(map_entries_lock);
+
+/*
+ * For memory hotplug, there is no way to free memory map entries allocated
+ * by boot mem after the system is up. So when we hot-remove memory whose
+ * map entry is allocated by bootmem, we need to remember the storage and
+ * reuse it when the memory is hot-added again.
+ */
+static LIST_HEAD(map_entries_bootmem);
+static DEFINE_SPINLOCK(map_entries_bootmem_lock);
+
+
+static inline struct firmware_map_entry *
+to_memmap_entry(struct kobject *kobj)
+{
+        return container_of(kobj, struct firmware_map_entry, kobj);
+}
+
+static void __meminit release_firmware_map_entry(struct kobject *kobj)
+{
+        struct firmware_map_entry *entry = to_memmap_entry(kobj);
+
+        if (PageReserved(virt_to_page(entry))) {
+                /*
+                 * Remember the storage allocated by bootmem, and reuse it when
+                 * the memory is hot-added again. The entry will be added to
+                 * map_entries_bootmem here, and deleted from &map_entries in
+                 * firmware_map_remove_entry().
+                 */
+                if (firmware_map_find_entry(entry->start, entry->end,
+                    entry->type)) {
+                        spin_lock(&map_entries_bootmem_lock);
+                        list_add(&entry->list, &map_entries_bootmem);
+                        spin_unlock(&map_entries_bootmem_lock);
+                }
+
+                return;
+        }
+
+        kfree(entry);
+}
+
+static struct kobj_type __refdata memmap_ktype = {
+        .release        = release_firmware_map_entry,
         .sysfs_ops      = &memmap_attr_ops,
         .default_attrs  = def_attrs,
 };
@@ -88,13 +137,6 @@ static struct kobj_type memmap_ktype = {
  * Registration functions ------------------------------------------------------
  */
 
-/*
- * Firmware memory map entries. No locking is needed because the
- * firmware_map_add() and firmware_map_add_early() functions are called
- * in firmware initialisation code in one single thread of execution.
- */
-static LIST_HEAD(map_entries);
-
 /**
  * firmware_map_add_entry() - Does the real work to add a firmware memmap entry.
  * @start: Start of the memory range.
@@ -118,11 +160,25 @@ static int firmware_map_add_entry(u64 start, u64 end,
         INIT_LIST_HEAD(&entry->list);
         kobject_init(&entry->kobj, &memmap_ktype);
 
+        spin_lock(&map_entries_lock);
         list_add_tail(&entry->list, &map_entries);
+        spin_unlock(&map_entries_lock);
 
         return 0;
 }
 
+/**
+ * firmware_map_remove_entry() - Does the real work to remove a firmware
+ * memmap entry.
+ * @entry: removed entry.
+ *
+ * The caller must hold map_entries_lock, and release it properly.
+ **/
+static inline void firmware_map_remove_entry(struct firmware_map_entry *entry)
+{
+        list_del(&entry->list);
+}
+
 /*
  * Add memmap entry on sysfs
  */
@@ -144,6 +200,78 @@ static int add_sysfs_fw_map_entry(struct firmware_map_entry *entry)
         return 0;
 }
 
+/*
+ * Remove memmap entry on sysfs
+ */
+static inline void remove_sysfs_fw_map_entry(struct firmware_map_entry *entry)
+{
+        kobject_put(&entry->kobj);
+}
+
+/*
+ * firmware_map_find_entry_in_list() - Search memmap entry in a given list.
+ * @start: Start of the memory range.
+ * @end: End of the memory range (exclusive).
+ * @type: Type of the memory range.
+ * @list: In which to find the entry.
+ *
+ * This function is to find the memmap entey of a given memory range in a
+ * given list. The caller must hold map_entries_lock, and must not release
+ * the lock until the processing of the returned entry has completed.
+ *
+ * Return: Pointer to the entry to be found on success, or NULL on failure.
+ */
+static struct firmware_map_entry * __meminit
+firmware_map_find_entry_in_list(u64 start, u64 end, const char *type,
+                                struct list_head *list)
+{
+        struct firmware_map_entry *entry;
+
+        list_for_each_entry(entry, list, list)
+                if ((entry->start == start) && (entry->end == end) &&
+                    (!strcmp(entry->type, type))) {
+                        return entry;
+                }
+
+        return NULL;
+}
+
+/*
+ * firmware_map_find_entry() - Search memmap entry in map_entries.
+ * @start: Start of the memory range.
+ * @end: End of the memory range (exclusive).
+ * @type: Type of the memory range.
+ *
+ * This function is to find the memmap entey of a given memory range.
+ * The caller must hold map_entries_lock, and must not release the lock
+ * until the processing of the returned entry has completed.
+ *
+ * Return: Pointer to the entry to be found on success, or NULL on failure.
+ */
+static struct firmware_map_entry * __meminit
+firmware_map_find_entry(u64 start, u64 end, const char *type)
+{
+        return firmware_map_find_entry_in_list(start, end, type, &map_entries);
+}
+
+/*
+ * firmware_map_find_entry_bootmem() - Search memmap entry in map_entries_bootmem.
+ * @start: Start of the memory range.
+ * @end: End of the memory range (exclusive).
+ * @type: Type of the memory range.
+ *
+ * This function is similar to firmware_map_find_entry except that it find the
+ * given entry in map_entries_bootmem.
+ *
+ * Return: Pointer to the entry to be found on success, or NULL on failure.
+ */
+static struct firmware_map_entry * __meminit
+firmware_map_find_entry_bootmem(u64 start, u64 end, const char *type)
+{
+        return firmware_map_find_entry_in_list(start, end, type,
+                                               &map_entries_bootmem);
+}
+
 /**
  * firmware_map_add_hotplug() - Adds a firmware mapping entry when we do
  * memory hotplug.
@@ -161,9 +289,19 @@ int __meminit firmware_map_add_hotplug(u64 start, u64 end, const char *type)
 {
         struct firmware_map_entry *entry;
 
-        entry = kzalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC);
-        if (!entry)
-                return -ENOMEM;
+        entry = firmware_map_find_entry_bootmem(start, end, type);
+        if (!entry) {
+                entry = kzalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC);
+                if (!entry)
+                        return -ENOMEM;
+        } else {
+                /* Reuse storage allocated by bootmem. */
+                spin_lock(&map_entries_bootmem_lock);
+                list_del(&entry->list);
+                spin_unlock(&map_entries_bootmem_lock);
+
+                memset(entry, 0, sizeof(*entry));
+        }
 
         firmware_map_add_entry(start, end, type, entry);
         /* create the memmap entry */
@@ -196,6 +334,36 @@ int __init firmware_map_add_early(u64 start, u64 end, const char *type)
         return firmware_map_add_entry(start, end, type, entry);
 }
 
+/**
+ * firmware_map_remove() - remove a firmware mapping entry
+ * @start: Start of the memory range.
+ * @end: End of the memory range.
+ * @type: Type of the memory range.
+ *
+ * removes a firmware mapping entry.
+ *
+ * Returns 0 on success, or -EINVAL if no entry.
+ **/
+int __meminit firmware_map_remove(u64 start, u64 end, const char *type)
+{
+        struct firmware_map_entry *entry;
+
+        spin_lock(&map_entries_lock);
+        entry = firmware_map_find_entry(start, end - 1, type);
+        if (!entry) {
+                spin_unlock(&map_entries_lock);
+                return -EINVAL;
+        }
+
+        firmware_map_remove_entry(entry);
+        spin_unlock(&map_entries_lock);
+
+        /* remove the memmap entry */
+        remove_sysfs_fw_map_entry(entry);
+
+        return 0;
+}
+
 /*
  * Sysfs functions -------------------------------------------------------------
  */
@@ -217,8 +385,10 @@ static ssize_t type_show(struct firmware_map_entry *entry, char *buf)
         return snprintf(buf, PAGE_SIZE, "%s\n", entry->type);
 }
 
-#define to_memmap_attr(_attr) container_of(_attr, struct memmap_attribute, attr)
-#define to_memmap_entry(obj) container_of(obj, struct firmware_map_entry, kobj)
+static inline struct memmap_attribute *to_memmap_attr(struct attribute *attr)
+{
+        return container_of(attr, struct memmap_attribute, attr);
+}
 
 static ssize_t memmap_attr_show(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
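firmware_map_remove() is the hot-remove counterpart of the existing firmware_map_add_hotplug(): the memory hotplug core is expected to drop the /sys/firmware/memmap entry using the same range and type it registered at hot-add time. A hedged sketch of that pairing (the surrounding caller is not part of this file, and the range variables are placeholders):

        /* at hot-add time (existing behaviour) */
        firmware_map_add_hotplug(start, start + size, "System RAM");

        /* at hot-remove time (new), same range and type */
        firmware_map_remove(start, start + size, "System RAM");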
diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
index d247a35da3c6..7b17a1fdeaf9 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.c
+++ b/drivers/md/persistent-data/dm-transaction-manager.c
@@ -25,8 +25,8 @@ struct shadow_info {
 /*
  * It would be nice if we scaled with the size of transaction.
  */
-#define HASH_SIZE 256
-#define HASH_MASK (HASH_SIZE - 1)
+#define DM_HASH_SIZE 256
+#define DM_HASH_MASK (DM_HASH_SIZE - 1)
 
 struct dm_transaction_manager {
         int is_clone;
@@ -36,7 +36,7 @@ struct dm_transaction_manager {
         struct dm_space_map *sm;
 
         spinlock_t lock;
-        struct hlist_head buckets[HASH_SIZE];
+        struct hlist_head buckets[DM_HASH_SIZE];
 };
 
 /*----------------------------------------------------------------*/
@@ -44,7 +44,7 @@ struct dm_transaction_manager {
 static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
 {
         int r = 0;
-        unsigned bucket = dm_hash_block(b, HASH_MASK);
+        unsigned bucket = dm_hash_block(b, DM_HASH_MASK);
         struct shadow_info *si;
         struct hlist_node *n;
 
@@ -71,7 +71,7 @@ static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
         si = kmalloc(sizeof(*si), GFP_NOIO);
         if (si) {
                 si->where = b;
-                bucket = dm_hash_block(b, HASH_MASK);
+                bucket = dm_hash_block(b, DM_HASH_MASK);
                 spin_lock(&tm->lock);
                 hlist_add_head(&si->hlist, tm->buckets + bucket);
                 spin_unlock(&tm->lock);
@@ -86,7 +86,7 @@ static void wipe_shadow_table(struct dm_transaction_manager *tm)
         int i;
 
         spin_lock(&tm->lock);
-        for (i = 0; i < HASH_SIZE; i++) {
+        for (i = 0; i < DM_HASH_SIZE; i++) {
                 bucket = tm->buckets + i;
                 hlist_for_each_entry_safe(si, n, tmp, bucket, hlist)
                         kfree(si);
@@ -115,7 +115,7 @@ static struct dm_transaction_manager *dm_tm_create(struct dm_block_manager *bm,
         tm->sm = sm;
 
         spin_lock_init(&tm->lock);
-        for (i = 0; i < HASH_SIZE; i++)
+        for (i = 0; i < DM_HASH_SIZE; i++)
                 INIT_HLIST_HEAD(tm->buckets + i);
 
         return tm;
diff --git a/drivers/staging/zcache/zbud.c b/drivers/staging/zcache/zbud.c
index 328c397ea5dc..fdff5c6a0239 100644
--- a/drivers/staging/zcache/zbud.c
+++ b/drivers/staging/zcache/zbud.c
@@ -404,7 +404,7 @@ static inline struct page *zbud_unuse_zbudpage(struct zbudpage *zbudpage,
         else
                 zbud_pers_pageframes--;
         zbudpage_spin_unlock(zbudpage);
-        reset_page_mapcount(page);
+        page_mapcount_reset(page);
         init_page_count(page);
         page->index = 0;
         return page;
diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/drivers/staging/zsmalloc/zsmalloc-main.c
index 06f73a93a44d..e78d262c5249 100644
--- a/drivers/staging/zsmalloc/zsmalloc-main.c
+++ b/drivers/staging/zsmalloc/zsmalloc-main.c
@@ -472,7 +472,7 @@ static void reset_page(struct page *page)
         set_page_private(page, 0);
         page->mapping = NULL;
         page->freelist = NULL;
-        reset_page_mapcount(page);
+        page_mapcount_reset(page);
 }
 
 static void free_zspage(struct page *first_page)
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 1775ad471edd..5480352f984d 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -5177,6 +5177,7 @@ int usb_reset_device(struct usb_device *udev)
 {
         int ret;
         int i;
+        unsigned int noio_flag;
         struct usb_host_config *config = udev->actconfig;
 
         if (udev->state == USB_STATE_NOTATTACHED ||
@@ -5186,6 +5187,17 @@ int usb_reset_device(struct usb_device *udev)
                 return -EINVAL;
         }
 
+        /*
+         * Don't allocate memory with GFP_KERNEL in current
+         * context to avoid possible deadlock if usb mass
+         * storage interface or usbnet interface(iSCSI case)
+         * is included in current configuration. The easist
+         * approach is to do it for every device reset,
+         * because the device 'memalloc_noio' flag may have
+         * not been set before reseting the usb device.
+         */
+        noio_flag = memalloc_noio_save();
+
         /* Prevent autosuspend during the reset */
         usb_autoresume_device(udev);
 
@@ -5230,6 +5242,7 @@ int usb_reset_device(struct usb_device *udev)
         }
 
         usb_autosuspend_device(udev);
+        memalloc_noio_restore(noio_flag);
         return ret;
 }
 EXPORT_SYMBOL_GPL(usb_reset_device);
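The pattern used in usb_reset_device() above is the general one for the new memalloc_noio task flag: save, do the work whose allocations must not recurse into block I/O, then restore on every exit path. A compact sketch, assuming a hypothetical helper do_reset_work() as the work in question:

        unsigned int noio_flag;

        noio_flag = memalloc_noio_save();       /* GFP_KERNEL allocations now behave like GFP_NOIO */
        ret = do_reset_work(dev);               /* hypothetical; any allocation in here avoids page I/O */
        memalloc_noio_restore(noio_flag);       /* always restore, including on error paths */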