author		Linus Torvalds <torvalds@linux-foundation.org>	2014-08-07 00:14:42 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-07 00:14:42 -0400
commit		33caee39925b887a99a2400dc5c980097c3573f9 (patch)
tree		8e68ad97e1fee88c4a3f31453041f8d139f2027e /drivers
parent		6456a0438b984186a0c9c8ecc9fe3d97b7ac3613 (diff)
parent		f84223087402c45179be5e7060c5736c17a7b271 (diff)
Merge branch 'akpm' (patchbomb from Andrew Morton)
Merge incoming from Andrew Morton:
 - Various misc things.
 - arch/sh updates.
 - Part of ocfs2.  Review is slow.
 - Slab updates.
 - Most of -mm.
 - printk updates.
 - lib/ updates.
 - checkpatch updates.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (226 commits)
  checkpatch: update $declaration_macros, add uninitialized_var
  checkpatch: warn on missing spaces in broken up quoted
  checkpatch: fix false positives for --strict "space after cast" test
  checkpatch: fix false positive MISSING_BREAK warnings with --file
  checkpatch: add test for native c90 types in unusual order
  checkpatch: add signed generic types
  checkpatch: add short int to c variable types
  checkpatch: add for_each tests to indentation and brace tests
  checkpatch: fix brace style misuses of else and while
  checkpatch: add --fix option for a couple OPEN_BRACE misuses
  checkpatch: use the correct indentation for which()
  checkpatch: add fix_insert_line and fix_delete_line helpers
  checkpatch: add ability to insert and delete lines to patch/file
  checkpatch: add an index variable for fixed lines
  checkpatch: warn on break after goto or return with same tab indentation
  checkpatch: emit a warning on file add/move/delete
  checkpatch: add test for commit id formatting style in commit log
  checkpatch: emit fewer kmalloc_array/kcalloc conversion warnings
  checkpatch: improve "no space after cast" test
  checkpatch: allow multiple const * types
  ...
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/ata/Kconfig	1
-rw-r--r--	drivers/ata/libata-core.c	72
-rw-r--r--	drivers/base/Kconfig	10
-rw-r--r--	drivers/base/dma-contiguous.c	220
-rw-r--r--	drivers/base/memory.c	30
-rw-r--r--	drivers/base/node.c	2
-rw-r--r--	drivers/block/zram/zram_drv.c	71
-rw-r--r--	drivers/block/zram/zram_drv.h	29
-rw-r--r--	drivers/firmware/memmap.c	6
-rw-r--r--	drivers/gpu/drm/drm_hashtab.c	2
-rw-r--r--	drivers/hwmon/asus_atk0110.c	2
-rw-r--r--	drivers/lguest/core.c	7
-rw-r--r--	drivers/net/ethernet/intel/i40e/i40e_ethtool.c	2
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c	2
-rw-r--r--	drivers/staging/android/binder.c	4
-rw-r--r--	drivers/staging/lustre/lustre/libcfs/hash.c	4
-rw-r--r--	drivers/tty/sysrq.c	2
17 files changed, 111 insertions, 355 deletions
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index e65d400efd44..e1b92788c225 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -16,6 +16,7 @@ menuconfig ATA
 	depends on BLOCK
 	depends on !(M32R || M68K || S390) || BROKEN
 	select SCSI
+	select GLOB
 	---help---
 	  If you want to use an ATA hard disk, ATA tape drive, ATA CD-ROM or
 	  any other ATA device under Linux, say Y and make sure that you know
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 677c0c1b03bd..dbdc5d32343f 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -59,6 +59,7 @@
 #include <linux/async.h>
 #include <linux/log2.h>
 #include <linux/slab.h>
+#include <linux/glob.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_host.h>
@@ -4250,73 +4251,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ }
 };
 
-/**
- * glob_match - match a text string against a glob-style pattern
- * @text: the string to be examined
- * @pattern: the glob-style pattern to be matched against
- *
- * Either/both of text and pattern can be empty strings.
- *
- * Match text against a glob-style pattern, with wildcards and simple sets:
- *
- *	?	matches any single character.
- *	*	matches any run of characters.
- *	[xyz]	matches a single character from the set: x, y, or z.
- *	[a-d]	matches a single character from the range: a, b, c, or d.
- *	[a-d0-9] matches a single character from either range.
- *
- * The special characters ?, [, -, or *, can be matched using a set, eg. [*]
- * Behaviour with malformed patterns is undefined, though generally reasonable.
- *
- * Sample patterns:  "SD1?",  "SD1[0-5]",  "*R0",  "SD*1?[012]*xx"
- *
- * This function uses one level of recursion per '*' in pattern.
- * Since it calls _nothing_ else, and has _no_ explicit local variables,
- * this will not cause stack problems for any reasonable use here.
- *
- * RETURNS:
- * 0 on match, 1 otherwise.
- */
-static int glob_match (const char *text, const char *pattern)
-{
-	do {
-		/* Match single character or a '?' wildcard */
-		if (*text == *pattern || *pattern == '?') {
-			if (!*pattern++)
-				return 0;  /* End of both strings: match */
-		} else {
-			/* Match single char against a '[' bracketed ']' pattern set */
-			if (!*text || *pattern != '[')
-				break;  /* Not a pattern set */
-			while (*++pattern && *pattern != ']' && *text != *pattern) {
-				if (*pattern == '-' && *(pattern - 1) != '[')
-					if (*text > *(pattern - 1) && *text < *(pattern + 1)) {
-						++pattern;
-						break;
-					}
-			}
-			if (!*pattern || *pattern == ']')
-				return 1;  /* No match */
-			while (*pattern && *pattern++ != ']');
-		}
-	} while (*++text && *pattern);
-
-	/* Match any run of chars against a '*' wildcard */
-	if (*pattern == '*') {
-		if (!*++pattern)
-			return 0;  /* Match: avoid recursion at end of pattern */
-		/* Loop to handle additional pattern chars after the wildcard */
-		while (*text) {
-			if (glob_match(text, pattern) == 0)
-				return 0;  /* Remainder matched */
-			++text;  /* Absorb (match) this char and try again */
-		}
-	}
-	if (!*text && !*pattern)
-		return 0;  /* End of both strings: match */
-	return 1;  /* No match */
-}
-
 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
 {
 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
@@ -4327,10 +4261,10 @@ static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
 
 	while (ad->model_num) {
-		if (!glob_match(model_num, ad->model_num)) {
+		if (glob_match(model_num, ad->model_num)) {
 			if (ad->model_rev == NULL)
 				return ad->horkage;
-			if (!glob_match(model_rev, ad->model_rev))
+			if (glob_match(model_rev, ad->model_rev))
 				return ad->horkage;
 		}
 		ad++;
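
The kerneldoc removed above spells out the glob rules ('?', '*', and [xyz]/[a-d] sets) that libata now gets from the shared GLOB library selected in Kconfig. As a rough illustration of those rules only, not the kernel's lib/glob.c code, here is a small standalone C sketch exercised with the sample patterns from the deleted comment:

#include <stdbool.h>
#include <stdio.h>

/*
 * Tiny recursive matcher for the wildcard rules described in the deleted
 * kerneldoc: '?' = any one char, '*' = any run, [xyz]/[a-d] = simple sets
 * and ranges.  Illustration only; not the in-kernel implementation.
 */
static bool glob(const char *pat, const char *str)
{
	for (;;) {
		char c = *pat++;

		switch (c) {
		case '\0':
			return *str == '\0';
		case '?':
			if (*str++ == '\0')
				return false;
			break;
		case '*':
			/* Try to match the rest of the pattern at every tail */
			do {
				if (glob(pat, str))
					return true;
			} while (*str++);
			return false;
		case '[': {
			bool match = false;

			while (*pat && *pat != ']') {
				if (pat[1] == '-' && pat[2] && pat[2] != ']') {
					if (*str >= pat[0] && *str <= pat[2])
						match = true;
					pat += 3;
				} else {
					if (*str == *pat)
						match = true;
					pat++;
				}
			}
			if (!match || *pat++ != ']' || *str++ == '\0')
				return false;
			break;
		}
		default:
			if (c != *str++)
				return false;
		}
	}
}

int main(void)
{
	/* Sample patterns from the deleted kerneldoc comment */
	printf("%d\n", glob("SD1?", "SD13"));          /* 1: '?' matches '3' */
	printf("%d\n", glob("SD1[0-5]", "SD14"));      /* 1: '4' is inside [0-5] */
	printf("%d\n", glob("*R0", "WDC WD3200 R0"));  /* 1: '*' absorbs the prefix */
	printf("%d\n", glob("SD1[0-5]", "SD19"));      /* 0: '9' is outside the set */
	return 0;
}

With the helper now living in lib/, ata_dev_blacklisted() above simply tests the library's boolean result instead of comparing the old 0-on-match return value.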
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 88500fed3c7a..4e7f0ff83ae7 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -289,16 +289,6 @@ config CMA_ALIGNMENT
 
 	  If unsure, leave the default value "8".
 
-config CMA_AREAS
-	int "Maximum count of the CMA device-private areas"
-	default 7
-	help
-	  CMA allows to create CMA areas for particular devices. This parameter
-	  sets the maximum number of such device private CMA areas in the
-	  system.
-
-	  If unsure, leave the default value "7".
-
 endif
 
 endmenu
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 6467c919c509..6606abdf880c 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -24,23 +24,9 @@
 
 #include <linux/memblock.h>
 #include <linux/err.h>
-#include <linux/mm.h>
-#include <linux/mutex.h>
-#include <linux/page-isolation.h>
 #include <linux/sizes.h>
-#include <linux/slab.h>
-#include <linux/swap.h>
-#include <linux/mm_types.h>
 #include <linux/dma-contiguous.h>
-
-struct cma {
-	unsigned long	base_pfn;
-	unsigned long	count;
-	unsigned long	*bitmap;
-	struct mutex	lock;
-};
-
-struct cma *dma_contiguous_default_area;
+#include <linux/cma.h>
 
 #ifdef CONFIG_CMA_SIZE_MBYTES
 #define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
@@ -48,6 +34,8 @@ struct cma *dma_contiguous_default_area;
 #define CMA_SIZE_MBYTES 0
 #endif
 
+struct cma *dma_contiguous_default_area;
+
 /*
  * Default global CMA area size can be defined in kernel's .config.
  * This is useful mainly for distro maintainers to create a kernel
@@ -154,65 +142,6 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
 	}
 }
 
-static DEFINE_MUTEX(cma_mutex);
-
-static int __init cma_activate_area(struct cma *cma)
-{
-	int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
-	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
-	unsigned i = cma->count >> pageblock_order;
-	struct zone *zone;
-
-	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-
-	if (!cma->bitmap)
-		return -ENOMEM;
-
-	WARN_ON_ONCE(!pfn_valid(pfn));
-	zone = page_zone(pfn_to_page(pfn));
-
-	do {
-		unsigned j;
-		base_pfn = pfn;
-		for (j = pageblock_nr_pages; j; --j, pfn++) {
-			WARN_ON_ONCE(!pfn_valid(pfn));
-			/*
-			 * alloc_contig_range requires the pfn range
-			 * specified to be in the same zone. Make this
-			 * simple by forcing the entire CMA resv range
-			 * to be in the same zone.
-			 */
-			if (page_zone(pfn_to_page(pfn)) != zone)
-				goto err;
-		}
-		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
-	} while (--i);
-
-	mutex_init(&cma->lock);
-	return 0;
-
-err:
-	kfree(cma->bitmap);
-	return -EINVAL;
-}
-
-static struct cma cma_areas[MAX_CMA_AREAS];
-static unsigned cma_area_count;
-
-static int __init cma_init_reserved_areas(void)
-{
-	int i;
-
-	for (i = 0; i < cma_area_count; i++) {
-		int ret = cma_activate_area(&cma_areas[i]);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-core_initcall(cma_init_reserved_areas);
-
 /**
  * dma_contiguous_reserve_area() - reserve custom contiguous area
  * @size: Size of the reserved area (in bytes),
@@ -234,72 +163,17 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
 				       phys_addr_t limit, struct cma **res_cma,
 				       bool fixed)
 {
-	struct cma *cma = &cma_areas[cma_area_count];
-	phys_addr_t alignment;
-	int ret = 0;
-
-	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
-		 (unsigned long)size, (unsigned long)base,
-		 (unsigned long)limit);
-
-	/* Sanity checks */
-	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
-		pr_err("Not enough slots for CMA reserved regions!\n");
-		return -ENOSPC;
-	}
-
-	if (!size)
-		return -EINVAL;
-
-	/* Sanitise input arguments */
-	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
-	base = ALIGN(base, alignment);
-	size = ALIGN(size, alignment);
-	limit &= ~(alignment - 1);
-
-	/* Reserve memory */
-	if (base && fixed) {
-		if (memblock_is_region_reserved(base, size) ||
-		    memblock_reserve(base, size) < 0) {
-			ret = -EBUSY;
-			goto err;
-		}
-	} else {
-		phys_addr_t addr = memblock_alloc_range(size, alignment, base,
-							limit);
-		if (!addr) {
-			ret = -ENOMEM;
-			goto err;
-		} else {
-			base = addr;
-		}
-	}
-
-	/*
-	 * Each reserved area must be initialised later, when more kernel
-	 * subsystems (like slab allocator) are available.
-	 */
-	cma->base_pfn = PFN_DOWN(base);
-	cma->count = size >> PAGE_SHIFT;
-	*res_cma = cma;
-	cma_area_count++;
+	int ret;
 
-	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
-		(unsigned long)base);
+	ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed, res_cma);
+	if (ret)
+		return ret;
 
 	/* Architecture specific contiguous memory fixup. */
-	dma_contiguous_early_fixup(base, size);
-	return 0;
-err:
-	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
-	return ret;
-}
+	dma_contiguous_early_fixup(cma_get_base(*res_cma),
+				   cma_get_size(*res_cma));
 
-static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
-{
-	mutex_lock(&cma->lock);
-	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
-	mutex_unlock(&cma->lock);
+	return 0;
 }
 
 /**
@@ -316,62 +190,10 @@ static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
 struct page *dma_alloc_from_contiguous(struct device *dev, int count,
 				       unsigned int align)
 {
-	unsigned long mask, pfn, pageno, start = 0;
-	struct cma *cma = dev_get_cma_area(dev);
-	struct page *page = NULL;
-	int ret;
-
-	if (!cma || !cma->count)
-		return NULL;
-
 	if (align > CONFIG_CMA_ALIGNMENT)
 		align = CONFIG_CMA_ALIGNMENT;
 
-	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
-		 count, align);
-
-	if (!count)
-		return NULL;
-
-	mask = (1 << align) - 1;
-
-
-	for (;;) {
-		mutex_lock(&cma->lock);
-		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
-						    start, count, mask);
-		if (pageno >= cma->count) {
-			mutex_unlock(&cma->lock);
-			break;
-		}
-		bitmap_set(cma->bitmap, pageno, count);
-		/*
-		 * It's safe to drop the lock here. We've marked this region for
-		 * our exclusive use. If the migration fails we will take the
-		 * lock again and unmark it.
-		 */
-		mutex_unlock(&cma->lock);
-
-		pfn = cma->base_pfn + pageno;
-		mutex_lock(&cma_mutex);
-		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
-		mutex_unlock(&cma_mutex);
-		if (ret == 0) {
-			page = pfn_to_page(pfn);
-			break;
-		} else if (ret != -EBUSY) {
-			clear_cma_bitmap(cma, pfn, count);
-			break;
-		}
-		clear_cma_bitmap(cma, pfn, count);
-		pr_debug("%s(): memory range at %p is busy, retrying\n",
-			 __func__, pfn_to_page(pfn));
-		/* try again with a bit different memory target */
-		start = pageno + mask + 1;
-	}
-
-	pr_debug("%s(): returned %p\n", __func__, page);
-	return page;
+	return cma_alloc(dev_get_cma_area(dev), count, align);
 }
 
 /**
@@ -387,23 +209,5 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
 bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 				 int count)
 {
-	struct cma *cma = dev_get_cma_area(dev);
-	unsigned long pfn;
-
-	if (!cma || !pages)
-		return false;
-
-	pr_debug("%s(page %p)\n", __func__, (void *)pages);
-
-	pfn = page_to_pfn(pages);
-
-	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
-		return false;
-
-	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
-
-	free_contig_range(pfn, count);
-	clear_cma_bitmap(cma, pfn, count);
-
-	return true;
+	return cma_release(dev_get_cma_area(dev), pages, count);
 }
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 89f752dd8465..a2e13e250bba 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -284,7 +284,7 @@ static int memory_subsys_online(struct device *dev)
 	 * attribute and need to set the online_type.
 	 */
 	if (mem->online_type < 0)
-		mem->online_type = ONLINE_KEEP;
+		mem->online_type = MMOP_ONLINE_KEEP;
 
 	ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
 
@@ -315,23 +315,23 @@ store_mem_state(struct device *dev,
 	if (ret)
 		return ret;
 
-	if (!strncmp(buf, "online_kernel", min_t(int, count, 13)))
-		online_type = ONLINE_KERNEL;
-	else if (!strncmp(buf, "online_movable", min_t(int, count, 14)))
-		online_type = ONLINE_MOVABLE;
-	else if (!strncmp(buf, "online", min_t(int, count, 6)))
-		online_type = ONLINE_KEEP;
-	else if (!strncmp(buf, "offline", min_t(int, count, 7)))
-		online_type = -1;
+	if (sysfs_streq(buf, "online_kernel"))
+		online_type = MMOP_ONLINE_KERNEL;
+	else if (sysfs_streq(buf, "online_movable"))
+		online_type = MMOP_ONLINE_MOVABLE;
+	else if (sysfs_streq(buf, "online"))
+		online_type = MMOP_ONLINE_KEEP;
+	else if (sysfs_streq(buf, "offline"))
+		online_type = MMOP_OFFLINE;
 	else {
 		ret = -EINVAL;
 		goto err;
 	}
 
 	switch (online_type) {
-	case ONLINE_KERNEL:
-	case ONLINE_MOVABLE:
-	case ONLINE_KEEP:
+	case MMOP_ONLINE_KERNEL:
+	case MMOP_ONLINE_MOVABLE:
+	case MMOP_ONLINE_KEEP:
 		/*
 		 * mem->online_type is not protected so there can be a
 		 * race here. However, when racing online, the first
@@ -342,7 +342,7 @@ store_mem_state(struct device *dev,
 		mem->online_type = online_type;
 		ret = device_online(&mem->dev);
 		break;
-	case -1:
+	case MMOP_OFFLINE:
 		ret = device_offline(&mem->dev);
 		break;
 	default:
@@ -406,7 +406,9 @@ memory_probe_store(struct device *dev, struct device_attribute *attr,
 	int i, ret;
 	unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;
 
-	phys_addr = simple_strtoull(buf, NULL, 0);
+	ret = kstrtoull(buf, 0, &phys_addr);
+	if (ret)
+		return ret;
 
 	if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
 		return -EINVAL;
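
In the store_mem_state() hunk above, the open-coded strncmp()/min_t() comparisons are replaced with sysfs_streq(), which treats an optional trailing newline as the end of the string, so "echo online" and "echo -n online" both select the same state. A minimal userspace sketch of that comparison rule, assuming only sysfs_streq()'s documented behaviour (this is not the kernel's lib/string.c code):

#include <stdbool.h>
#include <stdio.h>

/*
 * Userspace approximation of sysfs_streq(): strings compare equal when
 * they match exactly, or when they match up to a single trailing '\n'
 * on either side, since shell writes into sysfs usually carry one.
 */
static bool sysfs_streq_like(const char *s1, const char *s2)
{
	while (*s1 && *s1 == *s2) {
		s1++;
		s2++;
	}

	if (*s1 == *s2)
		return true;
	if (!*s1 && *s2 == '\n' && !s2[1])
		return true;
	if (*s1 == '\n' && !s1[1] && !*s2)
		return true;
	return false;
}

int main(void)
{
	printf("%d\n", sysfs_streq_like("online\n", "online"));       /* 1: newline tolerated */
	printf("%d\n", sysfs_streq_like("online_movable", "online")); /* 0: not a full match */
	return 0;
}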
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 8f7ed9933a7c..c6d3ae05f1ca 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -126,7 +126,7 @@ static ssize_t node_read_meminfo(struct device *dev,
 		       nid, K(node_page_state(nid, NR_FILE_PAGES)),
 		       nid, K(node_page_state(nid, NR_FILE_MAPPED)),
 		       nid, K(node_page_state(nid, NR_ANON_PAGES)),
-		       nid, K(node_page_state(nid, NR_SHMEM)),
+		       nid, K(i.sharedram),
 		       nid, node_page_state(nid, NR_KERNEL_STACK) *
 				THREAD_SIZE / 1024,
 		       nid, K(node_page_state(nid, NR_PAGETABLE)),
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 36e54be402df..dfa4024c448a 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -183,19 +183,32 @@ static ssize_t comp_algorithm_store(struct device *dev,
 static int zram_test_flag(struct zram_meta *meta, u32 index,
 			enum zram_pageflags flag)
 {
-	return meta->table[index].flags & BIT(flag);
+	return meta->table[index].value & BIT(flag);
 }
 
 static void zram_set_flag(struct zram_meta *meta, u32 index,
 			enum zram_pageflags flag)
 {
-	meta->table[index].flags |= BIT(flag);
+	meta->table[index].value |= BIT(flag);
 }
 
 static void zram_clear_flag(struct zram_meta *meta, u32 index,
 			enum zram_pageflags flag)
 {
-	meta->table[index].flags &= ~BIT(flag);
+	meta->table[index].value &= ~BIT(flag);
+}
+
+static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
+{
+	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
+}
+
+static void zram_set_obj_size(struct zram_meta *meta,
+					u32 index, size_t size)
+{
+	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;
+
+	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
 }
 
 static inline int is_partial_io(struct bio_vec *bvec)
@@ -255,7 +268,6 @@ static struct zram_meta *zram_meta_alloc(u64 disksize)
 		goto free_table;
 	}
 
-	rwlock_init(&meta->tb_lock);
 	return meta;
 
 free_table:
@@ -304,7 +316,12 @@ static void handle_zero_page(struct bio_vec *bvec)
 	flush_dcache_page(page);
 }
 
-/* NOTE: caller should hold meta->tb_lock with write-side */
+
+/*
+ * To protect concurrent access to the same index entry,
+ * caller should hold this table index entry's bit_spinlock to
+ * indicate this index entry is accessing.
+ */
 static void zram_free_page(struct zram *zram, size_t index)
 {
 	struct zram_meta *meta = zram->meta;
@@ -324,11 +341,12 @@ static void zram_free_page(struct zram *zram, size_t index)
 
 	zs_free(meta->mem_pool, handle);
 
-	atomic64_sub(meta->table[index].size, &zram->stats.compr_data_size);
+	atomic64_sub(zram_get_obj_size(meta, index),
+			&zram->stats.compr_data_size);
 	atomic64_dec(&zram->stats.pages_stored);
 
 	meta->table[index].handle = 0;
-	meta->table[index].size = 0;
+	zram_set_obj_size(meta, index, 0);
 }
 
 static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
@@ -337,14 +355,14 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 	unsigned char *cmem;
 	struct zram_meta *meta = zram->meta;
 	unsigned long handle;
-	u16 size;
+	size_t size;
 
-	read_lock(&meta->tb_lock);
+	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
 	handle = meta->table[index].handle;
-	size = meta->table[index].size;
+	size = zram_get_obj_size(meta, index);
 
 	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
-		read_unlock(&meta->tb_lock);
+		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 		clear_page(mem);
 		return 0;
 	}
@@ -355,7 +373,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 	else
 		ret = zcomp_decompress(zram->comp, cmem, size, mem);
 	zs_unmap_object(meta->mem_pool, handle);
-	read_unlock(&meta->tb_lock);
+	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret)) {
@@ -376,14 +394,14 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 	struct zram_meta *meta = zram->meta;
 	page = bvec->bv_page;
 
-	read_lock(&meta->tb_lock);
+	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
 	if (unlikely(!meta->table[index].handle) ||
 			zram_test_flag(meta, index, ZRAM_ZERO)) {
-		read_unlock(&meta->tb_lock);
+		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 		handle_zero_page(bvec);
 		return 0;
 	}
-	read_unlock(&meta->tb_lock);
+	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 
 	if (is_partial_io(bvec))
 		/* Use a temporary buffer to decompress the page */
@@ -461,10 +479,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	if (page_zero_filled(uncmem)) {
 		kunmap_atomic(user_mem);
 		/* Free memory associated with this sector now. */
-		write_lock(&zram->meta->tb_lock);
+		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
 		zram_free_page(zram, index);
 		zram_set_flag(meta, index, ZRAM_ZERO);
-		write_unlock(&zram->meta->tb_lock);
+		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 
 		atomic64_inc(&zram->stats.zero_pages);
 		ret = 0;
@@ -514,12 +532,12 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	 * Free memory associated with this sector
 	 * before overwriting unused sectors.
 	 */
-	write_lock(&zram->meta->tb_lock);
+	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
 	zram_free_page(zram, index);
 
 	meta->table[index].handle = handle;
-	meta->table[index].size = clen;
-	write_unlock(&zram->meta->tb_lock);
+	zram_set_obj_size(meta, index, clen);
+	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 
 	/* Update stats */
 	atomic64_add(clen, &zram->stats.compr_data_size);
@@ -560,6 +578,7 @@ static void zram_bio_discard(struct zram *zram, u32 index,
 			     int offset, struct bio *bio)
 {
 	size_t n = bio->bi_iter.bi_size;
+	struct zram_meta *meta = zram->meta;
 
 	/*
 	 * zram manages data in physical block size units. Because logical block
@@ -580,13 +599,9 @@ static void zram_bio_discard(struct zram *zram, u32 index,
 	}
 
 	while (n >= PAGE_SIZE) {
-		/*
-		 * Discard request can be large so the lock hold times could be
-		 * lengthy. So take the lock once per page.
-		 */
-		write_lock(&zram->meta->tb_lock);
+		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
 		zram_free_page(zram, index);
-		write_unlock(&zram->meta->tb_lock);
+		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 		index++;
 		n -= PAGE_SIZE;
 	}
@@ -821,9 +836,9 @@ static void zram_slot_free_notify(struct block_device *bdev,
 	zram = bdev->bd_disk->private_data;
 	meta = zram->meta;
 
-	write_lock(&meta->tb_lock);
+	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
 	zram_free_page(zram, index);
-	write_unlock(&meta->tb_lock);
+	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 	atomic64_inc(&zram->stats.notify_free);
 }
 
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 7f21c145e317..5b0afde729cd 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -43,7 +43,6 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
 /*-- End of configurable params */
 
 #define SECTOR_SHIFT		9
-#define SECTOR_SIZE		(1 << SECTOR_SHIFT)
 #define SECTORS_PER_PAGE_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
 #define SECTORS_PER_PAGE	(1 << SECTORS_PER_PAGE_SHIFT)
 #define ZRAM_LOGICAL_BLOCK_SHIFT 12
@@ -51,10 +50,24 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
 #define ZRAM_SECTOR_PER_LOGICAL_BLOCK	\
 	(1 << (ZRAM_LOGICAL_BLOCK_SHIFT - SECTOR_SHIFT))
 
-/* Flags for zram pages (table[page_no].flags) */
+
+/*
+ * The lower ZRAM_FLAG_SHIFT bits of table.value is for
+ * object size (excluding header), the higher bits is for
+ * zram_pageflags.
+ *
+ * zram is mainly used for memory efficiency so we want to keep memory
+ * footprint small so we can squeeze size and flags into a field.
+ * The lower ZRAM_FLAG_SHIFT bits is for object size (excluding header),
+ * the higher bits is for zram_pageflags.
+ */
+#define ZRAM_FLAG_SHIFT 24
+
+/* Flags for zram pages (table[page_no].value) */
 enum zram_pageflags {
 	/* Page consists entirely of zeros */
-	ZRAM_ZERO,
+	ZRAM_ZERO = ZRAM_FLAG_SHIFT + 1,
+	ZRAM_ACCESS,	/* page in now accessed */
 
 	__NR_ZRAM_PAGEFLAGS,
 };
@@ -62,11 +75,10 @@ enum zram_pageflags {
 /*-- Data structures */
 
 /* Allocated for each disk page */
-struct table {
+struct zram_table_entry {
 	unsigned long handle;
-	u16 size;	/* object size (excluding header) */
-	u8 flags;
-} __aligned(4);
+	unsigned long value;
+};
 
 struct zram_stats {
 	atomic64_t compr_data_size;	/* compressed size of pages stored */
@@ -81,8 +93,7 @@ struct zram_stats {
 };
 
 struct zram_meta {
-	rwlock_t tb_lock;	/* protect table */
-	struct table *table;
+	struct zram_table_entry *table;
 	struct zs_pool *mem_pool;
 };
 
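
The zram_drv.h hunk above packs the compressed-object size into the low ZRAM_FLAG_SHIFT bits of zram_table_entry.value and moves the page flags, including the new ZRAM_ACCESS lock bit, above them. A minimal userspace sketch of that packing, reusing the same shift purely to illustrate the layout (the driver itself operates on the real table under bit_spin_lock()):

#include <stdio.h>

#define ZRAM_FLAG_SHIFT 24

/* Flags live above the size bits, mirroring enum zram_pageflags */
enum {
	ZRAM_ZERO = ZRAM_FLAG_SHIFT + 1,
	ZRAM_ACCESS,
};

struct zram_table_entry {
	unsigned long handle;
	unsigned long value;	/* [flags | object size] */
};

static size_t get_obj_size(const struct zram_table_entry *e)
{
	return e->value & ((1UL << ZRAM_FLAG_SHIFT) - 1);
}

static void set_obj_size(struct zram_table_entry *e, size_t size)
{
	unsigned long flags = e->value >> ZRAM_FLAG_SHIFT;

	/* keep the flag bits, overwrite only the size bits */
	e->value = (flags << ZRAM_FLAG_SHIFT) | size;
}

int main(void)
{
	struct zram_table_entry e = { .handle = 0, .value = 0 };

	e.value |= 1UL << ZRAM_ZERO;	/* set a flag bit */
	set_obj_size(&e, 3000);		/* store a compressed-object size */

	printf("size=%zu zero=%lu\n", get_obj_size(&e),
	       (e.value >> ZRAM_ZERO) & 1);	/* prints: size=3000 zero=1 */
	return 0;
}

Squeezing both fields into one word keeps the per-page metadata small and gives the per-entry bit spinlock (ZRAM_ACCESS) a home without growing the table, which is what lets the driver drop the global meta->tb_lock rwlock in the zram_drv.c hunks above.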
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index 17cf96c45f2b..79f18e6d9c4f 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -286,7 +286,11 @@ int __meminit firmware_map_add_hotplug(u64 start, u64 end, const char *type)
 {
 	struct firmware_map_entry *entry;
 
-	entry = firmware_map_find_entry_bootmem(start, end, type);
+	entry = firmware_map_find_entry(start, end - 1, type);
+	if (entry)
+		return 0;
+
+	entry = firmware_map_find_entry_bootmem(start, end - 1, type);
 	if (!entry) {
 		entry = kzalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC);
 		if (!entry)
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index 7e4bae760e27..c3b80fd65d62 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -125,7 +125,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
 		parent = &entry->head;
 	}
 	if (parent) {
-		hlist_add_after_rcu(parent, &item->head);
+		hlist_add_behind_rcu(&item->head, parent);
 	} else {
 		hlist_add_head_rcu(&item->head, h_list);
 	}
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index ae208f612198..cccef87963e0 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -688,7 +688,7 @@ static int atk_debugfs_gitm_get(void *p, u64 *val)
 DEFINE_SIMPLE_ATTRIBUTE(atk_debugfs_gitm,
 			atk_debugfs_gitm_get,
 			NULL,
-			"0x%08llx\n")
+			"0x%08llx\n");
 
 static int atk_acpi_print(char *buf, size_t sz, union acpi_object *obj)
 {
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index 0bf1e4edf04d..6590558d1d31 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -42,7 +42,6 @@ DEFINE_MUTEX(lguest_lock);
 static __init int map_switcher(void)
 {
 	int i, err;
-	struct page **pagep;
 
 	/*
 	 * Map the Switcher in to high memory.
@@ -110,11 +109,9 @@ static __init int map_switcher(void)
 	 * This code actually sets up the pages we've allocated to appear at
 	 * switcher_addr. map_vm_area() takes the vma we allocated above, the
 	 * kind of pages we're mapping (kernel pages), and a pointer to our
-	 * array of struct pages. It increments that pointer, but we don't
-	 * care.
+	 * array of struct pages.
 	 */
-	pagep = lg_switcher_pages;
-	err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, &pagep);
+	err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, lg_switcher_pages);
 	if (err) {
 		printk("lguest: map_vm_area failed: %i\n", err);
 		goto free_vma;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 681a9e81ff51..e8ba7470700a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1948,7 +1948,7 @@ static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
 
 	/* add filter to the list */
 	if (parent)
-		hlist_add_after(&parent->fdir_node, &input->fdir_node);
+		hlist_add_behind(&input->fdir_node, &parent->fdir_node);
 	else
 		hlist_add_head(&input->fdir_node,
 			       &pf->fdir_filter_list);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 94a1c07efeb0..e4100b5737b6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2517,7 +2517,7 @@ static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
 
 	/* add filter to the list */
 	if (parent)
-		hlist_add_after(&parent->fdir_node, &input->fdir_node);
+		hlist_add_behind(&input->fdir_node, &parent->fdir_node);
 	else
 		hlist_add_head(&input->fdir_node,
 			       &adapter->fdir_filter_list);
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index 02b0379ae550..4f34dc0095b5 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -585,7 +585,6 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 
 	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
 		int ret;
-		struct page **page_array_ptr;
 
 		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
 
@@ -598,8 +597,7 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 		}
 		tmp_area.addr = page_addr;
 		tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
-		page_array_ptr = page;
-		ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
+		ret = map_vm_area(&tmp_area, PAGE_KERNEL, page);
 		if (ret) {
 			pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
 			       proc->pid, page_addr);
diff --git a/drivers/staging/lustre/lustre/libcfs/hash.c b/drivers/staging/lustre/lustre/libcfs/hash.c
index 5dde79418297..8ef1deb59d4a 100644
--- a/drivers/staging/lustre/lustre/libcfs/hash.c
+++ b/drivers/staging/lustre/lustre/libcfs/hash.c
@@ -351,7 +351,7 @@ cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 					cfs_hash_dhead_t, dh_head);
 
 	if (dh->dh_tail != NULL)	/* not empty */
-		hlist_add_after(dh->dh_tail, hnode);
+		hlist_add_behind(hnode, dh->dh_tail);
 	else				/* empty list */
 		hlist_add_head(hnode, &dh->dh_head);
 	dh->dh_tail = hnode;
@@ -406,7 +406,7 @@ cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 					cfs_hash_dhead_dep_t, dd_head);
 
 	if (dh->dd_tail != NULL)	/* not empty */
-		hlist_add_after(dh->dd_tail, hnode);
+		hlist_add_behind(hnode, dh->dd_tail);
 	else				/* empty list */
 		hlist_add_head(hnode, &dh->dd_head);
 	dh->dd_tail = hnode;
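
The hlist_add_after() call sites converted above (drm_hashtab, i40e, ixgbe and the lustre hash) are mechanical fallout of the list API rename in this series: hlist_add_behind() keeps the "insert n after prev" behaviour but takes the node being inserted first and the existing node second. A tiny userspace sketch of an insert-behind operation on a singly linked list, shown only to illustrate the argument order (this is not the kernel's <linux/list.h> implementation):

#include <stdio.h>

struct node {
	int val;
	struct node *next;
};

/* Insert @n immediately after @prev: same argument order as the renamed helper. */
static void add_behind(struct node *n, struct node *prev)
{
	n->next = prev->next;
	prev->next = n;
}

int main(void)
{
	struct node a = { 1, NULL }, b = { 2, NULL }, c = { 3, NULL };
	struct node *p;

	a.next = &c;		/* list: 1 -> 3 */
	add_behind(&b, &a);	/* list: 1 -> 2 -> 3 */

	for (p = &a; p; p = p->next)
		printf("%d ", p->val);
	printf("\n");
	return 0;
}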
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 454b65898e2c..42bad18c66c9 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -355,7 +355,7 @@ static struct sysrq_key_op sysrq_term_op = {
 
 static void moom_callback(struct work_struct *ignored)
 {
-	out_of_memory(node_zonelist(first_online_node, GFP_KERNEL), GFP_KERNEL,
+	out_of_memory(node_zonelist(first_memory_node, GFP_KERNEL), GFP_KERNEL,
 		      0, NULL, true);
 }
 