author    Linus Torvalds <torvalds@linux-foundation.org>    2014-01-30 21:44:44 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2014-01-30 21:44:44 -0500
commit    aa2e7100e38880db7907cb2b7ec6267b2b243771 (patch)
tree      67f9d2479365398c07833d3fc4f794861f7da5b1
parent    2def2ef2ae5f3990aabdbe8a755911902707d268 (diff)
parent    7c094fd698de2f333fa39b6da213f880d40b9bfe (diff)
Merge branch 'akpm' (patches from Andrew Morton)
Merge misc fixes from Andrew Morton:
 "A few hotfixes and various leftovers which were awaiting other merges.
  Mainly movement of zram into mm/"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (25 commits)
  memcg: fix mutex not unlocked on memcg_create_kmem_cache fail path
  Documentation/filesystems/vfs.txt: update file_operations documentation
  mm, oom: base root bonus on current usage
  mm: don't lose the SOFT_DIRTY flag on mprotect
  mm/slub.c: fix page->_count corruption (again)
  mm/mempolicy.c: fix mempolicy printing in numa_maps
  zram: remove zram->lock in read path and change it with mutex
  zram: remove workqueue for freeing removed pending slot
  zram: introduce zram->tb_lock
  zram: use atomic operation for stat
  zram: remove unnecessary free
  zram: delay pending free request in read path
  zram: fix race between reset and flushing pending work
  zsmalloc: add maintainers
  zram: add zram maintainers
  zsmalloc: add copyright
  zram: add copyright
  zram: remove old private project comment
  zram: promote zram from staging
  zsmalloc: move it under mm
  ...
-rw-r--r--  Documentation/blockdev/zram.txt (renamed from drivers/staging/zram/zram.txt)  6
-rw-r--r--  Documentation/filesystems/proc.txt  4
-rw-r--r--  Documentation/filesystems/vfs.txt  12
-rw-r--r--  MAINTAINERS  16
-rw-r--r--  arch/x86/include/asm/pgtable_types.h  3
-rw-r--r--  drivers/block/Kconfig  2
-rw-r--r--  drivers/block/Makefile  1
-rw-r--r--  drivers/block/zram/Kconfig (renamed from drivers/staging/zram/Kconfig)  1
-rw-r--r--  drivers/block/zram/Makefile (renamed from drivers/staging/zram/Makefile)  0
-rw-r--r--  drivers/block/zram/zram_drv.c (renamed from drivers/staging/zram/zram_drv.c)  128
-rw-r--r--  drivers/block/zram/zram_drv.h (renamed from drivers/staging/zram/zram_drv.h)  32
-rw-r--r--  drivers/net/phy/mdio_bus.c  1
-rw-r--r--  drivers/staging/Kconfig  4
-rw-r--r--  drivers/staging/Makefile  2
-rw-r--r--  drivers/staging/zsmalloc/Kconfig  24
-rw-r--r--  drivers/staging/zsmalloc/Makefile  3
-rw-r--r--  drivers/video/backlight/lcd.c  2
-rw-r--r--  include/linux/blkdev.h  5
-rw-r--r--  include/linux/bootmem.h  4
-rw-r--r--  include/linux/smp.h  6
-rw-r--r--  include/linux/zsmalloc.h (renamed from drivers/staging/zsmalloc/zsmalloc.h)  1
-rw-r--r--  kernel/smp.c  68
-rw-r--r--  mm/Kconfig  25
-rw-r--r--  mm/Makefile  1
-rw-r--r--  mm/memcontrol.c  7
-rw-r--r--  mm/mempolicy.c  2
-rw-r--r--  mm/oom_kill.c  2
-rw-r--r--  mm/slub.c  19
-rw-r--r--  mm/zsmalloc.c (renamed from drivers/staging/zsmalloc/zsmalloc-main.c)  4
29 files changed, 154 insertions, 231 deletions
diff --git a/drivers/staging/zram/zram.txt b/Documentation/blockdev/zram.txt
index 765d790ae831..2eccddffa6c8 100644
--- a/drivers/staging/zram/zram.txt
+++ b/Documentation/blockdev/zram.txt
@@ -1,8 +1,6 @@
 zram: Compressed RAM based block devices
 ----------------------------------------
 
-Project home: http://compcache.googlecode.com/
-
 * Introduction
 
 The zram module creates RAM based block devices named /dev/zram<id>
@@ -69,9 +67,5 @@ Following shows a typical sequence of steps for using zram.
 	resets the disksize to zero. You must set the disksize again
 	before reusing the device.
 
-Please report any problems at:
- - Mailing list: linux-mm-cc at laptop dot org
- - Issue tracker: http://code.google.com/p/compcache/issues/list
-
 Nitin Gupta
 ngupta@vflare.org
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 31f76178c987..f00bee144add 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -1386,8 +1386,8 @@ may allocate from based on an estimation of its current memory and swap use.
 For example, if a task is using all allowed memory, its badness score will be
 1000. If it is using half of its allowed memory, its score will be 500.
 
-There is an additional factor included in the badness score: root
-processes are given 3% extra memory over other tasks.
+There is an additional factor included in the badness score: the current memory
+and swap usage is discounted by 3% for root processes.
 
 The amount of "allowed" memory depends on the context in which the oom killer
 was called. If it is due to the memory assigned to the allocating task's cpuset
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index deb48b5fd883..c53784c119c8 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -782,7 +782,7 @@ struct file_operations
 ----------------------
 
 This describes how the VFS can manipulate an open file. As of kernel
-3.5, the following members are defined:
+3.12, the following members are defined:
 
 struct file_operations {
 	struct module *owner;
@@ -803,9 +803,6 @@ struct file_operations {
 	int (*aio_fsync) (struct kiocb *, int datasync);
 	int (*fasync) (int, struct file *, int);
 	int (*lock) (struct file *, int, struct file_lock *);
-	ssize_t (*readv) (struct file *, const struct iovec *, unsigned long, loff_t *);
-	ssize_t (*writev) (struct file *, const struct iovec *, unsigned long, loff_t *);
-	ssize_t (*sendfile) (struct file *, loff_t *, size_t, read_actor_t, void *);
 	ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
 	unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
 	int (*check_flags)(int);
@@ -814,6 +811,7 @@ struct file_operations {
 	ssize_t (*splice_read)(struct file *, struct pipe_inode_info *, size_t, unsigned int);
 	int (*setlease)(struct file *, long arg, struct file_lock **);
 	long (*fallocate)(struct file *, int mode, loff_t offset, loff_t len);
+	int (*show_fdinfo)(struct seq_file *m, struct file *f);
 };
 
 Again, all methods are called without any locks being held, unless
@@ -864,12 +862,6 @@ otherwise noted.
   lock: called by the fcntl(2) system call for F_GETLK, F_SETLK, and F_SETLKW
 	commands
 
-  readv: called by the readv(2) system call
-
-  writev: called by the writev(2) system call
-
-  sendfile: called by the sendfile(2) system call
-
   get_unmapped_area: called by the mmap(2) system call
 
   check_flags: called by the fcntl(2) system call for F_SETFL command
diff --git a/MAINTAINERS b/MAINTAINERS
index a31a6e3e199f..b5795f031b3e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9740,11 +9740,27 @@ T: Mercurial http://linuxtv.org/hg/v4l-dvb
 S:	Odd Fixes
 F:	drivers/media/pci/zoran/
 
+ZRAM COMPRESSED RAM BLOCK DEVICE DRIVER
+M:	Minchan Kim <minchan@kernel.org>
+M:	Nitin Gupta <ngupta@vflare.org>
+L:	linux-kernel@vger.kernel.org
+S:	Maintained
+F:	drivers/block/zram/
+F:	Documentation/blockdev/zram.txt
+
 ZS DECSTATION Z85C30 SERIAL DRIVER
 M:	"Maciej W. Rozycki" <macro@linux-mips.org>
 S:	Maintained
 F:	drivers/tty/serial/zs.*
 
+ZSMALLOC COMPRESSED SLAB MEMORY ALLOCATOR
+M:	Minchan Kim <minchan@kernel.org>
+M:	Nitin Gupta <ngupta@vflare.org>
+L:	linux-mm@kvack.org
+S:	Maintained
+F:	mm/zsmalloc.c
+F:	include/linux/zsmalloc.h
+
 ZSWAP COMPRESSED SWAP CACHING
 M:	Seth Jennings <sjenning@linux.vnet.ibm.com>
 L:	linux-mm@kvack.org
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index a83aa44bb1fb..1aa9ccd43223 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -121,7 +121,8 @@
 
 /* Set of bits not changed in pte_modify */
 #define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
-			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
+			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY |	\
+			 _PAGE_SOFT_DIRTY)
 #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
 
 #define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT)
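
The one-line mask change above is enough because pte_modify() preserves only the bits named in _PAGE_CHG_MASK when mprotect() rebuilds a PTE. A simplified sketch of that behaviour (illustrative, same shape as the x86 implementation, not the literal kernel source):

/*
 * Simplified pte_modify(): only the bits in _PAGE_CHG_MASK survive
 * from the old PTE; everything else comes from the new protection.
 * Before this fix, _PAGE_SOFT_DIRTY was missing from the mask, so
 * mprotect() silently dropped the soft-dirty tracking state.
 */
static inline pte_t pte_modify_sketch(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	val &= _PAGE_CHG_MASK;				/* bits kept */
	val |= pgprot_val(newprot) & ~_PAGE_CHG_MASK;	/* bits replaced */

	return __pte(val);
}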
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 9ffa90c6201c..014a1cfc41c5 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -108,6 +108,8 @@ source "drivers/block/paride/Kconfig"
 
 source "drivers/block/mtip32xx/Kconfig"
 
+source "drivers/block/zram/Kconfig"
+
 config BLK_CPQ_DA
 	tristate "Compaq SMART2 support"
 	depends on PCI && VIRT_TO_BUS && 0
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 816d979c3266..02b688d1438d 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/
 
 obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
 obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk.o
+obj-$(CONFIG_ZRAM) += zram/
 
 nvme-y := nvme-core.o nvme-scsi.o
 skd-y := skd_main.o
diff --git a/drivers/staging/zram/Kconfig b/drivers/block/zram/Kconfig
index 983314c41349..3450be850399 100644
--- a/drivers/staging/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -14,7 +14,6 @@ config ZRAM
 	  disks and maybe many more.
 
 	  See zram.txt for more information.
-	  Project home: <https://compcache.googlecode.com/>
 
 config ZRAM_DEBUG
 	bool "Compressed RAM block device debug support"
diff --git a/drivers/staging/zram/Makefile b/drivers/block/zram/Makefile
index cb0f9ced6a93..cb0f9ced6a93 100644
--- a/drivers/staging/zram/Makefile
+++ b/drivers/block/zram/Makefile
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 108f2733106d..011e55d820b1 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -2,6 +2,7 @@
  * Compressed RAM block device
  *
  * Copyright (C) 2008, 2009, 2010 Nitin Gupta
+ *               2012, 2013 Minchan Kim
  *
  * This code is released using a dual license strategy: BSD/GPL
  * You can choose the licence that better fits your requirements.
@@ -9,7 +10,6 @@
  * Released under the terms of 3-clause BSD License
  * Released under the terms of GNU General Public License Version 2.0
  *
- * Project home: http://compcache.googlecode.com
  */
 
 #define KMSG_COMPONENT "zram"
@@ -104,7 +104,7 @@ static ssize_t zero_pages_show(struct device *dev,
 {
 	struct zram *zram = dev_to_zram(dev);
 
-	return sprintf(buf, "%u\n", zram->stats.pages_zero);
+	return sprintf(buf, "%u\n", atomic_read(&zram->stats.pages_zero));
 }
 
 static ssize_t orig_data_size_show(struct device *dev,
@@ -113,7 +113,7 @@ static ssize_t orig_data_size_show(struct device *dev,
 	struct zram *zram = dev_to_zram(dev);
 
 	return sprintf(buf, "%llu\n",
-		(u64)(zram->stats.pages_stored) << PAGE_SHIFT);
+		(u64)(atomic_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
 }
 
 static ssize_t compr_data_size_show(struct device *dev,
@@ -140,6 +140,7 @@ static ssize_t mem_used_total_show(struct device *dev,
 	return sprintf(buf, "%llu\n", val);
 }
 
+/* flag operations need meta->tb_lock */
 static int zram_test_flag(struct zram_meta *meta, u32 index,
 			enum zram_pageflags flag)
 {
@@ -228,6 +229,8 @@ static struct zram_meta *zram_meta_alloc(u64 disksize)
 		goto free_table;
 	}
 
+	rwlock_init(&meta->tb_lock);
+	mutex_init(&meta->buffer_lock);
 	return meta;
 
 free_table:
@@ -280,6 +283,7 @@ static void handle_zero_page(struct bio_vec *bvec)
 	flush_dcache_page(page);
 }
 
+/* NOTE: caller should hold meta->tb_lock with write-side */
 static void zram_free_page(struct zram *zram, size_t index)
 {
 	struct zram_meta *meta = zram->meta;
@@ -293,21 +297,21 @@ static void zram_free_page(struct zram *zram, size_t index)
 		 */
 		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
 			zram_clear_flag(meta, index, ZRAM_ZERO);
-			zram->stats.pages_zero--;
+			atomic_dec(&zram->stats.pages_zero);
 		}
 		return;
 	}
 
 	if (unlikely(size > max_zpage_size))
-		zram->stats.bad_compress--;
+		atomic_dec(&zram->stats.bad_compress);
 
 	zs_free(meta->mem_pool, handle);
 
 	if (size <= PAGE_SIZE / 2)
-		zram->stats.good_compress--;
+		atomic_dec(&zram->stats.good_compress);
 
 	atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
-	zram->stats.pages_stored--;
+	atomic_dec(&zram->stats.pages_stored);
 
 	meta->table[index].handle = 0;
 	meta->table[index].size = 0;
@@ -319,20 +323,26 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 	size_t clen = PAGE_SIZE;
 	unsigned char *cmem;
 	struct zram_meta *meta = zram->meta;
-	unsigned long handle = meta->table[index].handle;
+	unsigned long handle;
+	u16 size;
+
+	read_lock(&meta->tb_lock);
+	handle = meta->table[index].handle;
+	size = meta->table[index].size;
 
 	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
+		read_unlock(&meta->tb_lock);
 		clear_page(mem);
 		return 0;
 	}
 
 	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
-	if (meta->table[index].size == PAGE_SIZE)
+	if (size == PAGE_SIZE)
 		copy_page(mem, cmem);
 	else
-		ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
-						mem, &clen);
+		ret = lzo1x_decompress_safe(cmem, size, mem, &clen);
 	zs_unmap_object(meta->mem_pool, handle);
+	read_unlock(&meta->tb_lock);
 
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret != LZO_E_OK)) {
@@ -353,11 +363,14 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 	struct zram_meta *meta = zram->meta;
 	page = bvec->bv_page;
 
+	read_lock(&meta->tb_lock);
 	if (unlikely(!meta->table[index].handle) ||
 			zram_test_flag(meta, index, ZRAM_ZERO)) {
+		read_unlock(&meta->tb_lock);
 		handle_zero_page(bvec);
 		return 0;
 	}
+	read_unlock(&meta->tb_lock);
 
 	if (is_partial_io(bvec))
 		/* Use a temporary buffer to decompress the page */
@@ -400,6 +413,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	struct page *page;
 	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
 	struct zram_meta *meta = zram->meta;
+	bool locked = false;
 
 	page = bvec->bv_page;
 	src = meta->compress_buffer;
@@ -419,6 +433,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		goto out;
 	}
 
+	mutex_lock(&meta->buffer_lock);
+	locked = true;
 	user_mem = kmap_atomic(page);
 
 	if (is_partial_io(bvec)) {
@@ -433,25 +449,18 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	if (page_zero_filled(uncmem)) {
 		kunmap_atomic(user_mem);
 		/* Free memory associated with this sector now. */
+		write_lock(&zram->meta->tb_lock);
 		zram_free_page(zram, index);
-
-		zram->stats.pages_zero++;
 		zram_set_flag(meta, index, ZRAM_ZERO);
+		write_unlock(&zram->meta->tb_lock);
+
+		atomic_inc(&zram->stats.pages_zero);
 		ret = 0;
 		goto out;
 	}
 
-	/*
-	 * zram_slot_free_notify could miss free so that let's
-	 * double check.
-	 */
-	if (unlikely(meta->table[index].handle ||
-			zram_test_flag(meta, index, ZRAM_ZERO)))
-		zram_free_page(zram, index);
-
 	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
 			       meta->compress_workmem);
-
 	if (!is_partial_io(bvec)) {
 		kunmap_atomic(user_mem);
 		user_mem = NULL;
@@ -464,7 +473,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	}
 
 	if (unlikely(clen > max_zpage_size)) {
-		zram->stats.bad_compress++;
+		atomic_inc(&zram->stats.bad_compress);
 		clen = PAGE_SIZE;
 		src = NULL;
 		if (is_partial_io(bvec))
@@ -494,18 +503,22 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	 * Free memory associated with this sector
 	 * before overwriting unused sectors.
 	 */
+	write_lock(&zram->meta->tb_lock);
 	zram_free_page(zram, index);
 
 	meta->table[index].handle = handle;
 	meta->table[index].size = clen;
+	write_unlock(&zram->meta->tb_lock);
 
 	/* Update stats */
 	atomic64_add(clen, &zram->stats.compr_size);
-	zram->stats.pages_stored++;
+	atomic_inc(&zram->stats.pages_stored);
 	if (clen <= PAGE_SIZE / 2)
-		zram->stats.good_compress++;
+		atomic_inc(&zram->stats.good_compress);
 
 out:
+	if (locked)
+		mutex_unlock(&meta->buffer_lock);
 	if (is_partial_io(bvec))
 		kfree(uncmem);
 
@@ -514,36 +527,15 @@ out:
 	return ret;
 }
 
-static void handle_pending_slot_free(struct zram *zram)
-{
-	struct zram_slot_free *free_rq;
-
-	spin_lock(&zram->slot_free_lock);
-	while (zram->slot_free_rq) {
-		free_rq = zram->slot_free_rq;
-		zram->slot_free_rq = free_rq->next;
-		zram_free_page(zram, free_rq->index);
-		kfree(free_rq);
-	}
-	spin_unlock(&zram->slot_free_lock);
-}
-
 static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
 			int offset, struct bio *bio, int rw)
 {
 	int ret;
 
-	if (rw == READ) {
-		down_read(&zram->lock);
-		handle_pending_slot_free(zram);
+	if (rw == READ)
 		ret = zram_bvec_read(zram, bvec, index, offset, bio);
-		up_read(&zram->lock);
-	} else {
-		down_write(&zram->lock);
-		handle_pending_slot_free(zram);
+	else
 		ret = zram_bvec_write(zram, bvec, index, offset);
-		up_write(&zram->lock);
-	}
 
 	return ret;
 }
@@ -553,8 +545,6 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
 	size_t index;
 	struct zram_meta *meta;
 
-	flush_work(&zram->free_work);
-
 	down_write(&zram->init_lock);
 	if (!zram->init_done) {
 		up_write(&zram->init_lock);
@@ -762,40 +752,19 @@ error:
 	bio_io_error(bio);
 }
 
-static void zram_slot_free(struct work_struct *work)
-{
-	struct zram *zram;
-
-	zram = container_of(work, struct zram, free_work);
-	down_write(&zram->lock);
-	handle_pending_slot_free(zram);
-	up_write(&zram->lock);
-}
-
-static void add_slot_free(struct zram *zram, struct zram_slot_free *free_rq)
-{
-	spin_lock(&zram->slot_free_lock);
-	free_rq->next = zram->slot_free_rq;
-	zram->slot_free_rq = free_rq;
-	spin_unlock(&zram->slot_free_lock);
-}
-
 static void zram_slot_free_notify(struct block_device *bdev,
 				unsigned long index)
 {
 	struct zram *zram;
-	struct zram_slot_free *free_rq;
+	struct zram_meta *meta;
 
 	zram = bdev->bd_disk->private_data;
-	atomic64_inc(&zram->stats.notify_free);
-
-	free_rq = kmalloc(sizeof(struct zram_slot_free), GFP_ATOMIC);
-	if (!free_rq)
-		return;
+	meta = zram->meta;
 
-	free_rq->index = index;
-	add_slot_free(zram, free_rq);
-	schedule_work(&zram->free_work);
+	write_lock(&meta->tb_lock);
+	zram_free_page(zram, index);
+	write_unlock(&meta->tb_lock);
+	atomic64_inc(&zram->stats.notify_free);
 }
 
 static const struct block_device_operations zram_devops = {
@@ -839,13 +808,8 @@ static int create_device(struct zram *zram, int device_id)
 {
 	int ret = -ENOMEM;
 
-	init_rwsem(&zram->lock);
 	init_rwsem(&zram->init_lock);
 
-	INIT_WORK(&zram->free_work, zram_slot_free);
-	spin_lock_init(&zram->slot_free_lock);
-	zram->slot_free_rq = NULL;
-
 	zram->queue = blk_alloc_queue(GFP_KERNEL);
 	if (!zram->queue) {
 		pr_err("Error allocating disk queue for device %d\n",
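
Taken together, the zram hunks above replace the single zram->lock rwsem with two finer-grained locks. A condensed read-side sketch of the resulting scheme (illustrative, trimmed from the hunks above; decompress() is a hypothetical helper standing in for the LZO path):

/*
 * meta->tb_lock (rwlock)    - guards meta->table[] and per-entry flags;
 *                             readers run concurrently.
 * meta->buffer_lock (mutex) - serializes writers over the shared
 *                             compress buffers.
 */
static int zram_read_sketch(struct zram *zram, u32 index, char *mem)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	u16 size;
	int ret;

	read_lock(&meta->tb_lock);		/* keep the slot stable */
	handle = meta->table[index].handle;
	size = meta->table[index].size;
	if (!handle) {
		read_unlock(&meta->tb_lock);
		clear_page(mem);		/* zero-filled slot */
		return 0;
	}
	/* decompress while still holding the read lock, so a concurrent
	 * writer or slot-free notification cannot free the handle under us */
	ret = decompress(zram, handle, size, mem);	/* hypothetical helper */
	read_unlock(&meta->tb_lock);
	return ret;
}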
diff --git a/drivers/staging/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 97a3acf6ab76..ad8aa35bae00 100644
--- a/drivers/staging/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -2,6 +2,7 @@
  * Compressed RAM block device
  *
  * Copyright (C) 2008, 2009, 2010 Nitin Gupta
+ *               2012, 2013 Minchan Kim
  *
  * This code is released using a dual license strategy: BSD/GPL
  * You can choose the licence that better fits your requirements.
@@ -9,7 +10,6 @@
  * Released under the terms of 3-clause BSD License
  * Released under the terms of GNU General Public License Version 2.0
  *
- * Project home: http://compcache.googlecode.com
  */
 
 #ifndef _ZRAM_DRV_H_
@@ -17,8 +17,7 @@
 
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
-
-#include "../zsmalloc/zsmalloc.h"
+#include <linux/zsmalloc.h>
 
 /*
  * Some arbitrary value. This is just to catch
@@ -69,10 +68,6 @@ struct table {
 	u8 flags;
 } __aligned(4);
 
-/*
- * All 64bit fields should only be manipulated by 64bit atomic accessors.
- * All modifications to 32bit counter should be protected by zram->lock.
- */
 struct zram_stats {
 	atomic64_t compr_size;	/* compressed size of pages stored */
 	atomic64_t num_reads;	/* failed + successful */
@@ -81,33 +76,23 @@ struct zram_stats {
 	atomic64_t failed_writes;	/* can happen when memory is too low */
 	atomic64_t invalid_io;	/* non-page-aligned I/O requests */
 	atomic64_t notify_free;	/* no. of swap slot free notifications */
-	u32 pages_zero;		/* no. of zero filled pages */
-	u32 pages_stored;	/* no. of pages currently stored */
-	u32 good_compress;	/* % of pages with compression ratio<=50% */
-	u32 bad_compress;	/* % of pages with compression ratio>=75% */
+	atomic_t pages_zero;	/* no. of zero filled pages */
+	atomic_t pages_stored;	/* no. of pages currently stored */
+	atomic_t good_compress;	/* % of pages with compression ratio<=50% */
+	atomic_t bad_compress;	/* % of pages with compression ratio>=75% */
 };
 
 struct zram_meta {
+	rwlock_t tb_lock;	/* protect table */
 	void *compress_workmem;
 	void *compress_buffer;
 	struct table *table;
 	struct zs_pool *mem_pool;
-};
-
-struct zram_slot_free {
-	unsigned long index;
-	struct zram_slot_free *next;
+	struct mutex buffer_lock; /* protect compress buffers */
 };
 
 struct zram {
 	struct zram_meta *meta;
-	struct rw_semaphore lock; /* protect compression buffers, table,
-				   * 32bit stat counters against concurrent
-				   * notifications, reads and writes */
-
-	struct work_struct free_work;	/* handle pending free request */
-	struct zram_slot_free *slot_free_rq; /* list head of free request */
-
 	struct request_queue *queue;
 	struct gendisk *disk;
 	int init_done;
@@ -118,7 +103,6 @@ struct zram {
 	 * we can store in a disk.
 	 */
 	u64 disksize;	/* bytes */
-	spinlock_t slot_free_lock;
 
 	struct zram_stats stats;
 };
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 930694d3a13f..71e49000fbf3 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -150,6 +150,7 @@ int mdiobus_register(struct mii_bus *bus)
 	err = device_register(&bus->dev);
 	if (err) {
 		pr_err("mii_bus %s failed to register\n", bus->id);
+		put_device(&bus->dev);
 		return -EINVAL;
 	}
 
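
This hunk, like the backlight/lcd.c hunk further down, applies the same reference-counting rule: once device_register() has been called, the struct device is refcounted, so a failure path must drop the reference with put_device() and let the release() callback free the memory; calling kfree() directly (or leaking the reference) bypasses the refcount. A generic sketch of the pattern (illustrative driver code, not from this patch):

static int register_child_sketch(struct device *dev)
{
	int err;

	err = device_register(dev);
	if (err) {
		/* NOT kfree(dev): put_device() runs release() exactly
		 * once, after any remaining references are dropped */
		put_device(dev);
		return err;
	}
	return 0;
}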
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 4bb6b11166b3..040a51525b42 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -76,10 +76,6 @@ source "drivers/staging/sep/Kconfig"
 
 source "drivers/staging/iio/Kconfig"
 
-source "drivers/staging/zsmalloc/Kconfig"
-
-source "drivers/staging/zram/Kconfig"
-
 source "drivers/staging/wlags49_h2/Kconfig"
 
 source "drivers/staging/wlags49_h25/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 9f07e5e16094..dea056bf7ff2 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -32,8 +32,6 @@ obj-$(CONFIG_VT6656) += vt6656/
 obj-$(CONFIG_VME_BUS)		+= vme/
 obj-$(CONFIG_DX_SEP)		+= sep/
 obj-$(CONFIG_IIO)		+= iio/
-obj-$(CONFIG_ZRAM)		+= zram/
-obj-$(CONFIG_ZSMALLOC)		+= zsmalloc/
 obj-$(CONFIG_WLAGS49_H2)	+= wlags49_h2/
 obj-$(CONFIG_WLAGS49_H25)	+= wlags49_h25/
 obj-$(CONFIG_FB_SM7XX)		+= sm7xxfb/
diff --git a/drivers/staging/zsmalloc/Kconfig b/drivers/staging/zsmalloc/Kconfig
deleted file mode 100644
index 9d1f2a24ad62..000000000000
--- a/drivers/staging/zsmalloc/Kconfig
+++ /dev/null
@@ -1,24 +0,0 @@
-config ZSMALLOC
-	bool "Memory allocator for compressed pages"
-	depends on MMU
-	default n
-	help
-	  zsmalloc is a slab-based memory allocator designed to store
-	  compressed RAM pages.  zsmalloc uses virtual memory mapping
-	  in order to reduce fragmentation.  However, this results in a
-	  non-standard allocator interface where a handle, not a pointer, is
-	  returned by an alloc().  This handle must be mapped in order to
-	  access the allocated space.
-
-config PGTABLE_MAPPING
-	bool "Use page table mapping to access object in zsmalloc"
-	depends on ZSMALLOC
-	help
-	  By default, zsmalloc uses a copy-based object mapping method to
-	  access allocations that span two pages. However, if a particular
-	  architecture (ex, ARM) performs VM mapping faster than copying,
-	  then you should select this. This causes zsmalloc to use page table
-	  mapping rather than copying for object mapping.
-
-	  You can check speed with zsmalloc benchmark[1].
-	  [1] https://github.com/spartacus06/zsmalloc
diff --git a/drivers/staging/zsmalloc/Makefile b/drivers/staging/zsmalloc/Makefile
deleted file mode 100644
index b134848a590d..000000000000
--- a/drivers/staging/zsmalloc/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-zsmalloc-y := zsmalloc-main.o
-
-obj-$(CONFIG_ZSMALLOC) += zsmalloc.o
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index 93cf15efc717..7de847df224f 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -228,7 +228,7 @@ struct lcd_device *lcd_device_register(const char *name, struct device *parent,
 
 	rc = device_register(&new_ld->dev);
 	if (rc) {
-		kfree(new_ld);
+		put_device(&new_ld->dev);
 		return ERR_PTR(rc);
 	}
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0375654adb28..8678c4322b44 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -95,10 +95,7 @@ enum rq_cmd_type_bits {
  * as well!
  */
 struct request {
-	union {
-		struct list_head queuelist;
-		struct llist_node ll_list;
-	};
+	struct list_head queuelist;
 	union {
 		struct call_single_data csd;
 		struct work_struct mq_flush_data;
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index b388223bd4a9..db51fe4fe317 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -264,7 +264,7 @@ static inline void * __init memblock_virt_alloc_low(
 {
 	if (!align)
 		align = SMP_CACHE_BYTES;
-	return __alloc_bootmem_low(size, align, BOOTMEM_LOW_LIMIT);
+	return __alloc_bootmem_low(size, align, 0);
 }
 
 static inline void * __init memblock_virt_alloc_low_nopanic(
@@ -272,7 +272,7 @@ static inline void * __init memblock_virt_alloc_low_nopanic(
 {
 	if (!align)
 		align = SMP_CACHE_BYTES;
-	return __alloc_bootmem_low_nopanic(size, align, BOOTMEM_LOW_LIMIT);
+	return __alloc_bootmem_low_nopanic(size, align, 0);
 }
 
 static inline void * __init memblock_virt_alloc_from_nopanic(
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 5da22ee42e16..3834f43f9993 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -11,12 +11,16 @@
 #include <linux/list.h>
 #include <linux/cpumask.h>
 #include <linux/init.h>
+#include <linux/llist.h>
 
 extern void cpu_idle(void);
 
 typedef void (*smp_call_func_t)(void *info);
 struct call_single_data {
-	struct list_head list;
+	union {
+		struct list_head list;
+		struct llist_node llist;
+	};
 	smp_call_func_t func;
 	void *info;
 	u16 flags;
diff --git a/drivers/staging/zsmalloc/zsmalloc.h b/include/linux/zsmalloc.h
index c2eb174b97ee..e44d634e7fb7 100644
--- a/drivers/staging/zsmalloc/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -2,6 +2,7 @@
  * zsmalloc memory allocator
  *
  * Copyright (C) 2011 Nitin Gupta
+ * Copyright (C) 2012, 2013 Minchan Kim
  *
  * This code is released using a dual license strategy: BSD/GPL
  * You can choose the license that better fits your requirements.
diff --git a/kernel/smp.c b/kernel/smp.c
index bd9f94028838..ffee35bef179 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -23,17 +23,11 @@ enum {
 struct call_function_data {
 	struct call_single_data __percpu *csd;
 	cpumask_var_t cpumask;
-	cpumask_var_t cpumask_ipi;
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
 
-struct call_single_queue {
-	struct list_head list;
-	raw_spinlock_t lock;
-};
-
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
 
 static int
 hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
@@ -47,14 +41,8 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
 				cpu_to_node(cpu)))
 			return notifier_from_errno(-ENOMEM);
-		if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
-				cpu_to_node(cpu))) {
-			free_cpumask_var(cfd->cpumask);
-			return notifier_from_errno(-ENOMEM);
-		}
 		cfd->csd = alloc_percpu(struct call_single_data);
 		if (!cfd->csd) {
-			free_cpumask_var(cfd->cpumask_ipi);
 			free_cpumask_var(cfd->cpumask);
 			return notifier_from_errno(-ENOMEM);
 		}
@@ -67,7 +55,6 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 		free_cpumask_var(cfd->cpumask);
-		free_cpumask_var(cfd->cpumask_ipi);
 		free_percpu(cfd->csd);
 		break;
 #endif
@@ -85,12 +72,8 @@ void __init call_function_init(void)
 	void *cpu = (void *)(long)smp_processor_id();
 	int i;
 
-	for_each_possible_cpu(i) {
-		struct call_single_queue *q = &per_cpu(call_single_queue, i);
-
-		raw_spin_lock_init(&q->lock);
-		INIT_LIST_HEAD(&q->list);
-	}
+	for_each_possible_cpu(i)
+		init_llist_head(&per_cpu(call_single_queue, i));
 
 	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
 	register_cpu_notifier(&hotplug_cfd_notifier);
@@ -141,18 +124,9 @@ static void csd_unlock(struct call_single_data *csd)
  */
 static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 {
-	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
-	unsigned long flags;
-	int ipi;
-
 	if (wait)
 		csd->flags |= CSD_FLAG_WAIT;
 
-	raw_spin_lock_irqsave(&dst->lock, flags);
-	ipi = list_empty(&dst->list);
-	list_add_tail(&csd->list, &dst->list);
-	raw_spin_unlock_irqrestore(&dst->lock, flags);
-
 	/*
 	 * The list addition should be visible before sending the IPI
 	 * handler locks the list to pull the entry off it because of
@@ -164,7 +138,7 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 	 * locking and barrier primitives. Generic code isn't really
 	 * equipped to do the right thing...
 	 */
-	if (ipi)
+	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
 		arch_send_call_function_single_ipi(cpu);
 
 	if (wait)
@@ -177,27 +151,26 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
  */
 void generic_smp_call_function_single_interrupt(void)
 {
-	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
-	LIST_HEAD(list);
+	struct llist_node *entry, *next;
 
 	/*
 	 * Shouldn't receive this interrupt on a cpu that is not yet online.
 	 */
 	WARN_ON_ONCE(!cpu_online(smp_processor_id()));
 
-	raw_spin_lock(&q->lock);
-	list_replace_init(&q->list, &list);
-	raw_spin_unlock(&q->lock);
+	entry = llist_del_all(&__get_cpu_var(call_single_queue));
+	entry = llist_reverse_order(entry);
 
-	while (!list_empty(&list)) {
+	while (entry) {
 		struct call_single_data *csd;
 
-		csd = list_entry(list.next, struct call_single_data, list);
-		list_del(&csd->list);
+		next = entry->next;
 
+		csd = llist_entry(entry, struct call_single_data, llist);
 		csd->func(csd->info);
-
 		csd_unlock(csd);
+
+		entry = next;
 	}
 }
 
@@ -402,30 +375,17 @@ void smp_call_function_many(const struct cpumask *mask,
 	if (unlikely(!cpumask_weight(cfd->cpumask)))
 		return;
 
-	/*
-	 * After we put an entry into the list, cfd->cpumask may be cleared
-	 * again when another CPU sends another IPI for a SMP function call, so
-	 * cfd->cpumask will be zero.
-	 */
-	cpumask_copy(cfd->cpumask_ipi, cfd->cpumask);
-
 	for_each_cpu(cpu, cfd->cpumask) {
 		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
-		struct call_single_queue *dst =
-					&per_cpu(call_single_queue, cpu);
-		unsigned long flags;
 
 		csd_lock(csd);
 		csd->func = func;
 		csd->info = info;
-
-		raw_spin_lock_irqsave(&dst->lock, flags);
-		list_add_tail(&csd->list, &dst->list);
-		raw_spin_unlock_irqrestore(&dst->lock, flags);
+		llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
	}
 
 	/* Send a message to all CPUs in the map */
-	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
+	arch_send_call_function_ipi_mask(cfd->cpumask);
 
 	if (wait) {
 		for_each_cpu(cpu, cfd->cpumask) {
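
The whole kernel/smp.c conversion rests on two llist properties: llist_add() reports whether the list was empty before the add (replacing the locked list_empty() check that used to gate the IPI), and llist_del_all() atomically detaches the entire chain, returning it in LIFO order, which llist_reverse_order() turns back into FIFO. A toy producer/consumer showing the pattern, reusing the names from the hunks above (illustrative, not the kernel source):

static void producer_sketch(int cpu, struct call_single_data *csd)
{
	/* llist_add() returns true only if the list was empty, so the
	 * IPI is sent exactly once per burst of queued work */
	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
		arch_send_call_function_single_ipi(cpu);
}

static void consumer_sketch(void)
{
	/* grab the whole chain atomically; it comes back LIFO */
	struct llist_node *entry =
		llist_del_all(&__get_cpu_var(call_single_queue));

	entry = llist_reverse_order(entry);	/* restore FIFO order */
	while (entry) {
		struct call_single_data *csd =
			llist_entry(entry, struct call_single_data, llist);

		entry = entry->next;	/* read next before func() may
					 * recycle the node */
		csd->func(csd->info);
		csd_unlock(csd);
	}
}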
diff --git a/mm/Kconfig b/mm/Kconfig
index 723bbe04a0b0..2d9f1504d75e 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -552,3 +552,28 @@ config MEM_SOFT_DIRTY
 	  it can be cleared by hands.
 
 	  See Documentation/vm/soft-dirty.txt for more details.
+
+config ZSMALLOC
+	bool "Memory allocator for compressed pages"
+	depends on MMU
+	default n
+	help
+	  zsmalloc is a slab-based memory allocator designed to store
+	  compressed RAM pages.  zsmalloc uses virtual memory mapping
+	  in order to reduce fragmentation.  However, this results in a
+	  non-standard allocator interface where a handle, not a pointer, is
+	  returned by an alloc().  This handle must be mapped in order to
+	  access the allocated space.
+
+config PGTABLE_MAPPING
+	bool "Use page table mapping to access object in zsmalloc"
+	depends on ZSMALLOC
+	help
+	  By default, zsmalloc uses a copy-based object mapping method to
+	  access allocations that span two pages. However, if a particular
+	  architecture (ex, ARM) performs VM mapping faster than copying,
+	  then you should select this. This causes zsmalloc to use page table
+	  mapping rather than copying for object mapping.
+
+	  You can check speed with zsmalloc benchmark[1].
+	  [1] https://github.com/spartacus06/zsmalloc
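
The help text above describes zsmalloc's handle-based interface. A minimal usage sketch using the calls visible in the zram driver earlier in this diff (signatures as of this kernel era; zs_create_pool() in particular grew extra parameters in later releases):

#include <linux/string.h>
#include <linux/zsmalloc.h>

static int zsmalloc_usage_sketch(void)
{
	struct zs_pool *pool = zs_create_pool(GFP_KERNEL);
	unsigned long handle;
	void *obj;

	if (!pool)
		return -ENOMEM;

	/* alloc returns an opaque handle, not a pointer */
	handle = zs_malloc(pool, 128);
	if (!handle) {
		zs_destroy_pool(pool);
		return -ENOMEM;
	}

	/* the handle must be mapped before the memory can be touched */
	obj = zs_map_object(pool, handle, ZS_MM_WO);
	memset(obj, 0, 128);
	zs_unmap_object(pool, handle);

	zs_free(pool, handle);
	zs_destroy_pool(pool);
	return 0;
}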
diff --git a/mm/Makefile b/mm/Makefile
index 305d10acd081..310c90a09264 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -60,3 +60,4 @@ obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o
 obj-$(CONFIG_CLEANCACHE) += cleancache.o
 obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o
 obj-$(CONFIG_ZBUD)	+= zbud.o
+obj-$(CONFIG_ZSMALLOC)	+= zsmalloc.o
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 19d5d4274e22..53385cd4e6f0 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3400,7 +3400,7 @@ void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
 static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
 						  struct kmem_cache *s)
 {
-	struct kmem_cache *new;
+	struct kmem_cache *new = NULL;
 	static char *tmp_name = NULL;
 	static DEFINE_MUTEX(mutex);	/* protects tmp_name */
 
@@ -3416,7 +3416,7 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
 	if (!tmp_name) {
 		tmp_name = kmalloc(PATH_MAX, GFP_KERNEL);
 		if (!tmp_name)
-			return NULL;
+			goto out;
 	}
 
 	rcu_read_lock();
@@ -3426,12 +3426,11 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
 
 	new = kmem_cache_create_memcg(memcg, tmp_name, s->object_size, s->align,
 				      (s->flags & ~SLAB_PANIC), s->ctor, s);
-
 	if (new)
 		new->allocflags |= __GFP_KMEMCG;
 	else
 		new = s;
-
+out:
 	mutex_unlock(&mutex);
 	return new;
 }
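
The memcontrol.c fix is the classic single-exit pattern: once the mutex is taken, every failure path must funnel through the unlock, so the early "return NULL" becomes "goto out" and new is pre-initialised to NULL. The general shape (generic sketch, not this function):

static void *locked_alloc_sketch(struct mutex *lock, size_t len)
{
	void *p;

	mutex_lock(lock);
	p = kmalloc(len, GFP_KERNEL);
	if (!p)
		goto out;	/* NOT "return NULL": that leaks the mutex
				 * and deadlocks the next caller */
	/* ... further setup that may also goto out ... */
out:
	mutex_unlock(lock);
	return p;
}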
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 873de7e542bc..ae3c8f3595d4 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2930,7 +2930,7 @@ void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
 	unsigned short mode = MPOL_DEFAULT;
 	unsigned short flags = 0;
 
-	if (pol && pol != &default_policy) {
+	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
 		mode = pol->mode;
 		flags = pol->flags;
 	}
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 37b1b1903fb2..3291e82d4352 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -178,7 +178,7 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
 	 * implementation used by LSMs.
 	 */
 	if (has_capability_noaudit(p, CAP_SYS_ADMIN))
-		adj -= 30;
+		points -= (points * 3) / 100;
 
 	/* Normalize to oom_score_adj units */
 	adj *= totalpages / 1000;
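
The oom_kill.c change replaces a fixed root bonus of 3% of *allowed* memory (adj -= 30, later scaled by totalpages / 1000) with a discount of 3% of the task's *current* usage, matching the proc.txt wording updated above. A standalone demo of how much that moves the score:

#include <stdio.h>

int main(void)
{
	long totalpages = 1000000;	/* allowed memory, in pages */
	long points = 50000;		/* current rss + swap usage */

	/* old rule: adj -= 30; adj *= totalpages / 1000;
	 * i.e. a fixed 3% of allowed memory */
	long old_bonus = 30 * (totalpages / 1000);

	/* new rule: 3% of what the task actually uses */
	long new_bonus = (points * 3) / 100;

	printf("old root bonus: %ld pages\n", old_bonus);	/* 30000 */
	printf("new root bonus: %ld pages\n", new_bonus);	/* 1500  */
	return 0;
}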
diff --git a/mm/slub.c b/mm/slub.c
index 545a170ebf9f..2b1a6970e46f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -355,6 +355,21 @@ static __always_inline void slab_unlock(struct page *page)
 	__bit_spin_unlock(PG_locked, &page->flags);
 }
 
+static inline void set_page_slub_counters(struct page *page, unsigned long counters_new)
+{
+	struct page tmp;
+	tmp.counters = counters_new;
+	/*
+	 * page->counters can cover frozen/inuse/objects as well
+	 * as page->_count.  If we assign to ->counters directly
+	 * we run the risk of losing updates to page->_count, so
+	 * be careful and only assign to the fields we need.
+	 */
+	page->frozen  = tmp.frozen;
+	page->inuse   = tmp.inuse;
+	page->objects = tmp.objects;
+}
+
 /* Interrupts must be disabled (for the fallback code to work right) */
 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 		void *freelist_old, unsigned long counters_old,
@@ -376,7 +391,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
 		if (page->freelist == freelist_old &&
 					page->counters == counters_old) {
 			page->freelist = freelist_new;
-			page->counters = counters_new;
+			set_page_slub_counters(page, counters_new);
 			slab_unlock(page);
 			return 1;
 		}
@@ -415,7 +430,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 		if (page->freelist == freelist_old &&
 					page->counters == counters_old) {
 			page->freelist = freelist_new;
-			page->counters = counters_new;
+			set_page_slub_counters(page, counters_new);
 			slab_unlock(page);
 			local_irq_restore(flags);
 			return 1;
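
set_page_slub_counters() exists because page->counters overlays several narrower fields, including the _count reference count, in a union inside struct page; a full-width store can wipe out a concurrent _count update. A userspace toy showing the overlay hazard (illustrative layout, assumes a 64-bit unsigned long; not the real struct page):

#include <stdio.h>
#include <string.h>

union counters {
	unsigned long counters;		/* wide view of the whole word */
	struct {
		unsigned inuse:16;
		unsigned objects:15;
		unsigned frozen:1;
		int _count;		/* refcount shares the word */
	};
};

int main(void)
{
	union counters c;

	memset(&c, 0, sizeof(c));
	c._count = 42;		/* stands in for a concurrent refcount update */
	c.counters = ~0UL;	/* full-width store clobbers _count too */
	printf("_count after wide store: %d\n", c._count);	/* -1, not 42 */
	return 0;
}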
diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/mm/zsmalloc.c
index 7660c87d8b2a..c03ca5e9fe15 100644
--- a/drivers/staging/zsmalloc/zsmalloc-main.c
+++ b/mm/zsmalloc.c
@@ -2,6 +2,7 @@
  * zsmalloc memory allocator
  *
  * Copyright (C) 2011 Nitin Gupta
+ * Copyright (C) 2012, 2013 Minchan Kim
  *
  * This code is released using a dual license strategy: BSD/GPL
  * You can choose the license that better fits your requirements.
@@ -90,8 +91,7 @@
 #include <linux/hardirq.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
-
-#include "zsmalloc.h"
+#include <linux/zsmalloc.h>
 
 /*
  * This must be power of 2 and greater than or equal to sizeof(link_free).