author     Ingo Molnar <mingo@elte.hu>  2009-03-30 17:53:32 -0400
committer  Ingo Molnar <mingo@elte.hu>  2009-03-30 17:53:32 -0400
commit     65fb0d23fcddd8697c871047b700c78817bdaa43 (patch)
tree       119e6e5f276622c4c862f6c9b6d795264ba1603a /mm
parent     8c083f081d0014057901c68a0a3e0f8ca7ac8d23 (diff)
parent     dfbbe89e197a77f2c8046a51c74e33e35f878080 (diff)
Merge branch 'linus' into cpumask-for-linus

Conflicts:
        arch/x86/kernel/cpu/common.c
Diffstat (limited to 'mm')
 mm/allocpercpu.c    |   2
 mm/backing-dev.c    |  26
 mm/highmem.c        |  65
 mm/memory.c         |   6
 mm/mmap.c           |   4
 mm/page-writeback.c |   4
 mm/percpu.c         | 130
 mm/readahead.c      |  25
 mm/shmem.c          |   2
 mm/slob.c           |  43
 mm/slub.c           |  82
 mm/vmscan.c         |   2
 12 files changed, 290 insertions(+), 101 deletions(-)
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index 3653c570232b..1882923bc706 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -120,7 +120,7 @@ void *__alloc_percpu(size_t size, size_t align)
         * on it. Larger alignment should only be used for module
         * percpu sections on SMP for which this path isn't used.
         */
-        WARN_ON_ONCE(align > __alignof__(unsigned long long));
+        WARN_ON_ONCE(align > SMP_CACHE_BYTES);
 
        if (unlikely(!pdata))
                return NULL;
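
The relaxed check accepts any alignment up to a cacheline. A minimal usage
sketch (not part of this commit; struct foo and foo_init are hypothetical)
of a cacheline-aligned dynamic percpu allocation that the old
__alignof__(unsigned long long) limit would have warned about:

        struct foo {
                unsigned long counter;
        } ____cacheline_aligned;        /* alignment == SMP_CACHE_BYTES */

        static struct foo *foo_pcpu;

        static int __init foo_init(void)
        {
                /* align above unsigned long long no longer warns here */
                foo_pcpu = __alloc_percpu(sizeof(struct foo),
                                          __alignof__(struct foo));
                return foo_pcpu ? 0 : -ENOMEM;
        }
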
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 8e8587444132..be68c956a660 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -2,11 +2,24 @@
 #include <linux/wait.h>
 #include <linux/backing-dev.h>
 #include <linux/fs.h>
+#include <linux/pagemap.h>
 #include <linux/sched.h>
 #include <linux/module.h>
 #include <linux/writeback.h>
 #include <linux/device.h>
 
+void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
+{
+}
+EXPORT_SYMBOL(default_unplug_io_fn);
+
+struct backing_dev_info default_backing_dev_info = {
+        .ra_pages       = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
+        .state          = 0,
+        .capabilities   = BDI_CAP_MAP_COPY,
+        .unplug_io_fn   = default_unplug_io_fn,
+};
+EXPORT_SYMBOL_GPL(default_backing_dev_info);
 
 static struct class *bdi_class;
 
@@ -166,9 +179,20 @@ static __init int bdi_class_init(void)
        bdi_debug_init();
        return 0;
 }
-
 postcore_initcall(bdi_class_init);
 
+static int __init default_bdi_init(void)
+{
+        int err;
+
+        err = bdi_init(&default_backing_dev_info);
+        if (!err)
+                bdi_register(&default_backing_dev_info, NULL, "default");
+
+        return err;
+}
+subsys_initcall(default_bdi_init);
+
 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                 const char *fmt, ...)
 {
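
default_unplug_io_fn, default_backing_dev_info and the new initcall are
moved here from mm/readahead.c (see the readahead.c hunks below), with
readahead_init() renamed to default_bdi_init(). The same init pattern works
for a device-private BDI; a hypothetical sketch (foo_bdi and foo_bdi_setup
are illustrative, not part of this commit):

        static struct backing_dev_info foo_bdi = {
                .ra_pages       = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
                .capabilities   = BDI_CAP_MAP_COPY,
                .unplug_io_fn   = default_unplug_io_fn,
        };

        static int __init foo_bdi_setup(void)
        {
                int err;

                err = bdi_init(&foo_bdi);
                if (!err)
                        err = bdi_register(&foo_bdi, NULL, "foo");
                return err;
        }
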
diff --git a/mm/highmem.c b/mm/highmem.c
index b36b83b920ff..910198037bf5 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -67,6 +67,25 @@ pte_t * pkmap_page_table;
 
 static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);
 
+/*
+ * Most architectures have no use for kmap_high_get(), so let's abstract
+ * the disabling of IRQ out of the locking in that case to save on a
+ * potential useless overhead.
+ */
+#ifdef ARCH_NEEDS_KMAP_HIGH_GET
+#define lock_kmap()             spin_lock_irq(&kmap_lock)
+#define unlock_kmap()           spin_unlock_irq(&kmap_lock)
+#define lock_kmap_any(flags)    spin_lock_irqsave(&kmap_lock, flags)
+#define unlock_kmap_any(flags)  spin_unlock_irqrestore(&kmap_lock, flags)
+#else
+#define lock_kmap()             spin_lock(&kmap_lock)
+#define unlock_kmap()           spin_unlock(&kmap_lock)
+#define lock_kmap_any(flags)    \
+                do { spin_lock(&kmap_lock); (void)(flags); } while (0)
+#define unlock_kmap_any(flags)  \
+                do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
+#endif
+
 static void flush_all_zero_pkmaps(void)
 {
        int i;
@@ -113,9 +132,9 @@ static void flush_all_zero_pkmaps(void)
  */
 void kmap_flush_unused(void)
 {
-        spin_lock(&kmap_lock);
+        lock_kmap();
        flush_all_zero_pkmaps();
-        spin_unlock(&kmap_lock);
+        unlock_kmap();
 }
 
 static inline unsigned long map_new_virtual(struct page *page)
@@ -145,10 +164,10 @@ start:
 
                __set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&pkmap_map_wait, &wait);
-                spin_unlock(&kmap_lock);
+                unlock_kmap();
                schedule();
                remove_wait_queue(&pkmap_map_wait, &wait);
-                spin_lock(&kmap_lock);
+                lock_kmap();
 
                /* Somebody else might have mapped it while we slept */
                if (page_address(page))
@@ -184,29 +203,59 @@ void *kmap_high(struct page *page)
         * For highmem pages, we can't trust "virtual" until
         * after we have the lock.
         */
-        spin_lock(&kmap_lock);
+        lock_kmap();
        vaddr = (unsigned long)page_address(page);
        if (!vaddr)
                vaddr = map_new_virtual(page);
        pkmap_count[PKMAP_NR(vaddr)]++;
        BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
-        spin_unlock(&kmap_lock);
+        unlock_kmap();
        return (void*) vaddr;
 }
 
 EXPORT_SYMBOL(kmap_high);
 
+#ifdef ARCH_NEEDS_KMAP_HIGH_GET
+/**
+ * kmap_high_get - pin a highmem page into memory
+ * @page: &struct page to pin
+ *
+ * Returns the page's current virtual memory address, or NULL if no mapping
+ * exists. When and only when a non null address is returned then a
+ * matching call to kunmap_high() is necessary.
+ *
+ * This can be called from any context.
+ */
+void *kmap_high_get(struct page *page)
+{
+        unsigned long vaddr, flags;
+
+        lock_kmap_any(flags);
+        vaddr = (unsigned long)page_address(page);
+        if (vaddr) {
+                BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
+                pkmap_count[PKMAP_NR(vaddr)]++;
+        }
+        unlock_kmap_any(flags);
+        return (void*) vaddr;
+}
+#endif
+
 /**
  * kunmap_high - map a highmem page into memory
  * @page: &struct page to unmap
+ *
+ * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
+ * only from user context.
  */
 void kunmap_high(struct page *page)
 {
        unsigned long vaddr;
        unsigned long nr;
+        unsigned long flags;
        int need_wakeup;
 
-        spin_lock(&kmap_lock);
+        lock_kmap_any(flags);
        vaddr = (unsigned long)page_address(page);
        BUG_ON(!vaddr);
        nr = PKMAP_NR(vaddr);
@@ -232,7 +281,7 @@ void kunmap_high(struct page *page)
                 */
                need_wakeup = waitqueue_active(&pkmap_map_wait);
        }
-        spin_unlock(&kmap_lock);
+        unlock_kmap_any(flags);
 
        /* do wake-up, if needed, race-free outside of the spin lock */
        if (need_wakeup)
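
Unlike kmap_high(), the new kmap_high_get() never sleeps: it only takes a
reference on an already-existing pkmap entry, which is why it may be called
from any context when ARCH_NEEDS_KMAP_HIGH_GET is defined. A usage sketch
(hypothetical helper, not part of this commit):

        static bool touch_if_mapped(struct page *page)
        {
                void *vaddr = kmap_high_get(page);   /* safe in any context */

                if (!vaddr)
                        return false;           /* no mapping to pin */

                /* ... use the pinned mapping at vaddr ... */

                kunmap_high(page);   /* required for every non-NULL return */
                return true;
        }
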
diff --git a/mm/memory.c b/mm/memory.c
index baa999e87cd2..2032ad2fc34b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1665,9 +1665,10 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
         * behaviour that some programs depend on. We mark the "original"
         * un-COW'ed pages by matching them up with "vma->vm_pgoff".
         */
-        if (addr == vma->vm_start && end == vma->vm_end)
+        if (addr == vma->vm_start && end == vma->vm_end) {
                vma->vm_pgoff = pfn;
-        else if (is_cow_mapping(vma->vm_flags))
+                vma->vm_flags |= VM_PFN_AT_MMAP;
+        } else if (is_cow_mapping(vma->vm_flags))
                return -EINVAL;
 
        vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
@@ -1679,6 +1680,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
                 * needed from higher level routine calling unmap_vmas
                 */
                vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
+                vma->vm_flags &= ~VM_PFN_AT_MMAP;
                return -EINVAL;
        }
 
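
The new VM_PFN_AT_MMAP flag records the full-VMA case in which vm_pgoff is
overloaded to hold the start pfn, and it is cleared again on the failure
path. A hypothetical driver mmap handler that takes this path (foo_mmap and
foo_phys_base are illustrative only, not part of this commit):

        static int foo_mmap(struct file *file, struct vm_area_struct *vma)
        {
                unsigned long pfn = foo_phys_base >> PAGE_SHIFT;

                /*
                 * addr == vma->vm_start and end == vma->vm_end here, so
                 * remap_pfn_range() sets vma->vm_pgoff = pfn and, after
                 * this change, VM_PFN_AT_MMAP as well.
                 */
                return remap_pfn_range(vma, vma->vm_start, pfn,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot);
        }
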
diff --git a/mm/mmap.c b/mm/mmap.c
index 00ced3ee49a8..1abb9185a686 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -20,6 +20,7 @@
 #include <linux/fs.h>
 #include <linux/personality.h>
 #include <linux/security.h>
+#include <linux/ima.h>
 #include <linux/hugetlb.h>
 #include <linux/profile.h>
 #include <linux/module.h>
@@ -1049,6 +1050,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
        error = security_file_mmap(file, reqprot, prot, flags, addr, 0);
        if (error)
                return error;
+        error = ima_file_mmap(file, prot);
+        if (error)
+                return error;
 
        return mmap_region(file, addr, len, flags, vm_flags, pgoff);
 }
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 74dc57c74349..40ca7cdb653e 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -66,7 +66,7 @@ static inline long sync_writeback_pages(void)
 /*
  * Start background writeback (via pdflush) at this percentage
  */
-int dirty_background_ratio = 5;
+int dirty_background_ratio = 10;
 
 /*
  * dirty_background_bytes starts at 0 (disabled) so that it is a function of
@@ -83,7 +83,7 @@ int vm_highmem_is_dirtyable;
 /*
  * The generator of dirty data starts writeback at this percentage
  */
-int vm_dirty_ratio = 10;
+int vm_dirty_ratio = 20;
 
 /*
  * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
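
Both defaults double. As a worked example, on a machine with 4 GiB of
dirtyable memory, background writeback via pdflush now starts at about
4096 MiB * 10% = ~410 MiB of dirty data (previously ~205 MiB), and dirtying
tasks are throttled at about 4096 MiB * 20% = ~820 MiB (previously
~410 MiB). dirty_background_bytes and vm_dirty_bytes remain available when
absolute limits are preferred over percentages.
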
diff --git a/mm/percpu.c b/mm/percpu.c
index bfe6a3afaf45..1aa5d8fbca12 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -46,7 +46,8 @@
  * - define CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
  *
  * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
- *   regular address to percpu pointer and back
+ *   regular address to percpu pointer and back if they need to be
+ *   different from the default
  *
  * - use pcpu_setup_first_chunk() during percpu area initialization to
  *   setup the first chunk containing the kernel static percpu area
@@ -67,11 +68,24 @@
 #include <linux/workqueue.h>
 
 #include <asm/cacheflush.h>
+#include <asm/sections.h>
 #include <asm/tlbflush.h>
 
 #define PCPU_SLOT_BASE_SHIFT    5       /* 1-31 shares the same slot */
 #define PCPU_DFL_MAP_ALLOC      16      /* start a map with 16 ents */
 
+/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
+#ifndef __addr_to_pcpu_ptr
+#define __addr_to_pcpu_ptr(addr)                                        \
+        (void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr  \
+                 + (unsigned long)__per_cpu_start)
+#endif
+#ifndef __pcpu_ptr_to_addr
+#define __pcpu_ptr_to_addr(ptr)                                         \
+        (void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr   \
+                 - (unsigned long)__per_cpu_start)
+#endif
+
 struct pcpu_chunk {
        struct list_head list;          /* linked to pcpu_slot lists */
        struct rb_node rb_node;         /* key is chunk->vm->addr */
@@ -1013,8 +1027,8 @@ EXPORT_SYMBOL_GPL(free_percpu);
 * @get_page_fn: callback to fetch page pointer
 * @static_size: the size of static percpu area in bytes
 * @reserved_size: the size of reserved percpu area in bytes
- * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
+ * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
 * @base_addr: mapped address, NULL for auto
 * @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary
 *
@@ -1039,14 +1053,14 @@ EXPORT_SYMBOL_GPL(free_percpu);
 * limited offset range for symbol relocations to guarantee module
 * percpu symbols fall inside the relocatable range.
 *
+ * @dyn_size, if non-negative, determines the number of bytes
+ * available for dynamic allocation in the first chunk. Specifying
+ * non-negative value makes percpu leave alone the area beyond
+ * @static_size + @reserved_size + @dyn_size.
+ *
 * @unit_size, if non-negative, specifies unit size and must be
 * aligned to PAGE_SIZE and equal to or larger than @static_size +
- * @reserved_size + @dyn_size.
- *
- * @dyn_size, if non-negative, limits the number of bytes available
- * for dynamic allocation in the first chunk. Specifying non-negative
- * value make percpu leave alone the area beyond @static_size +
- * @reserved_size + @dyn_size.
+ * @reserved_size + if non-negative, @dyn_size.
 *
 * Non-null @base_addr means that the caller already allocated virtual
 * region for the first chunk and mapped it. percpu must not mess
@@ -1069,12 +1083,14 @@ EXPORT_SYMBOL_GPL(free_percpu);
 */
 size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
                                     size_t static_size, size_t reserved_size,
-                                     ssize_t unit_size, ssize_t dyn_size,
+                                     ssize_t dyn_size, ssize_t unit_size,
                                     void *base_addr,
                                     pcpu_populate_pte_fn_t populate_pte_fn)
 {
        static struct vm_struct first_vm;
        static int smap[2], dmap[2];
+        size_t size_sum = static_size + reserved_size +
+                          (dyn_size >= 0 ? dyn_size : 0);
        struct pcpu_chunk *schunk, *dchunk = NULL;
        unsigned int cpu;
        int nr_pages;
@@ -1085,20 +1101,18 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
                     ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
        BUG_ON(!static_size);
        if (unit_size >= 0) {
-                BUG_ON(unit_size < static_size + reserved_size +
-                       (dyn_size >= 0 ? dyn_size : 0));
+                BUG_ON(unit_size < size_sum);
                BUG_ON(unit_size & ~PAGE_MASK);
-        } else {
-                BUG_ON(dyn_size >= 0);
+                BUG_ON(unit_size < PCPU_MIN_UNIT_SIZE);
+        } else
                BUG_ON(base_addr);
-        }
        BUG_ON(base_addr && populate_pte_fn);
 
        if (unit_size >= 0)
                pcpu_unit_pages = unit_size >> PAGE_SHIFT;
        else
                pcpu_unit_pages = max_t(int, PCPU_MIN_UNIT_SIZE >> PAGE_SHIFT,
-                                        PFN_UP(static_size + reserved_size));
+                                        PFN_UP(size_sum));
 
        pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
        pcpu_chunk_size = num_possible_cpus() * pcpu_unit_size;
@@ -1224,3 +1238,89 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
        pcpu_base_addr = (void *)pcpu_chunk_addr(schunk, 0, 0);
        return pcpu_unit_size;
 }
+
+/*
+ * Embedding first chunk setup helper.
+ */
+static void *pcpue_ptr __initdata;
+static size_t pcpue_size __initdata;
+static size_t pcpue_unit_size __initdata;
+
+static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
+{
+        size_t off = (size_t)pageno << PAGE_SHIFT;
+
+        if (off >= pcpue_size)
+                return NULL;
+
+        return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size + off);
+}
+
+/**
+ * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
+ * @static_size: the size of static percpu area in bytes
+ * @reserved_size: the size of reserved percpu area in bytes
+ * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
+ * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
+ *
+ * This is a helper to ease setting up embedded first percpu chunk and
+ * can be called where pcpu_setup_first_chunk() is expected.
+ *
+ * If this function is used to setup the first chunk, it is allocated
+ * as a contiguous area using bootmem allocator and used as-is without
+ * being mapped into vmalloc area. This enables the first chunk to
+ * piggy back on the linear physical mapping which often uses larger
+ * page size.
+ *
+ * When @dyn_size is positive, dynamic area might be larger than
+ * specified to fill page alignment. Also, when @dyn_size is auto,
+ * @dyn_size does not fill the whole first chunk but only what's
+ * necessary for page alignment after static and reserved areas.
+ *
+ * If the needed size is smaller than the minimum or specified unit
+ * size, the leftover is returned to the bootmem allocator.
+ *
+ * RETURNS:
+ * The determined pcpu_unit_size which can be used to initialize
+ * percpu access on success, -errno on failure.
+ */
+ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
+                                      ssize_t dyn_size, ssize_t unit_size)
+{
+        unsigned int cpu;
+
+        /* determine parameters and allocate */
+        pcpue_size = PFN_ALIGN(static_size + reserved_size +
+                               (dyn_size >= 0 ? dyn_size : 0));
+        if (dyn_size != 0)
+                dyn_size = pcpue_size - static_size - reserved_size;
+
+        if (unit_size >= 0) {
+                BUG_ON(unit_size < pcpue_size);
+                pcpue_unit_size = unit_size;
+        } else
+                pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);
+
+        pcpue_ptr = __alloc_bootmem_nopanic(
+                                        num_possible_cpus() * pcpue_unit_size,
+                                        PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+        if (!pcpue_ptr)
+                return -ENOMEM;
+
+        /* return the leftover and copy */
+        for_each_possible_cpu(cpu) {
+                void *ptr = pcpue_ptr + cpu * pcpue_unit_size;
+
+                free_bootmem(__pa(ptr + pcpue_size),
+                             pcpue_unit_size - pcpue_size);
+                memcpy(ptr, __per_cpu_load, static_size);
+        }
+
+        /* we're ready, commit */
+        pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n",
+                pcpue_size >> PAGE_SHIFT, pcpue_ptr, static_size);
+
+        return pcpu_setup_first_chunk(pcpue_get_page, static_size,
+                                      reserved_size, dyn_size,
+                                      pcpue_unit_size, pcpue_ptr, NULL);
+}
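
A sketch of how an architecture might adopt the new helper (illustrative
only; the reserve constants, panic policy and offset fixup below are
assumptions, not part of this commit):

        void __init setup_per_cpu_areas(void)
        {
                size_t static_size = __per_cpu_end - __per_cpu_start;
                unsigned long delta;
                unsigned int cpu;
                ssize_t unit_size;

                /* note the argument order: dyn_size before unit_size */
                unit_size = pcpu_embed_first_chunk(static_size,
                                                   PERCPU_MODULE_RESERVE,
                                                   PERCPU_DYNAMIC_RESERVE,
                                                   -1);
                if (unit_size < 0)
                        panic("cannot embed the first percpu chunk");

                /* point the percpu offsets at the new first chunk */
                delta = (unsigned long)pcpu_base_addr -
                        (unsigned long)__per_cpu_start;
                for_each_possible_cpu(cpu)
                        __per_cpu_offset[cpu] = delta + cpu * unit_size;
        }
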
diff --git a/mm/readahead.c b/mm/readahead.c
index bec83c15a78f..9ce303d4b810 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -17,19 +17,6 @@
 #include <linux/pagevec.h>
 #include <linux/pagemap.h>
 
-void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
-{
-}
-EXPORT_SYMBOL(default_unplug_io_fn);
-
-struct backing_dev_info default_backing_dev_info = {
-        .ra_pages       = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
-        .state          = 0,
-        .capabilities   = BDI_CAP_MAP_COPY,
-        .unplug_io_fn   = default_unplug_io_fn,
-};
-EXPORT_SYMBOL_GPL(default_backing_dev_info);
-
 /*
  * Initialise a struct file's readahead state. Assumes that the caller has
  * memset *ra to zero.
@@ -233,18 +220,6 @@ unsigned long max_sane_readahead(unsigned long nr)
                + node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
 }
 
-static int __init readahead_init(void)
-{
-        int err;
-
-        err = bdi_init(&default_backing_dev_info);
-        if (!err)
-                bdi_register(&default_backing_dev_info, NULL, "default");
-
-        return err;
-}
-subsys_initcall(readahead_init);
-
 /*
  * Submit IO for the read-ahead request in file_ra_state.
 */
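
(The two blocks removed above are the counterparts of the additions in
mm/backing-dev.c earlier in this patch: default_unplug_io_fn and
default_backing_dev_info moved there verbatim, and readahead_init() became
default_bdi_init().)
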
diff --git a/mm/shmem.c b/mm/shmem.c
index 4103a239ce84..7ec78e24a30d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -28,6 +28,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/swap.h>
+#include <linux/ima.h>
 
 static struct vfsmount *shm_mnt;
 
@@ -2665,6 +2666,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
        if (IS_ERR(file))
                return PTR_ERR(file);
 
+        ima_shm_check(file);
        if (vma->vm_file)
                fput(vma->vm_file);
        vma->vm_file = file;
diff --git a/mm/slob.c b/mm/slob.c
index 52bc8a2bd9ef..0bfa680a8981 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -126,9 +126,9 @@ static LIST_HEAD(free_slob_medium);
 static LIST_HEAD(free_slob_large);
 
 /*
- * slob_page: True for all slob pages (false for bigblock pages)
+ * is_slob_page: True for all slob pages (false for bigblock pages)
 */
-static inline int slob_page(struct slob_page *sp)
+static inline int is_slob_page(struct slob_page *sp)
 {
        return PageSlobPage((struct page *)sp);
 }
@@ -143,6 +143,11 @@ static inline void clear_slob_page(struct slob_page *sp)
        __ClearPageSlobPage((struct page *)sp);
 }
 
+static inline struct slob_page *slob_page(const void *addr)
+{
+        return (struct slob_page *)virt_to_page(addr);
+}
+
 /*
  * slob_page_free: true for pages on free_slob_pages list.
 */
@@ -230,7 +235,7 @@ static int slob_last(slob_t *s)
        return !((unsigned long)slob_next(s) & ~PAGE_MASK);
 }
 
-static void *slob_new_page(gfp_t gfp, int order, int node)
+static void *slob_new_pages(gfp_t gfp, int order, int node)
 {
        void *page;
 
@@ -247,12 +252,17 @@ static void *slob_new_page(gfp_t gfp, int order, int node)
        return page_address(page);
 }
 
+static void slob_free_pages(void *b, int order)
+{
+        free_pages((unsigned long)b, order);
+}
+
 /*
  * Allocate a slob block within a given slob_page sp.
 */
 static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
 {
-        slob_t *prev, *cur, *aligned = 0;
+        slob_t *prev, *cur, *aligned = NULL;
        int delta = 0, units = SLOB_UNITS(size);
 
        for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
@@ -349,10 +359,10 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 
        /* Not enough space: must allocate a new page */
        if (!b) {
-                b = slob_new_page(gfp & ~__GFP_ZERO, 0, node);
+                b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
                if (!b)
-                        return 0;
-                sp = (struct slob_page *)virt_to_page(b);
+                        return NULL;
+                sp = slob_page(b);
                set_slob_page(sp);
 
                spin_lock_irqsave(&slob_lock, flags);
@@ -384,7 +394,7 @@ static void slob_free(void *block, int size)
                return;
        BUG_ON(!size);
 
-        sp = (struct slob_page *)virt_to_page(block);
+        sp = slob_page(block);
        units = SLOB_UNITS(size);
 
        spin_lock_irqsave(&slob_lock, flags);
@@ -393,10 +403,11 @@ static void slob_free(void *block, int size)
                /* Go directly to page allocator. Do not pass slob allocator */
                if (slob_page_free(sp))
                        clear_slob_page_free(sp);
+                spin_unlock_irqrestore(&slob_lock, flags);
                clear_slob_page(sp);
                free_slob_page(sp);
                free_page((unsigned long)b);
-                goto out;
+                return;
        }
 
        if (!slob_page_free(sp)) {
@@ -476,7 +487,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
        } else {
                void *ret;
 
-                ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
+                ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
                if (ret) {
                        struct page *page;
                        page = virt_to_page(ret);
@@ -494,8 +505,8 @@ void kfree(const void *block)
        if (unlikely(ZERO_OR_NULL_PTR(block)))
                return;
 
-        sp = (struct slob_page *)virt_to_page(block);
-        if (slob_page(sp)) {
+        sp = slob_page(block);
+        if (is_slob_page(sp)) {
                int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
                unsigned int *m = (unsigned int *)(block - align);
                slob_free(m, *m + align);
@@ -513,8 +524,8 @@ size_t ksize(const void *block)
        if (unlikely(block == ZERO_SIZE_PTR))
                return 0;
 
-        sp = (struct slob_page *)virt_to_page(block);
-        if (slob_page(sp)) {
+        sp = slob_page(block);
+        if (is_slob_page(sp)) {
                int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
                unsigned int *m = (unsigned int *)(block - align);
                return SLOB_UNITS(*m) * SLOB_UNIT;
@@ -573,7 +584,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
        if (c->size < PAGE_SIZE)
                b = slob_alloc(c->size, flags, c->align, node);
        else
-                b = slob_new_page(flags, get_order(c->size), node);
+                b = slob_new_pages(flags, get_order(c->size), node);
 
        if (c->ctor)
                c->ctor(b);
@@ -587,7 +598,7 @@ static void __kmem_cache_free(void *b, int size)
        if (size < PAGE_SIZE)
                slob_free(b, size);
        else
-                free_pages((unsigned long)b, get_order(size));
+                slob_free_pages(b, get_order(size));
 }
 
 static void kmem_rcu_free(struct rcu_head *head)
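
Besides the slob_new_page() -> slob_new_pages() rename and the
slob_page()/is_slob_page() helper split, note the ordering change in
slob_free(): slob_lock is now dropped before clear_slob_page(),
free_slob_page() and free_page(), so the page allocator is no longer
entered with the spinlock held, and that path now returns directly instead
of taking the old "goto out".
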
diff --git a/mm/slub.c b/mm/slub.c
index 0280eee6cf37..c65a4edafc33 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -374,14 +374,8 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 static void set_track(struct kmem_cache *s, void *object,
                        enum track_item alloc, unsigned long addr)
 {
-        struct track *p;
-
-        if (s->offset)
-                p = object + s->offset + sizeof(void *);
-        else
-                p = object + s->inuse;
+        struct track *p = get_track(s, object, alloc);
 
-        p += alloc;
        if (addr) {
                p->addr = addr;
                p->cpu = smp_processor_id();
@@ -1335,7 +1329,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
                n = get_node(s, zone_to_nid(zone));
 
                if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
-                                n->nr_partial > n->min_partial) {
+                                n->nr_partial > s->min_partial) {
                        page = get_partial_node(n);
                        if (page)
                                return page;
@@ -1387,7 +1381,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
                slab_unlock(page);
        } else {
                stat(c, DEACTIVATE_EMPTY);
-                if (n->nr_partial < n->min_partial) {
+                if (n->nr_partial < s->min_partial) {
                        /*
                         * Adding an empty slab to the partial slabs in order
                         * to avoid page allocator overhead. This slab needs
@@ -1724,7 +1718,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
        c = get_cpu_slab(s, smp_processor_id());
        debug_check_no_locks_freed(object, c->objsize);
        if (!(s->flags & SLAB_DEBUG_OBJECTS))
-                debug_check_no_obj_freed(object, s->objsize);
+                debug_check_no_obj_freed(object, c->objsize);
        if (likely(page == c->page && c->node >= 0)) {
                object[c->offset] = c->freelist;
                c->freelist = object;
@@ -1844,6 +1838,7 @@ static inline int calculate_order(int size)
        int order;
        int min_objects;
        int fraction;
+        int max_objects;
 
        /*
         * Attempt to find best configuration for a slab. This
@@ -1856,6 +1851,9 @@ static inline int calculate_order(int size)
        min_objects = slub_min_objects;
        if (!min_objects)
                min_objects = 4 * (fls(nr_cpu_ids) + 1);
+        max_objects = (PAGE_SIZE << slub_max_order)/size;
+        min_objects = min(min_objects, max_objects);
+
        while (min_objects > 1) {
                fraction = 16;
                while (fraction >= 4) {
@@ -1865,7 +1863,7 @@ static inline int calculate_order(int size)
                                return order;
                        fraction /= 2;
                }
-                min_objects /= 2;
+                min_objects --;
        }
 
        /*
@@ -1928,17 +1926,6 @@ static void
 init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 {
        n->nr_partial = 0;
-
-        /*
-         * The larger the object size is, the more pages we want on the partial
-         * list to avoid pounding the page allocator excessively.
-         */
-        n->min_partial = ilog2(s->size);
-        if (n->min_partial < MIN_PARTIAL)
-                n->min_partial = MIN_PARTIAL;
-        else if (n->min_partial > MAX_PARTIAL)
-                n->min_partial = MAX_PARTIAL;
-
        spin_lock_init(&n->list_lock);
        INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
@@ -2181,6 +2168,15 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 }
 #endif
 
+static void set_min_partial(struct kmem_cache *s, unsigned long min)
+{
+        if (min < MIN_PARTIAL)
+                min = MIN_PARTIAL;
+        else if (min > MAX_PARTIAL)
+                min = MAX_PARTIAL;
+        s->min_partial = min;
+}
+
 /*
  * calculate_sizes() determines the order and the distribution of data within
  * a slab object.
@@ -2319,6 +2315,11 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
        if (!calculate_sizes(s, -1))
                goto error;
 
+        /*
+         * The larger the object size is, the more pages we want on the partial
+         * list to avoid pounding the page allocator excessively.
+         */
+        set_min_partial(s, ilog2(s->size));
        s->refcount = 1;
 #ifdef CONFIG_NUMA
        s->remote_node_defrag_ratio = 1000;
@@ -2475,7 +2476,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
 *              Kmalloc subsystem
 *******************************************************************/
 
-struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 static int __init setup_slub_min_order(char *str)
@@ -2537,7 +2538,7 @@ panic:
 }
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
+static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
 
 static void sysfs_add_func(struct work_struct *w)
 {
@@ -2658,7 +2659,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 {
        struct kmem_cache *s;
 
-        if (unlikely(size > PAGE_SIZE))
+        if (unlikely(size > SLUB_MAX_SIZE))
                return kmalloc_large(size, flags);
 
        s = get_slab(size, flags);
@@ -2686,7 +2687,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
        struct kmem_cache *s;
 
-        if (unlikely(size > PAGE_SIZE))
+        if (unlikely(size > SLUB_MAX_SIZE))
                return kmalloc_large_node(size, flags, node);
 
        s = get_slab(size, flags);
@@ -2986,7 +2987,7 @@ void __init kmem_cache_init(void)
                caches++;
        }
 
-        for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
+        for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
                create_kmalloc_cache(&kmalloc_caches[i],
                        "kmalloc", 1 << i, GFP_KERNEL);
                caches++;
@@ -3023,7 +3024,7 @@ void __init kmem_cache_init(void)
        slab_state = UP;
 
        /* Provide the correct kmalloc names now that the caches are up */
-        for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
+        for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
                kmalloc_caches[i]. name =
                        kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
@@ -3223,7 +3224,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
        struct kmem_cache *s;
 
-        if (unlikely(size > PAGE_SIZE))
+        if (unlikely(size > SLUB_MAX_SIZE))
                return kmalloc_large(size, gfpflags);
 
        s = get_slab(size, gfpflags);
@@ -3239,7 +3240,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 {
        struct kmem_cache *s;
 
-        if (unlikely(size > PAGE_SIZE))
+        if (unlikely(size > SLUB_MAX_SIZE))
                return kmalloc_large_node(size, gfpflags, node);
 
        s = get_slab(size, gfpflags);
@@ -3836,6 +3837,26 @@ static ssize_t order_show(struct kmem_cache *s, char *buf)
 }
 SLAB_ATTR(order);
 
+static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
+{
+        return sprintf(buf, "%lu\n", s->min_partial);
+}
+
+static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
+                                 size_t length)
+{
+        unsigned long min;
+        int err;
+
+        err = strict_strtoul(buf, 10, &min);
+        if (err)
+                return err;
+
+        set_min_partial(s, min);
+        return length;
+}
+SLAB_ATTR(min_partial);
+
 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
 {
        if (s->ctor) {
@@ -4151,6 +4172,7 @@ static struct attribute *slab_attrs[] = {
        &object_size_attr.attr,
        &objs_per_slab_attr.attr,
        &order_attr.attr,
+        &min_partial_attr.attr,
        &objects_attr.attr,
        &objects_partial_attr.attr,
        &total_objects_attr.attr,
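
Two tuning consequences, as a worked example (assuming 4 KiB pages, the
default slub_max_order of 3, and the MIN_PARTIAL/MAX_PARTIAL values of 5
and 10 from this era's slub.c): calculate_order() now caps min_objects at
max_objects, so a 4096-byte object yields max_objects = (4096 << 3) / 4096
= 8 and the order search starts at 8 rather than at a value derived only
from the CPU count; and set_min_partial(s, ilog2(s->size)) clamps, e.g.,
ilog2(4096) = 12 down to MAX_PARTIAL = 10. The per-cache s->min_partial
(which replaces the old per-node n->min_partial) is also now writable at
runtime through the new min_partial sysfs attribute.
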
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 592bb9619f75..1cdbf0b05727 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1262,7 +1262,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
         * Move the pages to the [file or anon] inactive list.
         */
        pagevec_init(&pvec, 1);
-        pgmoved = 0;
        lru = LRU_BASE + file * LRU_FILE;
 
        spin_lock_irq(&zone->lru_lock);
@@ -1274,6 +1273,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
         */
        reclaim_stat->recent_rotated[!!file] += pgmoved;
 
+        pgmoved = 0;
        while (!list_empty(&l_inactive)) {
                page = lru_to_page(&l_inactive);
                prefetchw_prev_lru_page(page, &l_inactive, flags);
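
Moving the pgmoved reset below the reclaim_stat update appears to fix the
rotation accounting: previously pgmoved was zeroed before
reclaim_stat->recent_rotated[!!file] += pgmoved ran, so the count
accumulated while scanning the active list was discarded; resetting it only
afterwards (and before the l_inactive loop reuses it as a counter) lets
recent_rotated see the real value.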