commit dd504ea16f34a29da4aa933ae7ab917fcfd25fd7
tree 0502645dc159be29c33c992e9e56dc3156074279
parent 5cf4cf65a8ccca44ec9b357ebdb2b517269d7e8a
parent 0479ea0eab197b3e5d4c731f526c02e5e3fbfbd0
author Trond Myklebust <Trond.Myklebust@netapp.com> 2007-05-17 11:36:59 -0400
committer Trond Myklebust <Trond.Myklebust@netapp.com> 2007-05-17 11:36:59 -0400

    Merge branch 'master' of /home/trondmy/repositories/git/linux-2.6/
 Documentation/DocBook/kernel-locking.tmpl | 123
 Documentation/gpio.txt | 8
 Documentation/vm/slabinfo.c | 17
 MAINTAINERS | 4
 arch/blackfin/Kconfig | 8
 arch/frv/Kconfig | 8
 arch/i386/kernel/cpu/mtrr/generic.c | 2
 arch/i386/kernel/cpu/mtrr/main.c | 2
 arch/i386/kernel/smp.c | 2
 arch/m68knommu/Kconfig | 8
 arch/powerpc/platforms/cell/spufs/inode.c | 4
 arch/v850/Kconfig | 8
 drivers/acpi/numa.c | 8
 drivers/mtd/ubi/eba.c | 3
 drivers/rtc/Kconfig | 6
 drivers/rtc/rtc-omap.c | 4
 drivers/serial/8250.c | 21
 drivers/serial/icom.c | 55
 drivers/video/console/vgacon.c | 9
 fs/adfs/super.c | 3
 fs/affs/super.c | 8
 fs/afs/super.c | 20
 fs/befs/linuxvfs.c | 6
 fs/bfs/inode.c | 3
 fs/block_dev.c | 16
 fs/buffer.c | 25
 fs/cifs/cifsfs.c | 6
 fs/coda/inode.c | 3
 fs/compat.c | 13
 fs/dquot.c | 2
 fs/ecryptfs/main.c | 3
 fs/ecryptfs/mmap.c | 14
 fs/efs/super.c | 3
 fs/exec.c | 4
 fs/ext2/super.c | 8
 fs/ext3/super.c | 10
 fs/ext4/super.c | 10
 fs/fat/cache.c | 3
 fs/fat/inode.c | 14
 fs/fuse/inode.c | 3
 fs/gfs2/main.c | 34
 fs/hfs/super.c | 3
 fs/hfsplus/super.c | 3
 fs/hpfs/super.c | 8
 fs/hugetlbfs/inode.c | 3
 fs/inode.c | 3
 fs/isofs/inode.c | 3
 fs/jffs2/super.c | 6
 fs/jfs/jfs_metapage.c | 18
 fs/jfs/super.c | 22
 fs/locks.c | 3
 fs/minix/inode.c | 3
 fs/ncpfs/inode.c | 6
 fs/nfs/inode.c | 28
 fs/ntfs/super.c | 3
 fs/ocfs2/dlm/dlmfs.c | 8
 fs/ocfs2/super.c | 38
 fs/openpromfs/inode.c | 3
 fs/proc/inode.c | 3
 fs/qnx4/inode.c | 3
 fs/quota.c | 23
 fs/reiserfs/super.c | 10
 fs/romfs/inode.c | 7
 fs/smbfs/inode.c | 3
 fs/sysv/inode.c | 3
 fs/udf/super.c | 6
 fs/ufs/super.c | 3
 fs/xfs/linux-2.6/xfs_super.c | 3
 include/acpi/acpi_numa.h | 7
 include/linux/binfmts.h | 2
 include/linux/kmalloc_sizes.h | 20
 include/linux/pci_ids.h | 1
 include/linux/rmap.h | 13
 include/linux/slab.h | 21
 include/linux/slab_def.h | 3
 include/linux/slub_def.h | 27
 include/linux/smp.h | 7
 include/linux/workqueue.h | 2
 init/Kconfig | 8
 ipc/mqueue.c | 3
 kernel/fork.c | 6
 kernel/power/disk.c | 3
 kernel/power/main.c | 4
 kernel/sysctl.c | 2
 mm/memory.c | 2
 mm/rmap.c | 66
 mm/shmem.c | 8
 mm/slab.c | 57
 mm/slob.c | 53
 mm/slub.c | 234
 mm/vmalloc.c | 2
 net/ipx/af_ipx.c | 2
 net/socket.c | 3
 net/sunrpc/rpc_pipe.c | 24
 94 files changed, 647 insertions(+), 669 deletions(-)
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl
index 644c3884fab..0a441f73261 100644
--- a/Documentation/DocBook/kernel-locking.tmpl
+++ b/Documentation/DocBook/kernel-locking.tmpl
@@ -551,10 +551,12 @@
 <function>spin_lock_irqsave()</function>, which is a superset
 of all other spinlock primitives.
 </para>
+
 <table>
 <title>Table of Locking Requirements</title>
 <tgroup cols="11">
 <tbody>
+
 <row>
 <entry></entry>
 <entry>IRQ Handler A</entry>
@@ -576,97 +578,128 @@
 
 <row>
 <entry>IRQ Handler B</entry>
-<entry>spin_lock_irqsave</entry>
+<entry>SLIS</entry>
 <entry>None</entry>
 </row>
 
 <row>
 <entry>Softirq A</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SL</entry>
 </row>
 
 <row>
 <entry>Softirq B</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SL</entry>
+<entry>SL</entry>
 </row>
 
 <row>
 <entry>Tasklet A</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SL</entry>
+<entry>SL</entry>
 <entry>None</entry>
 </row>
 
 <row>
 <entry>Tasklet B</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>SL</entry>
 <entry>None</entry>
 </row>
 
 <row>
 <entry>Timer A</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>SL</entry>
 <entry>None</entry>
 </row>
 
 <row>
 <entry>Timer B</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>SL</entry>
 <entry>None</entry>
 </row>
 
 <row>
 <entry>User Context A</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
 <entry>None</entry>
 </row>
 
 <row>
 <entry>User Context B</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>DI</entry>
+<entry>None</entry>
+</row>
+
+</tbody>
+</tgroup>
+</table>
+
+<table>
+<title>Legend for Locking Requirements Table</title>
+<tgroup cols="2">
+<tbody>
+
+<row>
+<entry>SLIS</entry>
+<entry>spin_lock_irqsave</entry>
+</row>
+<row>
+<entry>SLI</entry>
 <entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
+</row>
+<row>
+<entry>SL</entry>
+<entry>spin_lock</entry>
+</row>
+<row>
+<entry>SLBH</entry>
 <entry>spin_lock_bh</entry>
+</row>
+<row>
+<entry>DI</entry>
 <entry>down_interruptible</entry>
-<entry>None</entry>
 </row>
 
 </tbody>
 </tgroup>
 </table>
+
 </sect1>
 </chapter>
 
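The table is easiest to audit with an example. Each row names the primitive its context must use against the column's context: user context sharing data with a softirq takes the _bh variant (the SLBH cells), while two softirqs sharing data need only plain spin_lock (the SL cells). A minimal sketch of those two rules; the lock and counter are invented and are not part of this patch:

	static DEFINE_SPINLOCK(foo_lock);	/* invented lock */
	static unsigned long foo_events;

	/* softirq vs. softirq: softirqs never preempt each other on one
	 * CPU, so plain spin_lock() suffices (the "SL" table entries) */
	static void foo_softirq_handler(void)
	{
		spin_lock(&foo_lock);
		foo_events++;
		spin_unlock(&foo_lock);
	}

	/* user context vs. softirq: block softirqs on this CPU while the
	 * lock is held (the "SLBH" table entries) */
	static unsigned long foo_read_events(void)
	{
		unsigned long n;

		spin_lock_bh(&foo_lock);
		n = foo_events;
		spin_unlock_bh(&foo_lock);
		return n;
	}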
diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt
index e8be0abb346..36af58eba13 100644
--- a/Documentation/gpio.txt
+++ b/Documentation/gpio.txt
@@ -111,7 +111,9 @@ setting up a platform_device using the GPIO, is mark its direction:
 
 The return value is zero for success, else a negative errno. It should
 be checked, since the get/set calls don't have error returns and since
-misconfiguration is possible. (These calls could sleep.)
+misconfiguration is possible. You should normally issue these calls from
+a task context. However, for spinlock-safe GPIOs it's OK to use them
+before tasking is enabled, as part of early board setup.
 
 For output GPIOs, the value provided becomes the initial output value.
 This helps avoid signal glitching during system startup.
@@ -197,7 +199,9 @@ However, many platforms don't currently support this mechanism.
 
 Passing invalid GPIO numbers to gpio_request() will fail, as will requesting
 GPIOs that have already been claimed with that call. The return value of
-gpio_request() must be checked. (These calls could sleep.)
+gpio_request() must be checked. You should normally issue these calls from
+a task context. However, for spinlock-safe GPIOs it's OK to request GPIOs
+before tasking is enabled, as part of early board setup.
 
 These calls serve two basic purposes. One is marking the signals which
 are actually in use as GPIOs, for better diagnostics; systems may have
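Both amended paragraphs describe the same convention, so a compact sketch may help; the GPIO number, label, and function name are invented for illustration and are not part of this patch. The calls themselves are the ones gpio.txt documents:

	/* invented example: claim a GPIO and mark it as an output */
	static int foo_led_setup(void)
	{
		int err;

		err = gpio_request(42, "foo-led");	/* 42 is made up */
		if (err < 0)
			return err;		/* the request can fail */

		/* initial value 0 avoids glitching, per the text above */
		err = gpio_direction_output(42, 0);
		if (err < 0)
			gpio_free(42);		/* direction can fail too */
		return err;
	}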
diff --git a/Documentation/vm/slabinfo.c b/Documentation/vm/slabinfo.c
index 686a8e04a4f..434af27a32a 100644
--- a/Documentation/vm/slabinfo.c
+++ b/Documentation/vm/slabinfo.c
@@ -242,6 +242,9 @@ void decode_numa_list(int *numa, char *t)
 
 	memset(numa, 0, MAX_NODES * sizeof(int));
 
+	if (!t)
+		return;
+
 	while (*t == 'N') {
 		t++;
 		node = strtoul(t, &t, 10);
@@ -386,7 +389,9 @@ void report(struct slabinfo *s)
 {
 	if (strcmp(s->name, "*") == 0)
 		return;
-	printf("\nSlabcache: %-20s Aliases: %2d Order : %2d\n", s->name, s->aliases, s->order);
+
+	printf("\nSlabcache: %-20s Aliases: %2d Order : %2d Objects: %d\n",
+		s->name, s->aliases, s->order, s->objects);
 	if (s->hwcache_align)
 		printf("** Hardware cacheline aligned\n");
 	if (s->cache_dma)
@@ -791,11 +796,11 @@ void totals(void)
 
 	store_size(b1, total_size);store_size(b2, total_waste);
 	store_size(b3, total_waste * 100 / total_used);
-	printf("Memory used: %6s # Loss : %6s MRatio: %6s%%\n", b1, b2, b3);
+	printf("Memory used: %6s # Loss : %6s MRatio:%6s%%\n", b1, b2, b3);
 
 	store_size(b1, total_objects);store_size(b2, total_partobj);
 	store_size(b3, total_partobj * 100 / total_objects);
-	printf("# Objects : %6s # PartObj: %6s ORatio: %6s%%\n", b1, b2, b3);
+	printf("# Objects : %6s # PartObj: %6s ORatio:%6s%%\n", b1, b2, b3);
 
 	printf("\n");
 	printf("Per Cache Average Min Max Total\n");
@@ -818,7 +823,7 @@ void totals(void)
 	store_size(b1, avg_ppart);store_size(b2, min_ppart);
 	store_size(b3, max_ppart);
 	store_size(b4, total_partial * 100 / total_slabs);
-	printf("%%PartSlab %10s%% %10s%% %10s%% %10s%%\n",
+	printf("%%PartSlab%10s%% %10s%% %10s%% %10s%%\n",
 		b1, b2, b3, b4);
 
 	store_size(b1, avg_partobj);store_size(b2, min_partobj);
@@ -830,7 +835,7 @@ void totals(void)
 	store_size(b1, avg_ppartobj);store_size(b2, min_ppartobj);
 	store_size(b3, max_ppartobj);
 	store_size(b4, total_partobj * 100 / total_objects);
-	printf("%% PartObj %10s%% %10s%% %10s%% %10s%%\n",
+	printf("%% PartObj%10s%% %10s%% %10s%% %10s%%\n",
 		b1, b2, b3, b4);
 
 	store_size(b1, avg_size);store_size(b2, min_size);
@@ -1100,6 +1105,8 @@ void output_slabs(void)
 			ops(slab);
 		else if (show_slab)
 			slabcache(slab);
+		else if (show_report)
+			report(slab);
 	}
 }
 
diff --git a/MAINTAINERS b/MAINTAINERS
index bbeb5b6b5b0..4c3277cb925 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2689,13 +2689,13 @@ L: i2c@lm-sensors.org
 S:	Maintained
 
 PARALLEL PORT SUPPORT
-L:	linux-parport@lists.infradead.org
+L:	linux-parport@lists.infradead.org (subscribers-only)
 S:	Orphan
 
 PARIDE DRIVERS FOR PARALLEL PORT IDE DEVICES
 P:	Tim Waugh
 M:	tim@cyberelk.net
-L:	linux-parport@lists.infradead.org
+L:	linux-parport@lists.infradead.org (subscribers-only)
 W:	http://www.torque.net/linux-pp.html
 S:	Maintained
 
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 1a493050932..d80e5b1d686 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -560,14 +560,6 @@ endchoice
 
 source "mm/Kconfig"
 
-config LARGE_ALLOCS
-	bool "Allow allocating large blocks (> 1MB) of memory"
-	help
-	  Allow the slab memory allocator to keep chains for very large
-	  memory sizes - upto 32MB. You may need this if your system has
-	  a lot of RAM, and you need to able to allocate very large
-	  contiguous chunks. If unsure, say N.
-
 config BFIN_DMA_5XX
 	bool "Enable DMA Support"
 	depends on (BF533 || BF532 || BF531 || BF537 || BF536 || BF534 || BF561)
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index 114738a4558..74eef7111f2 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -102,14 +102,6 @@ config HIGHPTE
 	  with a lot of RAM, this can be wasteful of precious low memory.
 	  Setting this option will put user-space page tables in high memory.
 
-config LARGE_ALLOCS
-	bool "Allow allocating large blocks (> 1MB) of memory"
-	help
-	  Allow the slab memory allocator to keep chains for very large memory
-	  sizes - up to 32MB. You may need this if your system has a lot of
-	  RAM, and you need to able to allocate very large contiguous chunks.
-	  If unsure, say N.
-
 source "mm/Kconfig"
 
 choice
diff --git a/arch/i386/kernel/cpu/mtrr/generic.c b/arch/i386/kernel/cpu/mtrr/generic.c
index 5367e32e040..c4ebb5126ef 100644
--- a/arch/i386/kernel/cpu/mtrr/generic.c
+++ b/arch/i386/kernel/cpu/mtrr/generic.c
@@ -78,7 +78,7 @@ static void __cpuinit print_fixed(unsigned base, unsigned step, const mtrr_type*
 }
 
 /* Grab all of the MTRR state for this CPU into *state */
-void __init get_mtrr_state(void)
+void get_mtrr_state(void)
 {
 	unsigned int i;
 	struct mtrr_var_range *vrs;
diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c
index 02a2f39e5e0..1cf466df330 100644
--- a/arch/i386/kernel/cpu/mtrr/main.c
+++ b/arch/i386/kernel/cpu/mtrr/main.c
@@ -639,7 +639,7 @@ static struct sysdev_driver mtrr_sysdev_driver = {
  * initialized (i.e. before smp_init()).
  *
  */
-void __init mtrr_bp_init(void)
+void mtrr_bp_init(void)
 {
 	init_ifs();
 
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index c9a7c9835ab..6299c080f6e 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -421,7 +421,7 @@ void flush_tlb_mm (struct mm_struct * mm)
 	}
 	if (!cpus_empty(cpu_mask))
 		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
-	check_pgt_cache();
+
 	preempt_enable();
 }
 
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index 823f73736bb..adc64a2bafb 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -470,14 +470,6 @@ config AVNET
 	default y
 	depends on (AVNET5282)
 
-config LARGE_ALLOCS
-	bool "Allow allocating large blocks (> 1MB) of memory"
-	help
-	  Allow the slab memory allocator to keep chains for very large
-	  memory sizes - upto 32MB. You may need this if your system has
-	  a lot of RAM, and you need to able to allocate very large
-	  contiguous chunks. If unsure, say N.
-
 config 4KSTACKS
 	bool "Use 4Kb for kernel stacks instead of 8Kb"
 	default y
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index a93f328a731..7150730e2ff 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -71,9 +71,7 @@ spufs_init_once(void *p, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct spufs_inode_info *ei = p;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		inode_init_once(&ei->vfs_inode);
-	}
+	inode_init_once(&ei->vfs_inode);
 }
 
 static struct inode *
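This spufs hunk is the template for most of the filesystem hunks below: kmem_cache constructors used to be invoked for more than one reason and had to test SLAB_CTOR_CONSTRUCTOR before touching the object; with that flag gone, the ctor runs exactly once per object, when its slab page is created, and the guard simply unwraps. Sketched for a made-up cache, using the ctor signature the kernel has at this point:

	struct foo_info {			/* invented cached object */
		spinlock_t lock;
		struct list_head list;
	};

	/* called once when the object is first constructed; no
	 * SLAB_CTOR_CONSTRUCTOR check is needed any more */
	static void foo_init_once(void *p, struct kmem_cache *cachep,
				  unsigned long flags)
	{
		struct foo_info *fi = p;

		spin_lock_init(&fi->lock);
		INIT_LIST_HEAD(&fi->list);
	}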
diff --git a/arch/v850/Kconfig b/arch/v850/Kconfig
index 5f54c1236c1..ace479ab273 100644
--- a/arch/v850/Kconfig
+++ b/arch/v850/Kconfig
@@ -240,14 +240,6 @@ menu "Processor type and features"
 config RESET_GUARD
 	bool "Reset Guard"
 
-config LARGE_ALLOCS
-	bool "Allow allocating large blocks (> 1MB) of memory"
-	help
-	  Allow the slab memory allocator to keep chains for very large
-	  memory sizes - upto 32MB. You may need this if your system has
-	  a lot of RAM, and you need to able to allocate very large
-	  contiguous chunks. If unsure, say N.
-
 source "mm/Kconfig"
 
 endmenu
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 8fcd6a15517..a2efae8a4c4 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -40,19 +40,19 @@ static nodemask_t nodes_found_map = NODE_MASK_NONE;
 #define NID_INVAL	-1
 
 /* maps to convert between proximity domain and logical node ID */
-int __cpuinitdata pxm_to_node_map[MAX_PXM_DOMAINS]
+static int pxm_to_node_map[MAX_PXM_DOMAINS]
 	= { [0 ... MAX_PXM_DOMAINS - 1] = NID_INVAL };
-int __cpuinitdata node_to_pxm_map[MAX_NUMNODES]
+static int node_to_pxm_map[MAX_NUMNODES]
 	= { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
 
-int __cpuinit pxm_to_node(int pxm)
+int pxm_to_node(int pxm)
 {
 	if (pxm < 0)
 		return NID_INVAL;
 	return pxm_to_node_map[pxm];
 }
 
-int __cpuinit node_to_pxm(int node)
+int node_to_pxm(int node)
 {
 	if (node < 0)
 		return PXM_INVAL;
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 3dba5733ed1..74002945b71 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -940,9 +940,6 @@ static void ltree_entry_ctor(void *obj, struct kmem_cache *cache,
 {
 	struct ltree_entry *le = obj;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		return;
-
 	le->users = 0;
 	init_rwsem(&le->mutex);
 }
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 95ce8f49e38..4e4c10a7fd3 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -59,7 +59,7 @@ comment "RTC interfaces"
 	depends on RTC_CLASS
 
 config RTC_INTF_SYSFS
-	boolean "sysfs"
+	boolean "/sys/class/rtc/rtcN (sysfs)"
 	depends on RTC_CLASS && SYSFS
 	default RTC_CLASS
 	help
@@ -70,7 +70,7 @@ config RTC_INTF_SYSFS
 	  will be called rtc-sysfs.
 
 config RTC_INTF_PROC
-	boolean "proc"
+	boolean "/proc/driver/rtc (procfs for rtc0)"
 	depends on RTC_CLASS && PROC_FS
 	default RTC_CLASS
 	help
@@ -82,7 +82,7 @@ config RTC_INTF_PROC
 	  will be called rtc-proc.
 
 config RTC_INTF_DEV
-	boolean "dev"
+	boolean "/dev/rtcN (character devices)"
 	depends on RTC_CLASS
 	default RTC_CLASS
 	help
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 60a8a4bb8bd..a2f84f16958 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -371,7 +371,7 @@ static int __devinit omap_rtc_probe(struct platform_device *pdev)
 		goto fail;
 	}
 	platform_set_drvdata(pdev, rtc);
-	dev_set_devdata(&rtc->dev, mem);
+	dev_set_drvdata(&rtc->dev, mem);
 
 	/* clear pending irqs, and set 1/second periodic,
 	 * which we'll use instead of update irqs
@@ -453,7 +453,7 @@ static int __devexit omap_rtc_remove(struct platform_device *pdev)
 	free_irq(omap_rtc_timer, rtc);
 	free_irq(omap_rtc_alarm, rtc);
 
-	release_resource(dev_get_devdata(&rtc->dev));
+	release_resource(dev_get_drvdata(&rtc->dev));
 	rtc_device_unregister(rtc);
 	return 0;
 }
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 48e259a0167..c84dab083a8 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -894,7 +894,7 @@ static void autoconfig_16550a(struct uart_8250_port *up)
 	quot = serial_dl_read(up);
 	quot <<= 3;
 
-	status1 = serial_in(up, 0x04); /* EXCR1 */
+	status1 = serial_in(up, 0x04); /* EXCR2 */
 	status1 &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */
 	status1 |= 0x10;  /* 1.625 divisor for baud_base --> 921600 */
 	serial_outp(up, 0x04, status1);
@@ -2617,7 +2617,22 @@ void serial8250_suspend_port(int line)
  */
 void serial8250_resume_port(int line)
 {
-	uart_resume_port(&serial8250_reg, &serial8250_ports[line].port);
+	struct uart_8250_port *up = &serial8250_ports[line];
+
+	if (up->capabilities & UART_NATSEMI) {
+		unsigned char tmp;
+
+		/* Ensure it's still in high speed mode */
+		serial_outp(up, UART_LCR, 0xE0);
+
+		tmp = serial_in(up, 0x04); /* EXCR2 */
+		tmp &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */
+		tmp |= 0x10;  /* 1.625 divisor for baud_base --> 921600 */
+		serial_outp(up, 0x04, tmp);
+
+		serial_outp(up, UART_LCR, 0);
+	}
+	uart_resume_port(&serial8250_reg, &up->port);
 }
 
 /*
@@ -2694,7 +2709,7 @@ static int serial8250_resume(struct platform_device *dev)
 		struct uart_8250_port *up = &serial8250_ports[i];
 
 		if (up->port.type != PORT_UNKNOWN && up->port.dev == &dev->dev)
-			uart_resume_port(&serial8250_reg, &up->port);
+			serial8250_resume_port(i);
 	}
 
 	return 0;
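serial8250_resume_port() now redoes the high-speed EXCR2 setup that autoconfig_16550a() applied at probe time, since a NatSemi part loses it across a power-down, and the driver's own platform resume is routed through the same function. External callers use the exported suspend/resume pair the same way; a sketch with an invented port index and power-gate helper, not from this patch:

	static int foo_board_suspend(void)
	{
		serial8250_suspend_port(0);	/* invented line number */
		foo_uart_power(0);		/* invented helper */
		return 0;
	}

	static int foo_board_resume(void)
	{
		foo_uart_power(1);
		serial8250_resume_port(0);	/* re-applies NatSemi setup */
		return 0;
	}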
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
index 6202995e821..9d3105b64a7 100644
--- a/drivers/serial/icom.c
+++ b/drivers/serial/icom.c
@@ -69,33 +69,40 @@
 
 static const struct pci_device_id icom_pci_table[] = {
 	{
 		.vendor = PCI_VENDOR_ID_IBM,
 		.device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_1,
 		.subvendor = PCI_ANY_ID,
 		.subdevice = PCI_ANY_ID,
 		.driver_data = ADAPTER_V1,
 	},
 	{
 		.vendor = PCI_VENDOR_ID_IBM,
 		.device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2,
 		.subvendor = PCI_VENDOR_ID_IBM,
 		.subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX,
 		.driver_data = ADAPTER_V2,
 	},
 	{
 		.vendor = PCI_VENDOR_ID_IBM,
 		.device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2,
 		.subvendor = PCI_VENDOR_ID_IBM,
 		.subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM,
 		.driver_data = ADAPTER_V2,
 	},
 	{
 		.vendor = PCI_VENDOR_ID_IBM,
 		.device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2,
 		.subvendor = PCI_VENDOR_ID_IBM,
 		.subdevice = PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL,
 		.driver_data = ADAPTER_V2,
 	},
+	{
+		.vendor = PCI_VENDOR_ID_IBM,
+		.device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2,
+		.subvendor = PCI_VENDOR_ID_IBM,
+		.subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE,
+		.driver_data = ADAPTER_V2,
+	},
 	{}
 };
 
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 2460b82a1d9..f46fe95f69f 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -368,9 +368,14 @@ static const char *vgacon_startup(void)
 #endif
 	}
 
+	/* SCREEN_INFO initialized? */
+	if ((ORIG_VIDEO_MODE == 0) &&
+	    (ORIG_VIDEO_LINES == 0) &&
+	    (ORIG_VIDEO_COLS == 0))
+		goto no_vga;
+
 	/* VGA16 modes are not handled by VGACON */
-	if ((ORIG_VIDEO_MODE == 0x00) || /* SCREEN_INFO not initialized */
-	    (ORIG_VIDEO_MODE == 0x0D) || /* 320x200/4 */
+	if ((ORIG_VIDEO_MODE == 0x0D) || /* 320x200/4 */
 	    (ORIG_VIDEO_MODE == 0x0E) || /* 640x200/4 */
 	    (ORIG_VIDEO_MODE == 0x10) || /* 640x350/4 */
 	    (ORIG_VIDEO_MODE == 0x12) || /* 640x480/4 */
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index 30c29650849..de2ed5ca335 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -232,8 +232,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct adfs_inode_info *ei = (struct adfs_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(&ei->vfs_inode);
+	inode_init_once(&ei->vfs_inode);
 }
 
 static int init_inodecache(void)
diff --git a/fs/affs/super.c b/fs/affs/super.c
index beff7d21e6e..b800d451cd6 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -87,11 +87,9 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct affs_inode_info *ei = (struct affs_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		init_MUTEX(&ei->i_link_lock);
-		init_MUTEX(&ei->i_ext_lock);
-		inode_init_once(&ei->vfs_inode);
-	}
+	init_MUTEX(&ei->i_link_lock);
+	init_MUTEX(&ei->i_ext_lock);
+	inode_init_once(&ei->vfs_inode);
 }
 
 static int init_inodecache(void)
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 370cecc910d..8d47ad88a09 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -451,17 +451,15 @@ static void afs_i_init_once(void *_vnode, struct kmem_cache *cachep,
 {
 	struct afs_vnode *vnode = _vnode;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		memset(vnode, 0, sizeof(*vnode));
-		inode_init_once(&vnode->vfs_inode);
-		init_waitqueue_head(&vnode->update_waitq);
-		mutex_init(&vnode->permits_lock);
-		mutex_init(&vnode->validate_lock);
-		spin_lock_init(&vnode->writeback_lock);
-		spin_lock_init(&vnode->lock);
-		INIT_LIST_HEAD(&vnode->writebacks);
-		INIT_WORK(&vnode->cb_broken_work, afs_broken_callback_work);
-	}
+	memset(vnode, 0, sizeof(*vnode));
+	inode_init_once(&vnode->vfs_inode);
+	init_waitqueue_head(&vnode->update_waitq);
+	mutex_init(&vnode->permits_lock);
+	mutex_init(&vnode->validate_lock);
+	spin_lock_init(&vnode->writeback_lock);
+	spin_lock_init(&vnode->lock);
+	INIT_LIST_HEAD(&vnode->writebacks);
+	INIT_WORK(&vnode->cb_broken_work, afs_broken_callback_work);
 }
 
 /*
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index fe96108a788..a5c5171c282 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -292,10 +292,8 @@ befs_destroy_inode(struct inode *inode)
 static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct befs_inode_info *bi = (struct befs_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		inode_init_once(&bi->vfs_inode);
-	}
+	inode_init_once(&bi->vfs_inode);
 }
 
 static void
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index edc08d89aab..58c7bd9f530 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -248,8 +248,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct bfs_inode_info *bi = foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(&bi->vfs_inode);
+	inode_init_once(&bi->vfs_inode);
 }
 
 static int init_inodecache(void)
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 74289924087..ea1480a16f5 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -458,17 +458,15 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 	struct bdev_inode *ei = (struct bdev_inode *) foo;
 	struct block_device *bdev = &ei->bdev;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		memset(bdev, 0, sizeof(*bdev));
-		mutex_init(&bdev->bd_mutex);
-		sema_init(&bdev->bd_mount_sem, 1);
-		INIT_LIST_HEAD(&bdev->bd_inodes);
-		INIT_LIST_HEAD(&bdev->bd_list);
+	memset(bdev, 0, sizeof(*bdev));
+	mutex_init(&bdev->bd_mutex);
+	sema_init(&bdev->bd_mount_sem, 1);
+	INIT_LIST_HEAD(&bdev->bd_inodes);
+	INIT_LIST_HEAD(&bdev->bd_list);
 #ifdef CONFIG_SYSFS
-		INIT_LIST_HEAD(&bdev->bd_holder_list);
+	INIT_LIST_HEAD(&bdev->bd_holder_list);
 #endif
-		inode_init_once(&ei->vfs_inode);
-	}
+	inode_init_once(&ei->vfs_inode);
 }
 
 static inline void __bd_forget(struct inode *inode)
diff --git a/fs/buffer.c b/fs/buffer.c
index aecd057cd0e..49590d590d7 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -981,7 +981,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
 	struct page *page;
 	struct buffer_head *bh;
 
-	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
+	page = find_or_create_page(inode->i_mapping, index,
+		mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
 	if (!page)
 		return NULL;
 
@@ -2898,8 +2899,9 @@ static void recalc_bh_state(void)
 
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 {
-	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
+	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
 	if (ret) {
+		INIT_LIST_HEAD(&ret->b_assoc_buffers);
 		get_cpu_var(bh_accounting).nr++;
 		recalc_bh_state();
 		put_cpu_var(bh_accounting);
@@ -2918,17 +2920,6 @@ void free_buffer_head(struct buffer_head *bh)
 }
 EXPORT_SYMBOL(free_buffer_head);
 
-static void
-init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags)
-{
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		struct buffer_head * bh = (struct buffer_head *)data;
-
-		memset(bh, 0, sizeof(*bh));
-		INIT_LIST_HEAD(&bh->b_assoc_buffers);
-	}
-}
-
 static void buffer_exit_cpu(int cpu)
 {
 	int i;
@@ -2955,12 +2946,8 @@ void __init buffer_init(void)
 {
 	int nrpages;
 
-	bh_cachep = kmem_cache_create("buffer_head",
-			sizeof(struct buffer_head), 0,
-			(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
-			SLAB_MEM_SPREAD),
-			init_buffer_head,
-			NULL);
+	bh_cachep = KMEM_CACHE(buffer_head,
+			SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
 
 	/*
 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
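KMEM_CACHE() derives the cache name, object size, and alignment from the struct itself, which is why the open-coded kmem_cache_create() call collapses; with the constructor deleted, the zeroing it did moves to kmem_cache_zalloc() in alloc_buffer_head() above. Under the kmem_cache_create() signature in use at this point (constructor and destructor as the last two arguments), the macro call expands to roughly:

	bh_cachep = kmem_cache_create("buffer_head",
			sizeof(struct buffer_head),
			__alignof__(struct buffer_head),
			SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD,
			NULL, NULL);	/* no constructor, no destructor */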
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 8568e100953..d38c69b591c 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -701,10 +701,8 @@ cifs_init_once(void *inode, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct cifsInodeInfo *cifsi = inode;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		inode_init_once(&cifsi->vfs_inode);
-		INIT_LIST_HEAD(&cifsi->lockList);
-	}
+	inode_init_once(&cifsi->vfs_inode);
+	INIT_LIST_HEAD(&cifsi->lockList);
 }
 
 static int
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index 0aaff3651d1..dbff1bd4fb9 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -62,8 +62,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct coda_inode_info *ei = (struct coda_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(&ei->vfs_inode);
+	inode_init_once(&ei->vfs_inode);
 }
 
 int coda_init_inodecache(void)
diff --git a/fs/compat.c b/fs/compat.c
index 7b21b0a8259..1de2331db84 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -2230,21 +2230,16 @@ asmlinkage long compat_sys_signalfd(int ufd,
 asmlinkage long compat_sys_timerfd(int ufd, int clockid, int flags,
 				   const struct compat_itimerspec __user *utmr)
 {
-	long res;
 	struct itimerspec t;
 	struct itimerspec __user *ut;
 
-	res = -EFAULT;
 	if (get_compat_itimerspec(&t, utmr))
-		goto err_exit;
+		return -EFAULT;
 	ut = compat_alloc_user_space(sizeof(*ut));
-	if (copy_to_user(ut, &t, sizeof(t)) )
-		goto err_exit;
+	if (copy_to_user(ut, &t, sizeof(t)))
+		return -EFAULT;
 
-	res = sys_timerfd(ufd, clockid, flags, ut);
-err_exit:
-	return res;
+	return sys_timerfd(ufd, clockid, flags, ut);
 }
 
 #endif /* CONFIG_TIMERFD */
-
diff --git a/fs/dquot.c b/fs/dquot.c
index 3a995841de9..8819d281500 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -1421,7 +1421,7 @@ int vfs_quota_off(struct super_block *sb, int type)
 		/* If quota was reenabled in the meantime, we have
 		 * nothing to do */
 		if (!sb_has_quota_enabled(sb, cnt)) {
-			mutex_lock(&toputinode[cnt]->i_mutex);
+			mutex_lock_nested(&toputinode[cnt]->i_mutex, I_MUTEX_QUOTA);
 			toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
 			    S_NOATIME | S_NOQUOTA);
 			truncate_inode_pages(&toputinode[cnt]->i_data, 0);
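mutex_lock_nested() is functionally identical to mutex_lock() at runtime; the I_MUTEX_QUOTA subclass only tells lockdep that taking this i_mutex while another i_mutex is held is a deliberate second level of the same lock class, not a self-deadlock. The same idiom in its most common form, sketched (not part of this patch):

	/* parent and child are both struct inode *; two locks of one
	 * class are distinguished for lockdep by their subclass */
	mutex_lock_nested(&parent->i_mutex, I_MUTEX_PARENT);
	mutex_lock_nested(&child->i_mutex, I_MUTEX_CHILD);
	/* ... operate on both inodes ... */
	mutex_unlock(&child->i_mutex);
	mutex_unlock(&parent->i_mutex);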
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 8cbf3f69ebe..606128f5c92 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -583,8 +583,7 @@ inode_info_init_once(void *vptr, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct ecryptfs_inode_info *ei = (struct ecryptfs_inode_info *)vptr;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(&ei->vfs_inode);
+	inode_init_once(&ei->vfs_inode);
 }
 
 static struct ecryptfs_cache_info {
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index 0770c4b66f5..88ea6697908 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -364,18 +364,14 @@ static int fill_zeros_to_end_of_page(struct page *page, unsigned int to)
 {
 	struct inode *inode = page->mapping->host;
 	int end_byte_in_page;
-	char *page_virt;
 
 	if ((i_size_read(inode) / PAGE_CACHE_SIZE) != page->index)
 		goto out;
 	end_byte_in_page = i_size_read(inode) % PAGE_CACHE_SIZE;
 	if (to > end_byte_in_page)
 		end_byte_in_page = to;
-	page_virt = kmap_atomic(page, KM_USER0);
-	memset((page_virt + end_byte_in_page), 0,
-	       (PAGE_CACHE_SIZE - end_byte_in_page));
-	kunmap_atomic(page_virt, KM_USER0);
-	flush_dcache_page(page);
+	zero_user_page(page, end_byte_in_page,
+		       PAGE_CACHE_SIZE - end_byte_in_page, KM_USER0);
 out:
 	return 0;
 }
@@ -740,7 +736,6 @@ int write_zeros(struct file *file, pgoff_t index, int start, int num_zeros)
 {
 	int rc = 0;
 	struct page *tmp_page;
-	char *tmp_page_virt;
 
 	tmp_page = ecryptfs_get1page(file, index);
 	if (IS_ERR(tmp_page)) {
@@ -757,10 +752,7 @@ int write_zeros(struct file *file, pgoff_t index, int start, int num_zeros)
 		page_cache_release(tmp_page);
 		goto out;
 	}
-	tmp_page_virt = kmap_atomic(tmp_page, KM_USER0);
-	memset(((char *)tmp_page_virt + start), 0, num_zeros);
-	kunmap_atomic(tmp_page_virt, KM_USER0);
-	flush_dcache_page(tmp_page);
+	zero_user_page(tmp_page, start, num_zeros, KM_USER0);
 	rc = ecryptfs_commit_write(file, tmp_page, start, start + num_zeros);
 	if (rc < 0) {
 		ecryptfs_printk(KERN_ERR, "Error attempting to write zero's "
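Both ecryptfs hunks are mechanical substitutions of zero_user_page(), which bundles the map/zero/flush/unmap sequence into a single helper. Its effect is roughly the open-coded sequence it replaces (a sketch of the behavior, not the helper's verbatim source):

	void *addr = kmap_atomic(page, KM_USER0);

	memset(addr + offset, 0, size);	/* offset/size as passed in */
	flush_dcache_page(page);
	kunmap_atomic(addr, KM_USER0);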
diff --git a/fs/efs/super.c b/fs/efs/super.c
index ba7a8b9da0c..e0a6839e68a 100644
--- a/fs/efs/super.c
+++ b/fs/efs/super.c
@@ -72,8 +72,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct efs_inode_info *ei = (struct efs_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(&ei->vfs_inode);
+	inode_init_once(&ei->vfs_inode);
 }
 
 static int init_inodecache(void)
diff --git a/fs/exec.c b/fs/exec.c
index 70fa36554c1..0b685888ff6 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -60,7 +60,7 @@
 #endif
 
 int core_uses_pid;
-char core_pattern[128] = "core";
+char core_pattern[CORENAME_MAX_SIZE] = "core";
 int suid_dumpable = 0;
 
 EXPORT_SYMBOL(suid_dumpable);
@@ -1264,8 +1264,6 @@ int set_binfmt(struct linux_binfmt *new)
 
 EXPORT_SYMBOL(set_binfmt);
 
-#define CORENAME_MAX_SIZE 64
-
 /* format_corename will inspect the pattern parameter, and output a
  * name into corename, which must have space for at least
  * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 685a1c28717..16337bff027 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -160,13 +160,11 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct ext2_inode_info *ei = (struct ext2_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		rwlock_init(&ei->i_meta_lock);
+	rwlock_init(&ei->i_meta_lock);
 #ifdef CONFIG_EXT2_FS_XATTR
-		init_rwsem(&ei->xattr_sem);
+	init_rwsem(&ei->xattr_sem);
 #endif
-		inode_init_once(&ei->vfs_inode);
-	}
+	inode_init_once(&ei->vfs_inode);
 }
 
 static int init_inodecache(void)
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 54d3c904125..6e3062913a9 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -466,14 +466,12 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct ext3_inode_info *ei = (struct ext3_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		INIT_LIST_HEAD(&ei->i_orphan);
+	INIT_LIST_HEAD(&ei->i_orphan);
 #ifdef CONFIG_EXT3_FS_XATTR
-		init_rwsem(&ei->xattr_sem);
+	init_rwsem(&ei->xattr_sem);
 #endif
-		mutex_init(&ei->truncate_mutex);
-		inode_init_once(&ei->vfs_inode);
-	}
+	mutex_init(&ei->truncate_mutex);
+	inode_init_once(&ei->vfs_inode);
 }
 
 static int init_inodecache(void)
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 71912693235..cb9afdd0e26 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -517,14 +517,12 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		INIT_LIST_HEAD(&ei->i_orphan);
+	INIT_LIST_HEAD(&ei->i_orphan);
 #ifdef CONFIG_EXT4DEV_FS_XATTR
-		init_rwsem(&ei->xattr_sem);
+	init_rwsem(&ei->xattr_sem);
 #endif
-		mutex_init(&ei->truncate_mutex);
-		inode_init_once(&ei->vfs_inode);
-	}
+	mutex_init(&ei->truncate_mutex);
+	inode_init_once(&ei->vfs_inode);
 }
 
 static int init_inodecache(void)
diff --git a/fs/fat/cache.c b/fs/fat/cache.c
index 1959143c1d2..3c9c8a15ec7 100644
--- a/fs/fat/cache.c
+++ b/fs/fat/cache.c
@@ -40,8 +40,7 @@ static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct fat_cache *cache = (struct fat_cache *)foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		INIT_LIST_HEAD(&cache->cache_list);
+	INIT_LIST_HEAD(&cache->cache_list);
 }
 
 int __init fat_cache_init(void)
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 2c55e8dce79..479722d8966 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -500,14 +500,12 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct msdos_inode_info *ei = (struct msdos_inode_info *)foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		spin_lock_init(&ei->cache_lru_lock);
-		ei->nr_caches = 0;
-		ei->cache_valid_id = FAT_CACHE_VALID + 1;
-		INIT_LIST_HEAD(&ei->cache_lru);
-		INIT_HLIST_NODE(&ei->i_fat_hash);
-		inode_init_once(&ei->vfs_inode);
-	}
+	spin_lock_init(&ei->cache_lru_lock);
+	ei->nr_caches = 0;
+	ei->cache_valid_id = FAT_CACHE_VALID + 1;
+	INIT_LIST_HEAD(&ei->cache_lru);
+	INIT_HLIST_NODE(&ei->i_fat_hash);
+	inode_init_once(&ei->vfs_inode);
 }
 
 static int __init fat_init_inodecache(void)
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 1397018ff47..c3a2ad0da43 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -687,8 +687,7 @@ static void fuse_inode_init_once(void *foo, struct kmem_cache *cachep,
 {
 	struct inode * inode = foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(inode);
+	inode_init_once(inode);
 }
 
 static int __init fuse_fs_init(void)
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index e460487c055..787a0edef10 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -27,29 +27,27 @@
 static void gfs2_init_inode_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct gfs2_inode *ip = foo;
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		inode_init_once(&ip->i_inode);
-		spin_lock_init(&ip->i_spin);
-		init_rwsem(&ip->i_rw_mutex);
-		memset(ip->i_cache, 0, sizeof(ip->i_cache));
-	}
+
+	inode_init_once(&ip->i_inode);
+	spin_lock_init(&ip->i_spin);
+	init_rwsem(&ip->i_rw_mutex);
+	memset(ip->i_cache, 0, sizeof(ip->i_cache));
 }
 
 static void gfs2_init_glock_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct gfs2_glock *gl = foo;
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		INIT_HLIST_NODE(&gl->gl_list);
-		spin_lock_init(&gl->gl_spin);
-		INIT_LIST_HEAD(&gl->gl_holders);
-		INIT_LIST_HEAD(&gl->gl_waiters1);
-		INIT_LIST_HEAD(&gl->gl_waiters3);
-		gl->gl_lvb = NULL;
-		atomic_set(&gl->gl_lvb_count, 0);
-		INIT_LIST_HEAD(&gl->gl_reclaim);
-		INIT_LIST_HEAD(&gl->gl_ail_list);
-		atomic_set(&gl->gl_ail_count, 0);
-	}
+
+	INIT_HLIST_NODE(&gl->gl_list);
+	spin_lock_init(&gl->gl_spin);
+	INIT_LIST_HEAD(&gl->gl_holders);
+	INIT_LIST_HEAD(&gl->gl_waiters1);
+	INIT_LIST_HEAD(&gl->gl_waiters3);
+	gl->gl_lvb = NULL;
+	atomic_set(&gl->gl_lvb_count, 0);
+	INIT_LIST_HEAD(&gl->gl_reclaim);
+	INIT_LIST_HEAD(&gl->gl_ail_list);
+	atomic_set(&gl->gl_ail_count, 0);
 }
 
 /**
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 4f1888f16cf..92cf8751e42 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -434,8 +434,7 @@ static void hfs_init_once(void *p, struct kmem_cache *cachep, unsigned long flag
 {
 	struct hfs_inode_info *i = p;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(&i->vfs_inode);
+	inode_init_once(&i->vfs_inode);
 }
 
 static int __init init_hfs_fs(void)
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 37afbec8a76..ebd1b380cbb 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -470,8 +470,7 @@ static void hfsplus_init_once(void *p, struct kmem_cache *cachep, unsigned long
 {
 	struct hfsplus_inode_info *i = p;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(&i->vfs_inode);
+	inode_init_once(&i->vfs_inode);
 }
 
 static int __init init_hfsplus_fs(void)
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 1b95f39fbc3..fca1165d719 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -176,11 +176,9 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		mutex_init(&ei->i_mutex);
-		mutex_init(&ei->i_parent_mutex);
-		inode_init_once(&ei->vfs_inode);
-	}
+	mutex_init(&ei->i_mutex);
+	mutex_init(&ei->i_parent_mutex);
+	inode_init_once(&ei->vfs_inode);
 }
 
 static int init_inodecache(void)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 98959b87cdf..aa083dd34e9 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -556,8 +556,7 @@ static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(&ei->vfs_inode);
+	inode_init_once(&ei->vfs_inode);
 }
 
 const struct file_operations hugetlbfs_file_operations = {
diff --git a/fs/inode.c b/fs/inode.c
index df2ef15d03d..9a012cc5b6c 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -213,8 +213,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct inode * inode = (struct inode *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(inode);
+	inode_init_once(inode);
 }
 
 /*
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index e99f7ff4ecb..5c3eecf7542 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -77,8 +77,7 @@ static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags
 {
 	struct iso_inode_info *ei = foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(&ei->vfs_inode);
+	inode_init_once(&ei->vfs_inode);
 }
 
 static int init_inodecache(void)
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 45368f8bbe7..6488af43bc9 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -47,10 +47,8 @@ static void jffs2_i_init_once(void * foo, struct kmem_cache * cachep, unsigned l
 {
 	struct jffs2_inode_info *ei = (struct jffs2_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		init_MUTEX(&ei->sem);
-		inode_init_once(&ei->vfs_inode);
-	}
+	init_MUTEX(&ei->sem);
+	inode_init_once(&ei->vfs_inode);
 }
 
 static int jffs2_sync_fs(struct super_block *sb, int wait)
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 6b3acb0b578..43d4f69afbe 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -184,16 +184,14 @@ static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct metapage *mp = (struct metapage *)foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		mp->lid = 0;
-		mp->lsn = 0;
-		mp->flag = 0;
-		mp->data = NULL;
-		mp->clsn = 0;
-		mp->log = NULL;
-		set_bit(META_free, &mp->flag);
-		init_waitqueue_head(&mp->wait);
-	}
+	mp->lid = 0;
+	mp->lsn = 0;
+	mp->flag = 0;
+	mp->data = NULL;
+	mp->clsn = 0;
+	mp->log = NULL;
+	set_bit(META_free, &mp->flag);
+	init_waitqueue_head(&mp->wait);
 }
 
 static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index ea9dc3e65dc..20e4ac1c79a 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -752,20 +752,18 @@ static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags
 {
 	struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		memset(jfs_ip, 0, sizeof(struct jfs_inode_info));
-		INIT_LIST_HEAD(&jfs_ip->anon_inode_list);
-		init_rwsem(&jfs_ip->rdwrlock);
-		mutex_init(&jfs_ip->commit_mutex);
-		init_rwsem(&jfs_ip->xattr_sem);
-		spin_lock_init(&jfs_ip->ag_lock);
-		jfs_ip->active_ag = -1;
+	memset(jfs_ip, 0, sizeof(struct jfs_inode_info));
+	INIT_LIST_HEAD(&jfs_ip->anon_inode_list);
+	init_rwsem(&jfs_ip->rdwrlock);
+	mutex_init(&jfs_ip->commit_mutex);
+	init_rwsem(&jfs_ip->xattr_sem);
+	spin_lock_init(&jfs_ip->ag_lock);
+	jfs_ip->active_ag = -1;
 #ifdef CONFIG_JFS_POSIX_ACL
-		jfs_ip->i_acl = JFS_ACL_NOT_CACHED;
-		jfs_ip->i_default_acl = JFS_ACL_NOT_CACHED;
+	jfs_ip->i_acl = JFS_ACL_NOT_CACHED;
+	jfs_ip->i_default_acl = JFS_ACL_NOT_CACHED;
 #endif
-		inode_init_once(&jfs_ip->vfs_inode);
-	}
+	inode_init_once(&jfs_ip->vfs_inode);
 }
 
 static int __init init_jfs_fs(void)
diff --git a/fs/locks.c b/fs/locks.c
index 8ec16ab5ef7..431a8b871fc 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -203,9 +203,6 @@ static void init_once(void *foo, struct kmem_cache *cache, unsigned long flags)
 {
 	struct file_lock *lock = (struct file_lock *) foo;
 
-	if (!(flags & SLAB_CTOR_CONSTRUCTOR))
-		return;
-
 	locks_init_lock(lock);
 }
 
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index 2f4d43a2a31..be4044614ac 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -73,8 +73,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct minix_inode_info *ei = (struct minix_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(&ei->vfs_inode);
+	inode_init_once(&ei->vfs_inode);
 }
 
 static int init_inodecache(void)
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index c29f00ad495..cf06eb9f050 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -60,10 +60,8 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct ncp_inode_info *ei = (struct ncp_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		mutex_init(&ei->open_mutex);
-		inode_init_once(&ei->vfs_inode);
-	}
+	mutex_init(&ei->open_mutex);
+	inode_init_once(&ei->vfs_inode);
 }
 
 static int init_inodecache(void)
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 2a3fd957320..2b26ad7c977 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1164,21 +1164,19 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct nfs_inode *nfsi = (struct nfs_inode *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		inode_init_once(&nfsi->vfs_inode);
-		spin_lock_init(&nfsi->req_lock);
-		INIT_LIST_HEAD(&nfsi->dirty);
-		INIT_LIST_HEAD(&nfsi->commit);
-		INIT_LIST_HEAD(&nfsi->open_files);
-		INIT_LIST_HEAD(&nfsi->access_cache_entry_lru);
-		INIT_LIST_HEAD(&nfsi->access_cache_inode_lru);
-		INIT_RADIX_TREE(&nfsi->nfs_page_tree, GFP_ATOMIC);
-		atomic_set(&nfsi->data_updates, 0);
-		nfsi->ndirty = 0;
-		nfsi->ncommit = 0;
-		nfsi->npages = 0;
-		nfs4_init_once(nfsi);
-	}
+	inode_init_once(&nfsi->vfs_inode);
+	spin_lock_init(&nfsi->req_lock);
+	INIT_LIST_HEAD(&nfsi->dirty);
+	INIT_LIST_HEAD(&nfsi->commit);
+	INIT_LIST_HEAD(&nfsi->open_files);
+	INIT_LIST_HEAD(&nfsi->access_cache_entry_lru);
+	INIT_LIST_HEAD(&nfsi->access_cache_inode_lru);
+	INIT_RADIX_TREE(&nfsi->nfs_page_tree, GFP_ATOMIC);
+	atomic_set(&nfsi->data_updates, 0);
+	nfsi->ndirty = 0;
+	nfsi->ncommit = 0;
+	nfsi->npages = 0;
+	nfs4_init_once(nfsi);
 }
 
 static int __init nfs_init_inodecache(void)
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 21d834e5ed7..4566b918255 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -3085,8 +3085,7 @@ static void ntfs_big_inode_init_once(void *foo, struct kmem_cache *cachep,
 {
 	ntfs_inode *ni = (ntfs_inode *)foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(VFS_I(ni));
+	inode_init_once(VFS_I(ni));
 }
 
 /*
diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c
index 5671cf9d638..fd8cb1badc9 100644
--- a/fs/ocfs2/dlm/dlmfs.c
+++ b/fs/ocfs2/dlm/dlmfs.c
@@ -262,12 +262,10 @@ static void dlmfs_init_once(void *foo,
 	struct dlmfs_inode_private *ip =
 		(struct dlmfs_inode_private *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		ip->ip_dlm = NULL;
-		ip->ip_parent = NULL;
+	ip->ip_dlm = NULL;
+	ip->ip_parent = NULL;
 
-		inode_init_once(&ip->ip_vfs_inode);
-	}
+	inode_init_once(&ip->ip_vfs_inode);
 }
 
 static struct inode *dlmfs_alloc_inode(struct super_block *sb)
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 7c5e3f5d663..86b559c7dce 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -937,31 +937,29 @@ static void ocfs2_inode_init_once(void *data,
 {
 	struct ocfs2_inode_info *oi = data;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		oi->ip_flags = 0;
-		oi->ip_open_count = 0;
-		spin_lock_init(&oi->ip_lock);
-		ocfs2_extent_map_init(&oi->vfs_inode);
-		INIT_LIST_HEAD(&oi->ip_io_markers);
-		oi->ip_created_trans = 0;
-		oi->ip_last_trans = 0;
-		oi->ip_dir_start_lookup = 0;
+	oi->ip_flags = 0;
+	oi->ip_open_count = 0;
+	spin_lock_init(&oi->ip_lock);
+	ocfs2_extent_map_init(&oi->vfs_inode);
+	INIT_LIST_HEAD(&oi->ip_io_markers);
+	oi->ip_created_trans = 0;
+	oi->ip_last_trans = 0;
+	oi->ip_dir_start_lookup = 0;
 
-		init_rwsem(&oi->ip_alloc_sem);
-		mutex_init(&oi->ip_io_mutex);
+	init_rwsem(&oi->ip_alloc_sem);
+	mutex_init(&oi->ip_io_mutex);
 
-		oi->ip_blkno = 0ULL;
-		oi->ip_clusters = 0;
+	oi->ip_blkno = 0ULL;
+	oi->ip_clusters = 0;
 
-		ocfs2_lock_res_init_once(&oi->ip_rw_lockres);
-		ocfs2_lock_res_init_once(&oi->ip_meta_lockres);
-		ocfs2_lock_res_init_once(&oi->ip_data_lockres);
-		ocfs2_lock_res_init_once(&oi->ip_open_lockres);
+	ocfs2_lock_res_init_once(&oi->ip_rw_lockres);
+	ocfs2_lock_res_init_once(&oi->ip_meta_lockres);
+	ocfs2_lock_res_init_once(&oi->ip_data_lockres);
+	ocfs2_lock_res_init_once(&oi->ip_open_lockres);
 
-		ocfs2_metadata_cache_init(&oi->vfs_inode);
+	ocfs2_metadata_cache_init(&oi->vfs_inode);
 
-		inode_init_once(&oi->vfs_inode);
-	}
+	inode_init_once(&oi->vfs_inode);
 }
 
 static int ocfs2_initialize_mem_caches(void)
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index 731a90e9f0c..e62397341c3 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -419,8 +419,7 @@ static void op_inode_init_once(void *data, struct kmem_cache * cachep, unsigned
 {
 	struct op_inode_info *oi = (struct op_inode_info *) data;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(&oi->vfs_inode);
+	inode_init_once(&oi->vfs_inode);
 }
 
 static int __init init_openprom_fs(void)
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index b8171907c83..d5ce65c68d7 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -109,8 +109,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct proc_inode *ei = (struct proc_inode *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(&ei->vfs_inode);
+	inode_init_once(&ei->vfs_inode);
 }
 
 int __init proc_init_inodecache(void)
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index 75fc8498f2e..8d256eb1181 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -536,8 +536,7 @@ static void init_once(void *foo, struct kmem_cache * cachep,
 {
 	struct qnx4_inode_info *ei = (struct qnx4_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(&ei->vfs_inode);
+	inode_init_once(&ei->vfs_inode);
 }
 
 static int init_inodecache(void)
diff --git a/fs/quota.c b/fs/quota.c
index e9d88fd0eca..9f237d6182c 100644
--- a/fs/quota.c
+++ b/fs/quota.c
@@ -157,7 +157,6 @@ static int check_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t
 static void quota_sync_sb(struct super_block *sb, int type)
 {
 	int cnt;
-	struct inode *discard[MAXQUOTAS];
 
 	sb->s_qcop->quota_sync(sb, type);
 	/* This is not very clever (and fast) but currently I don't know about
@@ -167,29 +166,21 @@ static void quota_sync_sb(struct super_block *sb, int type)
 	sb->s_op->sync_fs(sb, 1);
 	sync_blockdev(sb->s_bdev);
 
-	/* Now when everything is written we can discard the pagecache so
-	 * that userspace sees the changes. We need i_mutex and so we could
-	 * not do it inside dqonoff_mutex. Moreover we need to be carefull
-	 * about races with quotaoff() (that is the reason why we have own
-	 * reference to inode). */
+	/*
+	 * Now when everything is written we can discard the pagecache so
+	 * that userspace sees the changes.
+	 */
 	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-		discard[cnt] = NULL;
 		if (type != -1 && cnt != type)
 			continue;
 		if (!sb_has_quota_enabled(sb, cnt))
 			continue;
-		discard[cnt] = igrab(sb_dqopt(sb)->files[cnt]);
+		mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, I_MUTEX_QUOTA);
+		truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0);
+		mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex);
 	}
 	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
-	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-		if (discard[cnt]) {
-			mutex_lock(&discard[cnt]->i_mutex);
-			truncate_inode_pages(&discard[cnt]->i_data, 0);
-			mutex_unlock(&discard[cnt]->i_mutex);
-			iput(discard[cnt]);
-		}
-	}
 }
 
 void sync_dquots(struct super_block *sb, int type)
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index c7762140c42..b4ac9119200 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -511,14 +511,12 @@ static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags
 {
 	struct reiserfs_inode_info *ei = (struct reiserfs_inode_info *)foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		INIT_LIST_HEAD(&ei->i_prealloc_list);
-		inode_init_once(&ei->vfs_inode);
+	INIT_LIST_HEAD(&ei->i_prealloc_list);
+	inode_init_once(&ei->vfs_inode);
 #ifdef CONFIG_REISERFS_FS_POSIX_ACL
-		ei->i_acl_access = NULL;
-		ei->i_acl_default = NULL;
+	ei->i_acl_access = NULL;
+	ei->i_acl_default = NULL;
 #endif
-	}
 }
 
 static int init_inodecache(void)
diff --git a/fs/romfs/inode.c b/fs/romfs/inode.c
index 80428519027..2284e03342c 100644
--- a/fs/romfs/inode.c
+++ b/fs/romfs/inode.c
@@ -566,12 +566,11 @@ static void romfs_destroy_inode(struct inode *inode)
 	kmem_cache_free(romfs_inode_cachep, ROMFS_I(inode));
 }
 
-static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
-	struct romfs_inode_info *ei = (struct romfs_inode_info *) foo;
+	struct romfs_inode_info *ei = foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(&ei->vfs_inode);
+	inode_init_once(&ei->vfs_inode);
 }
 
 static int init_inodecache(void)
diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c
index 424a3ddf86d..5c9243a23b9 100644
--- a/fs/smbfs/inode.c
+++ b/fs/smbfs/inode.c
@@ -70,8 +70,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct smb_inode_info *ei = (struct smb_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(&ei->vfs_inode);
+	inode_init_once(&ei->vfs_inode);
 }
 
 static int init_inodecache(void)
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index 3152d741560..56441169339 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -322,8 +322,7 @@ static void init_once(void *p, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct sysv_inode_info *si = (struct sysv_inode_info *)p;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(&si->vfs_inode);
+	inode_init_once(&si->vfs_inode);
 }
 
 const struct super_operations sysv_sops = {
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 9b8644a06e5..3a743d854c1 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -134,10 +134,8 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct udf_inode_info *ei = (struct udf_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		ei->i_ext.i_data = NULL;
-		inode_init_once(&ei->vfs_inode);
-	}
+	ei->i_ext.i_data = NULL;
+	inode_init_once(&ei->vfs_inode);
 }
 
 static int init_inodecache(void)
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index be7c48c5f20..22ff6ed55ce 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -1237,8 +1237,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct ufs_inode_info *ei = (struct ufs_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(&ei->vfs_inode);
+	inode_init_once(&ei->vfs_inode);
 }
 
 static int init_inodecache(void)
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 14e2cbe5a8d..bf9a9d5909b 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -360,8 +360,7 @@ xfs_fs_inode_init_once(
 	kmem_zone_t		*zonep,
 	unsigned long		flags)
 {
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(vn_to_inode((bhv_vnode_t *)vnode));
+	inode_init_once(vn_to_inode((bhv_vnode_t *)vnode));
 }
 
 STATIC int
diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h
index f9d2bde9a7b..b62cd36ff32 100644
--- a/include/acpi/acpi_numa.h
+++ b/include/acpi/acpi_numa.h
@@ -11,11 +11,8 @@
 #define MAX_PXM_DOMAINS (256) /* Old pxm spec is defined 8 bit */
 #endif
 
-extern int __cpuinitdata pxm_to_node_map[MAX_PXM_DOMAINS];
-extern int __cpuinitdata node_to_pxm_map[MAX_NUMNODES];
-
-extern int __cpuinit pxm_to_node(int);
-extern int __cpuinit node_to_pxm(int);
+extern int pxm_to_node(int);
+extern int node_to_pxm(int);
 extern int __cpuinit acpi_map_pxm_to_node(int);
 extern void __cpuinit acpi_unmap_pxm_to_node(int);
 
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 2d956cd566a..e1a708337be 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -17,6 +17,8 @@ struct pt_regs;
 
 #ifdef __KERNEL__
 
+#define CORENAME_MAX_SIZE 128
+
 /*
  * This structure is used to hold the arguments that are used when loading binaries.
  */
diff --git a/include/linux/kmalloc_sizes.h b/include/linux/kmalloc_sizes.h
index bda23e00ed7..e576b848ce1 100644
--- a/include/linux/kmalloc_sizes.h
+++ b/include/linux/kmalloc_sizes.h
@@ -19,17 +19,27 @@
 	CACHE(32768)
 	CACHE(65536)
 	CACHE(131072)
-#if (NR_CPUS > 512) || (MAX_NUMNODES > 256) || !defined(CONFIG_MMU)
+#if KMALLOC_MAX_SIZE >= 262144
 	CACHE(262144)
 #endif
-#ifndef CONFIG_MMU
+#if KMALLOC_MAX_SIZE >= 524288
 	CACHE(524288)
+#endif
+#if KMALLOC_MAX_SIZE >= 1048576
 	CACHE(1048576)
-#ifdef CONFIG_LARGE_ALLOCS
+#endif
+#if KMALLOC_MAX_SIZE >= 2097152
 	CACHE(2097152)
+#endif
+#if KMALLOC_MAX_SIZE >= 4194304
 	CACHE(4194304)
+#endif
+#if KMALLOC_MAX_SIZE >= 8388608
 	CACHE(8388608)
+#endif
+#if KMALLOC_MAX_SIZE >= 16777216
 	CACHE(16777216)
+#endif
+#if KMALLOC_MAX_SIZE >= 33554432
 	CACHE(33554432)
-#endif /* CONFIG_LARGE_ALLOCS */
-#endif /* CONFIG_MMU */
+#endif
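
For context, this header is a macro table: each consumer defines CACHE() and includes the file to expand it. A hedged sketch of how an allocator might expand it into a size array (the struct here is illustrative, not the exact kernel definition):

	struct example_cache_sizes {
		size_t cs_size;
	};

	static struct example_cache_sizes example_malloc_sizes[] = {
	#define CACHE(x) { .cs_size = (x) },
	#include <linux/kmalloc_sizes.h>
	#undef CACHE
		{ .cs_size = 0 }	/* sentinel */
	};

With the new guards keyed off KMALLOC_MAX_SIZE, entries above the allocator's real limit simply never expand, so the table and the limit can no longer disagree.
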
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 3b1fbf49fa7..62b3e008e64 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -471,6 +471,7 @@
 #define PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2		0x0219
 #define PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX	0x021A
 #define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM	0x0251
+#define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE 0x0361
 #define PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL	0x252
 
 #define PCI_VENDOR_ID_COMPEX2		0x101a /* pci.ids says "AT&T GIS (NCR)" */
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index bdd277223af..97347f22fc2 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -74,17 +74,14 @@ void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned lon
 void page_add_file_rmap(struct page *);
 void page_remove_rmap(struct page *, struct vm_area_struct *);
 
-/**
- * page_dup_rmap - duplicate pte mapping to a page
- * @page:	the page to add the mapping to
- *
- * For copy_page_range only: minimal extract from page_add_rmap,
- * avoiding unnecessary tests (already checked) so it's quicker.
- */
-static inline void page_dup_rmap(struct page *page)
+#ifdef CONFIG_DEBUG_VM
+void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address);
+#else
+static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
 {
 	atomic_inc(&page->_mapcount);
 }
+#endif
 
 /*
  * Called from mm/vmscan.c to handle paging out
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 71829efc40b..a015236cc57 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -32,9 +32,6 @@ typedef struct kmem_cache kmem_cache_t __deprecated;
 #define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
 #define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */
 
-/* Flags passed to a constructor functions */
-#define SLAB_CTOR_CONSTRUCTOR	0x001UL		/* If not set, then deconstructor */
-
 /*
  * struct kmem_cache related prototypes
  */
@@ -77,6 +74,21 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
 #endif
 
 /*
+ * The largest kmalloc size supported by the slab allocators is
+ * 32 megabyte (2^25) or the maximum allocatable page order if that is
+ * less than 32 MB.
+ *
+ * WARNING: Its not easy to increase this value since the allocators have
+ * to do various tricks to work around compiler limitations in order to
+ * ensure proper constant folding.
+ */
+#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT) <= 25 ? \
+				(MAX_ORDER + PAGE_SHIFT) : 25)
+
+#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
+#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_HIGH - PAGE_SHIFT)
+
+/*
  * Common kmalloc functions provided by all allocators
  */
 void *__kmalloc(size_t, gfp_t);
@@ -233,9 +245,6 @@ extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
 
 #endif /* DEBUG_SLAB */
 
-extern const struct seq_operations slabinfo_op;
-ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *);
-
 #endif	/* __KERNEL__ */
 #endif	/* _LINUX_SLAB_H */
 
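
A worked example of the new constant, assuming a common x86 configuration (PAGE_SHIFT = 12, MAX_ORDER = 11; both values are configuration-dependent assumptions):

	#define EXAMPLE_PAGE_SHIFT	12	/* assumed, config-dependent */
	#define EXAMPLE_MAX_ORDER	11	/* assumed, config-dependent */
	/* 11 + 12 = 23 <= 25, so the 32 MB cap does not bind: */
	#define EXAMPLE_SHIFT_HIGH \
		((EXAMPLE_MAX_ORDER + EXAMPLE_PAGE_SHIFT) <= 25 ? \
		 (EXAMPLE_MAX_ORDER + EXAMPLE_PAGE_SHIFT) : 25)
	/* EXAMPLE_SHIFT_HIGH = 23, giving KMALLOC_MAX_SIZE = 1UL << 23 = 8 MB
	 * and KMALLOC_MAX_ORDER = 23 - 12 = 11 */

Only when MAX_ORDER + PAGE_SHIFT exceeds 25 does KMALLOC_SHIFT_HIGH clamp to 25, i.e. the 32 MB (2^25) ceiling.
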
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 5e4364644ed..8d81a60518e 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -109,4 +109,7 @@ found:
 
 #endif	/* CONFIG_NUMA */
 
+extern const struct seq_operations slabinfo_op;
+ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *);
+
 #endif	/* _LINUX_SLAB_DEF_H */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index c6c1f4a120e..0764c829d96 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -40,7 +40,6 @@ struct kmem_cache {
 	int objects;		/* Number of objects in slab */
 	int refcount;		/* Refcount for slab cache destroy */
 	void (*ctor)(void *, struct kmem_cache *, unsigned long);
-	void (*dtor)(void *, struct kmem_cache *, unsigned long);
 	int inuse;		/* Offset to metadata */
 	int align;		/* Alignment */
 	const char *name;	/* Name (only for display!) */
@@ -59,17 +58,6 @@ struct kmem_cache {
  */
 #define KMALLOC_SHIFT_LOW 3
 
-#ifdef CONFIG_LARGE_ALLOCS
-#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT) <= 25 ? \
-				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
-#else
-#if !defined(CONFIG_MMU) || NR_CPUS > 512 || MAX_NUMNODES > 256
-#define KMALLOC_SHIFT_HIGH 20
-#else
-#define KMALLOC_SHIFT_HIGH 18
-#endif
-#endif
-
 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
@@ -80,7 +68,7 @@ extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
  * Sorry that the following has to be that ugly but some versions of GCC
  * have trouble with constant propagation and loops.
  */
-static inline int kmalloc_index(int size)
+static inline int kmalloc_index(size_t size)
 {
 	/*
 	 * We should return 0 if size == 0 but we use the smallest object
@@ -88,7 +76,7 @@ static inline int kmalloc_index(int size)
 	 */
 	WARN_ON_ONCE(size == 0);
 
-	if (size > (1 << KMALLOC_SHIFT_HIGH))
+	if (size > KMALLOC_MAX_SIZE)
 		return -1;
 
 	if (size > 64 && size <= 96)
@@ -111,17 +99,13 @@ static inline int kmalloc_index(int size)
 	if (size <= 64 * 1024) return 16;
 	if (size <= 128 * 1024) return 17;
 	if (size <= 256 * 1024) return 18;
-#if KMALLOC_SHIFT_HIGH > 18
 	if (size <= 512 * 1024) return 19;
 	if (size <= 1024 * 1024) return 20;
-#endif
-#if KMALLOC_SHIFT_HIGH > 20
 	if (size <= 2 * 1024 * 1024) return 21;
 	if (size <= 4 * 1024 * 1024) return 22;
 	if (size <= 8 * 1024 * 1024) return 23;
 	if (size <= 16 * 1024 * 1024) return 24;
 	if (size <= 32 * 1024 * 1024) return 25;
-#endif
 	return -1;
 
 /*
@@ -146,7 +130,12 @@ static inline struct kmem_cache *kmalloc_slab(size_t size)
 	if (index == 0)
 		return NULL;
 
-	if (index < 0) {
+	/*
+	 * This function only gets expanded if __builtin_constant_p(size), so
+	 * testing it here shouldn't be needed. But some versions of gcc need
+	 * help.
+	 */
+	if (__builtin_constant_p(size) && index < 0) {
 		/*
 		 * Generate a link failure. Would be great if we could
 		 * do something to stop the compile here.
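
The "link failure" mentioned in that comment is worth a sketch. The idea: declare a function that is never defined; if constant folding succeeds, the offending branch is proven dead and the call vanishes, otherwise the final link fails. All names below are hypothetical:

	extern void __example_kmalloc_size_too_large(void);	/* never defined */

	static inline void *example_alloc(size_t size)
	{
		if (__builtin_constant_p(size) && kmalloc_index(size) < 0) {
			__example_kmalloc_size_too_large(); /* link-time error */
			return NULL;
		}
		return __kmalloc(size, GFP_KERNEL);
	}
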
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 3f70149eabb..96ac21f8dd7 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -6,6 +6,7 @@
  *		Alan Cox. <alan@redhat.com>
  */
 
+#include <linux/errno.h>
 
 extern void cpu_idle(void);
 
@@ -99,11 +100,9 @@ static inline void smp_send_reschedule(int cpu) { }
 #define num_booting_cpus()			1
 #define smp_prepare_boot_cpu()			do {} while (0)
 static inline int smp_call_function_single(int cpuid, void (*func) (void *info),
 				void *info, int retry, int wait)
 {
-	/* Disable interrupts here? */
-	func(info);
-	return 0;
+	return -EBUSY;
 }
 
 #endif /* !SMP */
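
The !SMP stub used to run func(info) locally and claim success; it now refuses with -EBUSY, so a caller that genuinely wants a local fallback has to ask for it explicitly. A hypothetical caller-side helper illustrating the expected pattern:

	static void example_run_on_cpu(int cpu, void (*fn)(void *), void *arg)
	{
		/* on UP kernels the cross-call now fails with -EBUSY;
		 * fall back to a direct local call if that is safe here */
		if (smp_call_function_single(cpu, fn, arg, 0, 1))
			fn(arg);
	}
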
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index d555f31c074..7eae8665ff5 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -122,7 +122,7 @@ extern struct workqueue_struct *__create_workqueue(const char *name,
 						int singlethread,
 						int freezeable);
 #define create_workqueue(name) __create_workqueue((name), 0, 0)
-#define create_freezeable_workqueue(name) __create_workqueue((name), 0, 1)
+#define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1)
 #define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0)
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
diff --git a/init/Kconfig b/init/Kconfig
index 4e009fde4b6..a9e99f8328f 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -567,7 +567,6 @@ config SLAB
 	   a slab allocator.
 
 config SLUB
-	depends on EXPERIMENTAL && !ARCH_USES_SLAB_PAGE_STRUCT
 	bool "SLUB (Unqueued Allocator)"
 	help
 	   SLUB is a slab allocator that minimizes cache line usage
@@ -577,14 +576,11 @@ config SLUB
 	   and has enhanced diagnostics.
 
 config SLOB
-#
-#	SLOB does not support SMP because SLAB_DESTROY_BY_RCU is unsupported
-#
-	depends on EMBEDDED && !SMP && !SPARSEMEM
+	depends on EMBEDDED && !SPARSEMEM
 	bool "SLOB (Simple Allocator)"
 	help
 	   SLOB replaces the SLAB allocator with a drastically simpler
-	   allocator. SLOB is more space efficient that SLAB but does not
+	   allocator. SLOB is more space efficient than SLAB but does not
 	   scale well (single lock for all operations) and is also highly
 	   susceptible to fragmentation. SLUB can accomplish a higher object
 	   density. It is usually better to use SLUB instead of SLOB.
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index fab5707cb5f..a242c83d89d 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -215,8 +215,7 @@ static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags
 {
 	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(&p->vfs_inode);
+	inode_init_once(&p->vfs_inode);
 }
 
 static struct inode *mqueue_alloc_inode(struct super_block *sb)
diff --git a/kernel/fork.c b/kernel/fork.c
index 49530e40ea8..87069cfc18a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1427,10 +1427,8 @@ static void sighand_ctor(void *data, struct kmem_cache *cachep,
 {
 	struct sighand_struct *sighand = data;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		spin_lock_init(&sighand->siglock);
-		INIT_LIST_HEAD(&sighand->signalfd_list);
-	}
+	spin_lock_init(&sighand->siglock);
+	INIT_LIST_HEAD(&sighand->signalfd_list);
 }
 
 void __init proc_caches_init(void)
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index b5f0543ed84..f445b9cd60f 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -416,7 +416,8 @@ static ssize_t disk_store(struct kset *kset, const char *buf, size_t n)
 
 	mutex_lock(&pm_mutex);
 	for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) {
-		if (!strncmp(buf, hibernation_modes[i], len)) {
+		if (len == strlen(hibernation_modes[i])
+		    && !strncmp(buf, hibernation_modes[i], len)) {
 			mode = i;
 			break;
 		}
diff --git a/kernel/power/main.c b/kernel/power/main.c
index b98b80ccf43..8812985f302 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -290,13 +290,13 @@ static ssize_t state_store(struct kset *kset, const char *buf, size_t n)
 	len = p ? p - buf : n;
 
 	/* First, check if we are requested to hibernate */
-	if (!strncmp(buf, "disk", len)) {
+	if (len == 4 && !strncmp(buf, "disk", len)) {
 		error = hibernate();
 		return error ? error : n;
 	}
 
 	for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) {
-		if (*s && !strncmp(buf, *s, len))
+		if (*s && len == strlen(*s) && !strncmp(buf, *s, len))
 			break;
 	}
 	if (state < PM_SUSPEND_MAX && *s)
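
Both power/ fixes above close the same hole: strncmp(buf, name, len) alone matches any prefix, so writing "d" to the sysfs file would have selected "disk". The exact-match idiom now used, as an illustrative helper (not the kernel's actual code):

	static int example_matches(const char *buf, size_t len, const char *name)
	{
		/* "d" (len 1) must not select "disk": lengths must agree */
		return len == strlen(name) && !strncmp(buf, name, len);
	}
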
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 4073353abd4..30ee462ee79 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -227,7 +227,7 @@ static ctl_table kern_table[] = {
 		.ctl_name	= KERN_CORE_PATTERN,
 		.procname	= "core_pattern",
 		.data		= core_pattern,
-		.maxlen		= 128,
+		.maxlen		= CORENAME_MAX_SIZE,
 		.mode		= 0644,
 		.proc_handler	= &proc_dostring,
 		.strategy	= &sysctl_string,
diff --git a/mm/memory.c b/mm/memory.c
index 1d647ab0ee7..cb94488ab96 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -481,7 +481,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	page = vm_normal_page(vma, addr, pte);
 	if (page) {
 		get_page(page);
-		page_dup_rmap(page);
+		page_dup_rmap(page, vma, addr);
 		rss[!!PageAnon(page)]++;
 	}
 
diff --git a/mm/rmap.c b/mm/rmap.c
index 304f51985c7..850165d32b7 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -162,12 +162,10 @@ void anon_vma_unlink(struct vm_area_struct *vma)
 static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
 			  unsigned long flags)
 {
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		struct anon_vma *anon_vma = data;
+	struct anon_vma *anon_vma = data;
 
-		spin_lock_init(&anon_vma->lock);
-		INIT_LIST_HEAD(&anon_vma->head);
-	}
+	spin_lock_init(&anon_vma->lock);
+	INIT_LIST_HEAD(&anon_vma->head);
 }
 
 void __init anon_vma_init(void)
@@ -532,19 +530,51 @@ static void __page_set_anon_rmap(struct page *page,
 }
 
 /**
+ * page_set_anon_rmap - sanity check anonymous rmap addition
+ * @page:	the page to add the mapping to
+ * @vma:	the vm area in which the mapping is added
+ * @address:	the user virtual address mapped
+ */
+static void __page_check_anon_rmap(struct page *page,
+	struct vm_area_struct *vma, unsigned long address)
+{
+#ifdef CONFIG_DEBUG_VM
+	/*
+	 * The page's anon-rmap details (mapping and index) are guaranteed to
+	 * be set up correctly at this point.
+	 *
+	 * We have exclusion against page_add_anon_rmap because the caller
+	 * always holds the page locked, except if called from page_dup_rmap,
+	 * in which case the page is already known to be setup.
+	 *
+	 * We have exclusion against page_add_new_anon_rmap because those pages
+	 * are initially only visible via the pagetables, and the pte is locked
+	 * over the call to page_add_new_anon_rmap.
+	 */
+	struct anon_vma *anon_vma = vma->anon_vma;
+	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+	BUG_ON(page->mapping != (struct address_space *)anon_vma);
+	BUG_ON(page->index != linear_page_index(vma, address));
+#endif
+}
+
+/**
  * page_add_anon_rmap - add pte mapping to an anonymous page
  * @page:	the page to add the mapping to
  * @vma:	the vm area in which the mapping is added
  * @address:	the user virtual address mapped
  *
- * The caller needs to hold the pte lock.
+ * The caller needs to hold the pte lock and the page must be locked.
  */
 void page_add_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
+	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	if (atomic_inc_and_test(&page->_mapcount))
 		__page_set_anon_rmap(page, vma, address);
-	/* else checking page index and mapping is racy */
+	else
+		__page_check_anon_rmap(page, vma, address);
 }
 
 /*
@@ -555,10 +585,12 @@ void page_add_anon_rmap(struct page *page,
  *
  * Same as page_add_anon_rmap but must only be called on *new* pages.
  * This means the inc-and-test can be bypassed.
+ * Page does not have to be locked.
  */
 void page_add_new_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
+	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
 	__page_set_anon_rmap(page, vma, address);
 }
@@ -575,6 +607,26 @@ void page_add_file_rmap(struct page *page)
 	__inc_zone_page_state(page, NR_FILE_MAPPED);
 }
 
+#ifdef CONFIG_DEBUG_VM
+/**
+ * page_dup_rmap - duplicate pte mapping to a page
+ * @page:	the page to add the mapping to
+ *
+ * For copy_page_range only: minimal extract from page_add_file_rmap /
+ * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
+ * quicker.
+ *
+ * The caller needs to hold the pte lock.
+ */
+void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
+{
+	BUG_ON(page_mapcount(page) == 0);
+	if (PageAnon(page))
+		__page_check_anon_rmap(page, vma, address);
+	atomic_inc(&page->_mapcount);
+}
+#endif
+
 /**
  * page_remove_rmap - take down pte mapping from a page
  * @page: page to remove mapping from
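
The new checks lean on how anonymous pages encode their anon_vma: the pointer lives in page->mapping with the low PAGE_MAPPING_ANON bit set as a tag. A compressed, illustrative sketch of the recomputation the BUG_ONs above perform:

	static int example_anon_mapping_matches(struct page *page,
						struct anon_vma *anon_vma)
	{
		/* tag the pointer the same way the rmap code does */
		void *expected = (void *)anon_vma + PAGE_MAPPING_ANON;

		return page->mapping == (struct address_space *)expected;
	}
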
diff --git a/mm/shmem.c b/mm/shmem.c
index f01e8deed64..e537317bec4 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2358,13 +2358,11 @@ static void init_once(void *foo, struct kmem_cache *cachep,
 {
 	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		inode_init_once(&p->vfs_inode);
+	inode_init_once(&p->vfs_inode);
 #ifdef CONFIG_TMPFS_POSIX_ACL
-		p->i_acl = NULL;
-		p->i_default_acl = NULL;
+	p->i_acl = NULL;
+	p->i_default_acl = NULL;
 #endif
-	}
 }
 
 static int init_inodecache(void)
diff --git a/mm/slab.c b/mm/slab.c
index 944b20581f8..528243e15cc 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -409,9 +409,6 @@ struct kmem_cache {
 	/* constructor func */
 	void (*ctor) (void *, struct kmem_cache *, unsigned long);
 
-	/* de-constructor func */
-	void (*dtor) (void *, struct kmem_cache *, unsigned long);
-
 /* 5) cache creation/removal */
 	const char *name;
 	struct list_head next;
@@ -572,21 +569,6 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 #endif
 
 /*
- * Maximum size of an obj (in 2^order pages) and absolute limit for the gfp
- * order.
- */
-#if defined(CONFIG_LARGE_ALLOCS)
-#define MAX_OBJ_ORDER	13	/* up to 32Mb */
-#define MAX_GFP_ORDER	13	/* up to 32Mb */
-#elif defined(CONFIG_MMU)
-#define MAX_OBJ_ORDER	5	/* 32 pages */
-#define MAX_GFP_ORDER	5	/* 32 pages */
-#else
-#define MAX_OBJ_ORDER	8	/* up to 1Mb */
-#define MAX_GFP_ORDER	8	/* up to 1Mb */
-#endif
-
-/*
  * Do not go above this order unless 0 objects fit into the slab.
  */
 #define BREAK_GFP_ORDER_HI	1
@@ -792,6 +774,7 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
 	 */
 	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
 #endif
+	WARN_ON_ONCE(size == 0);
 	while (size > csizep->cs_size)
 		csizep++;
 
@@ -1911,20 +1894,11 @@ static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
 				slab_error(cachep, "end of a freed object "
 					   "was overwritten");
 		}
-		if (cachep->dtor && !(cachep->flags & SLAB_POISON))
-			(cachep->dtor) (objp + obj_offset(cachep), cachep, 0);
 	}
 }
 #else
 static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
 {
-	if (cachep->dtor) {
-		int i;
-		for (i = 0; i < cachep->num; i++) {
-			void *objp = index_to_obj(cachep, slabp, i);
-			(cachep->dtor) (objp, cachep, 0);
-		}
-	}
 }
 #endif
 
@@ -2013,7 +1987,7 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 	size_t left_over = 0;
 	int gfporder;
 
-	for (gfporder = 0; gfporder <= MAX_GFP_ORDER; gfporder++) {
+	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
 		unsigned int num;
 		size_t remainder;
 
@@ -2124,7 +2098,7 @@ static int setup_cpu_cache(struct kmem_cache *cachep)
  * @align: The required alignment for the objects.
  * @flags: SLAB flags
  * @ctor: A constructor for the objects.
- * @dtor: A destructor for the objects.
+ * @dtor: A destructor for the objects (not implemented anymore).
  *
  * Returns a ptr to the cache on success, NULL on failure.
  * Cannot be called within a int, but can be interrupted.
@@ -2159,7 +2133,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 * Sanity checks... these are all serious usage bugs.
 	 */
 	if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
-	    (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) {
+	    size > KMALLOC_MAX_SIZE || dtor) {
 		printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
 				name);
 		BUG();
@@ -2213,9 +2187,6 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	if (flags & SLAB_DESTROY_BY_RCU)
 		BUG_ON(flags & SLAB_POISON);
 #endif
-	if (flags & SLAB_DESTROY_BY_RCU)
-		BUG_ON(dtor);
-
 	/*
 	 * Always checks flags, a caller might be expecting debug support which
 	 * isn't available.
@@ -2370,7 +2341,6 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		BUG_ON(!cachep->slabp_cache);
 	}
 	cachep->ctor = ctor;
-	cachep->dtor = dtor;
 	cachep->name = name;
 
 	if (setup_cpu_cache(cachep)) {
@@ -2625,7 +2595,7 @@ static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
 }
 
 static void cache_init_objs(struct kmem_cache *cachep,
-			    struct slab *slabp, unsigned long ctor_flags)
+			    struct slab *slabp)
 {
 	int i;
 
@@ -2649,7 +2619,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
 		 */
 		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
 			cachep->ctor(objp + obj_offset(cachep), cachep,
-				     ctor_flags);
+				     0);
 
 		if (cachep->flags & SLAB_RED_ZONE) {
 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
@@ -2665,7 +2635,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
 					 cachep->buffer_size / PAGE_SIZE, 0);
 #else
 		if (cachep->ctor)
-			cachep->ctor(objp, cachep, ctor_flags);
+			cachep->ctor(objp, cachep, 0);
 #endif
 		slab_bufctl(slabp)[i] = i + 1;
 	}
@@ -2754,7 +2724,6 @@ static int cache_grow(struct kmem_cache *cachep,
 	struct slab *slabp;
 	size_t offset;
 	gfp_t local_flags;
-	unsigned long ctor_flags;
 	struct kmem_list3 *l3;
 
 	/*
@@ -2763,7 +2732,6 @@ static int cache_grow(struct kmem_cache *cachep,
 	 */
 	BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
 
-	ctor_flags = SLAB_CTOR_CONSTRUCTOR;
 	local_flags = (flags & GFP_LEVEL_MASK);
 	/* Take the l3 list lock to change the colour_next on this node */
 	check_irq_off();
@@ -2808,7 +2776,7 @@ static int cache_grow(struct kmem_cache *cachep,
 	slabp->nodeid = nodeid;
 	slab_map_pages(cachep, slabp, objp);
 
-	cache_init_objs(cachep, slabp, ctor_flags);
+	cache_init_objs(cachep, slabp);
 
 	if (local_flags & __GFP_WAIT)
 		local_irq_disable();
@@ -2835,7 +2803,6 @@ failed:
  * Perform extra freeing checks:
  * - detect bad pointers.
  * - POISON/RED_ZONE checking
- * - destructor calls, for caches with POISON+dtor
  */
 static void kfree_debugcheck(const void *objp)
 {
@@ -2894,12 +2861,6 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 	BUG_ON(objnr >= cachep->num);
 	BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
 
-	if (cachep->flags & SLAB_POISON && cachep->dtor) {
-		/* we want to cache poison the object,
-		 * call the destruction callback
-		 */
-		cachep->dtor(objp + obj_offset(cachep), cachep, 0);
-	}
 #ifdef CONFIG_DEBUG_SLAB_LEAK
 	slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
 #endif
@@ -3099,7 +3060,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 #endif
 	objp += obj_offset(cachep);
 	if (cachep->ctor && cachep->flags & SLAB_POISON)
-		cachep->ctor(objp, cachep, SLAB_CTOR_CONSTRUCTOR);
+		cachep->ctor(objp, cachep, 0);
 #if ARCH_SLAB_MINALIGN
 	if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
 		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
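
With destructor support gone from SLAB (the sanity check above now BUG()s on any non-NULL dtor), cache creation sites reduce to the following shape; all names here are hypothetical:

	struct example_obj { int val; };
	static struct kmem_cache *example_cachep;

	static void example_ctor(void *p, struct kmem_cache *c, unsigned long f)
	{
		((struct example_obj *)p)->val = 0;	/* one-time init */
	}

	static int __init example_cache_init(void)
	{
		example_cachep = kmem_cache_create("example_cache",
				sizeof(struct example_obj), 0,
				SLAB_HWCACHE_ALIGN, example_ctor,
				NULL /* dtor: must now be NULL */);
		return example_cachep ? 0 : -ENOMEM;
	}
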
diff --git a/mm/slob.c b/mm/slob.c
index c6933bc19bc..71976c5d40d 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -35,6 +35,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/timer.h>
+#include <linux/rcupdate.h>
 
 struct slob_block {
 	int units;
@@ -53,6 +54,16 @@ struct bigblock {
 };
 typedef struct bigblock bigblock_t;
 
+/*
+ * struct slob_rcu is inserted at the tail of allocated slob blocks, which
+ * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
+ * the block using call_rcu.
+ */
+struct slob_rcu {
+	struct rcu_head head;
+	int size;
+};
+
 static slob_t arena = { .next = &arena, .units = 1 };
 static slob_t *slobfree = &arena;
 static bigblock_t *bigblocks;
@@ -266,9 +277,9 @@ size_t ksize(const void *block)
 
 struct kmem_cache {
 	unsigned int size, align;
+	unsigned long flags;
 	const char *name;
 	void (*ctor)(void *, struct kmem_cache *, unsigned long);
-	void (*dtor)(void *, struct kmem_cache *, unsigned long);
 };
 
 struct kmem_cache *kmem_cache_create(const char *name, size_t size,
@@ -283,8 +294,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	if (c) {
 		c->name = name;
 		c->size = size;
+		if (flags & SLAB_DESTROY_BY_RCU) {
+			/* leave room for rcu footer at the end of object */
+			c->size += sizeof(struct slob_rcu);
+		}
+		c->flags = flags;
 		c->ctor = ctor;
-		c->dtor = dtor;
 		/* ignore alignment unless it's forced */
 		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
 		if (c->align < align)
@@ -312,7 +327,7 @@ void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags)
 		b = (void *)__get_free_pages(flags, get_order(c->size));
 
 	if (c->ctor)
-		c->ctor(b, c, SLAB_CTOR_CONSTRUCTOR);
+		c->ctor(b, c, 0);
 
 	return b;
 }
@@ -328,15 +343,33 @@ void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
 }
 EXPORT_SYMBOL(kmem_cache_zalloc);
 
-void kmem_cache_free(struct kmem_cache *c, void *b)
+static void __kmem_cache_free(void *b, int size)
 {
-	if (c->dtor)
-		c->dtor(b, c, 0);
-
-	if (c->size < PAGE_SIZE)
-		slob_free(b, c->size);
+	if (size < PAGE_SIZE)
+		slob_free(b, size);
 	else
-		free_pages((unsigned long)b, get_order(c->size));
+		free_pages((unsigned long)b, get_order(size));
+}
+
+static void kmem_rcu_free(struct rcu_head *head)
+{
+	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
+	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
+
+	__kmem_cache_free(b, slob_rcu->size);
+}
+
+void kmem_cache_free(struct kmem_cache *c, void *b)
+{
+	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
+		struct slob_rcu *slob_rcu;
+		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
+		INIT_RCU_HEAD(&slob_rcu->head);
+		slob_rcu->size = c->size;
+		call_rcu(&slob_rcu->head, kmem_rcu_free);
+	} else {
+		__kmem_cache_free(b, c->size);
+	}
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
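
This SLOB change is what allowed the init/Kconfig hunk earlier to drop the !SMP restriction: SLAB_DESTROY_BY_RCU now works by appending an rcu_head trailer to each object and deferring the real free past a grace period. The generic shape of the technique, with illustrative names:

	struct example_rcu_trailer {
		struct rcu_head head;
		int size;		/* full object size, trailer included */
	};

	static void example_rcu_free(struct rcu_head *head)
	{
		struct example_rcu_trailer *tr = (void *)head;
		/* walk back from the trailer to the object base */
		void *base = (void *)tr - (tr->size - sizeof(*tr));

		example_release(base, tr->size);	/* hypothetical helper */
	}

Freeing then becomes: fill in the trailer and call_rcu(&tr->head, example_rcu_free); the memory is only reused after a grace period, which is exactly what SLAB_DESTROY_BY_RCU promises to readers.
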
diff --git a/mm/slub.c b/mm/slub.c
index 5e3e8bc9838..98801d404d6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -78,10 +78,18 @@
  *
  * Overloading of page flags that are otherwise used for LRU management.
  *
- * PageActive 		The slab is used as a cpu cache. Allocations
- * 			may be performed from the slab. The slab is not
- * 			on any slab list and cannot be moved onto one.
- * 			The cpu slab may be equipped with an additioanl
+ * PageActive 		The slab is frozen and exempt from list processing.
+ * 			This means that the slab is dedicated to a purpose
+ * 			such as satisfying allocations for a specific
+ * 			processor. Objects may be freed in the slab while
+ * 			it is frozen but slab_free will then skip the usual
+ * 			list operations. It is up to the processor holding
+ * 			the slab to integrate the slab into the slab lists
+ * 			when the slab is no longer needed.
+ *
+ * 			One use of this flag is to mark slabs that are
+ * 			used for allocations. Then such a slab becomes a cpu
+ * 			slab. The cpu slab may be equipped with an additional
  * 			lockless_freelist that allows lockless access to
  * 			free objects in addition to the regular freelist
  * 			that requires the slab lock.
@@ -91,27 +99,42 @@
  * the fast path and disables lockless freelists.
  */
 
-static inline int SlabDebug(struct page *page)
-{
-#ifdef CONFIG_SLUB_DEBUG
-	return PageError(page);
-#else
-	return 0;
-#endif
+#define FROZEN (1 << PG_active)
+
+#ifdef CONFIG_SLUB_DEBUG
+#define SLABDEBUG (1 << PG_error)
+#else
+#define SLABDEBUG 0
+#endif
+
+static inline int SlabFrozen(struct page *page)
+{
+	return page->flags & FROZEN;
+}
+
+static inline void SetSlabFrozen(struct page *page)
+{
+	page->flags |= FROZEN;
+}
+
+static inline void ClearSlabFrozen(struct page *page)
+{
+	page->flags &= ~FROZEN;
+}
+
+static inline int SlabDebug(struct page *page)
+{
+	return page->flags & SLABDEBUG;
 }
 
 static inline void SetSlabDebug(struct page *page)
 {
-#ifdef CONFIG_SLUB_DEBUG
-	SetPageError(page);
-#endif
+	page->flags |= SLABDEBUG;
 }
 
 static inline void ClearSlabDebug(struct page *page)
 {
-#ifdef CONFIG_SLUB_DEBUG
-	ClearPageError(page);
-#endif
+	page->flags &= ~SLABDEBUG;
 }
 
 /*
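
The rework in this hunk swaps per-accessor #ifdefs for flag masks: FROZEN always maps onto PG_active, while SLABDEBUG collapses to 0 when CONFIG_SLUB_DEBUG is off, so the debug accessors become bit operations the compiler can delete entirely. A minimal sketch of the same pattern outside SLUB (names invented):

	#ifdef CONFIG_EXAMPLE_DEBUG
	#define EXAMPLE_DEBUG_FLAG (1 << PG_error)
	#else
	#define EXAMPLE_DEBUG_FLAG 0	/* accessors fold away to no-ops */
	#endif

	static inline void SetExampleDebug(struct page *page)
	{
		page->flags |= EXAMPLE_DEBUG_FLAG;	/* no-op when 0 */
	}
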
@@ -719,6 +742,22 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 	return search == NULL;
 }
 
+static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
+{
+	if (s->flags & SLAB_TRACE) {
+		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
+			s->name,
+			alloc ? "alloc" : "free",
+			object, page->inuse,
+			page->freelist);
+
+		if (!alloc)
+			print_section("Object", (void *)object, s->objsize);
+
+		dump_stack();
+	}
+}
+
 /*
  * Tracking of fully allocated slabs for debugging purposes.
  */
@@ -743,8 +782,18 @@ static void remove_full(struct kmem_cache *s, struct page *page)
 	spin_unlock(&n->list_lock);
 }
 
-static int alloc_object_checks(struct kmem_cache *s, struct page *page,
-							void *object)
+static void setup_object_debug(struct kmem_cache *s, struct page *page,
+								void *object)
+{
+	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
+		return;
+
+	init_object(s, object, 0);
+	init_tracking(s, object);
+}
+
+static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
+						void *object, void *addr)
 {
 	if (!check_slab(s, page))
 		goto bad;
@@ -759,13 +808,16 @@ static int alloc_object_checks(struct kmem_cache *s, struct page *page,
 		goto bad;
 	}
 
-	if (!object)
-		return 1;
-
-	if (!check_object(s, page, object, 0))
+	if (object && !check_object(s, page, object, 0))
 		goto bad;
 
+	/* Success perform special debug activities for allocs */
+	if (s->flags & SLAB_STORE_USER)
+		set_track(s, object, TRACK_ALLOC, addr);
+	trace(s, page, object, 1);
+	init_object(s, object, 1);
 	return 1;
+
 bad:
 	if (PageSlab(page)) {
 		/*
@@ -783,8 +835,8 @@ bad:
 	return 0;
 }
 
-static int free_object_checks(struct kmem_cache *s, struct page *page,
-							void *object)
+static int free_debug_processing(struct kmem_cache *s, struct page *page,
+						void *object, void *addr)
 {
 	if (!check_slab(s, page))
 		goto fail;
@@ -818,29 +870,22 @@ static int free_object_checks(struct kmem_cache *s, struct page *page,
818 "to slab %s", object, page->slab->name); 870 "to slab %s", object, page->slab->name);
819 goto fail; 871 goto fail;
820 } 872 }
873
874 /* Special debug activities for freeing objects */
875 if (!SlabFrozen(page) && !page->freelist)
876 remove_full(s, page);
877 if (s->flags & SLAB_STORE_USER)
878 set_track(s, object, TRACK_FREE, addr);
879 trace(s, page, object, 0);
880 init_object(s, object, 0);
821 return 1; 881 return 1;
882
822fail: 883fail:
823 printk(KERN_ERR "@@@ SLUB: %s slab 0x%p object at 0x%p not freed.\n", 884 printk(KERN_ERR "@@@ SLUB: %s slab 0x%p object at 0x%p not freed.\n",
824 s->name, page, object); 885 s->name, page, object);
825 return 0; 886 return 0;
826} 887}
827 888
828static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
829{
830 if (s->flags & SLAB_TRACE) {
831 printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
832 s->name,
833 alloc ? "alloc" : "free",
834 object, page->inuse,
835 page->freelist);
836
837 if (!alloc)
838 print_section("Object", (void *)object, s->objsize);
839
840 dump_stack();
841 }
842}
843
844static int __init setup_slub_debug(char *str) 889static int __init setup_slub_debug(char *str)
845{ 890{
846 if (!str || *str != '=') 891 if (!str || *str != '=')
@@ -891,13 +936,13 @@ static void kmem_cache_open_debug_check(struct kmem_cache *s)
 	 * On 32 bit platforms the limit is 256k. On 64bit platforms
 	 * the limit is 512k.
 	 *
-	 * Debugging or ctor/dtors may create a need to move the free
+	 * Debugging or ctor may create a need to move the free
 	 * pointer. Fail if this happens.
 	 */
 	if (s->size >= 65535 * sizeof(void *)) {
 		BUG_ON(s->flags & (SLAB_RED_ZONE | SLAB_POISON |
 				SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
-		BUG_ON(s->ctor || s->dtor);
+		BUG_ON(s->ctor);
 	}
 	else
 		/*
@@ -909,26 +954,20 @@ static void kmem_cache_open_debug_check(struct kmem_cache *s)
909 s->flags |= slub_debug; 954 s->flags |= slub_debug;
910} 955}
911#else 956#else
957static inline void setup_object_debug(struct kmem_cache *s,
958 struct page *page, void *object) {}
912 959
913static inline int alloc_object_checks(struct kmem_cache *s, 960static inline int alloc_debug_processing(struct kmem_cache *s,
914 struct page *page, void *object) { return 0; } 961 struct page *page, void *object, void *addr) { return 0; }
915 962
916static inline int free_object_checks(struct kmem_cache *s, 963static inline int free_debug_processing(struct kmem_cache *s,
917 struct page *page, void *object) { return 0; } 964 struct page *page, void *object, void *addr) { return 0; }
918 965
919static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
920static inline void remove_full(struct kmem_cache *s, struct page *page) {}
921static inline void trace(struct kmem_cache *s, struct page *page,
922 void *object, int alloc) {}
923static inline void init_object(struct kmem_cache *s,
924 void *object, int active) {}
925static inline void init_tracking(struct kmem_cache *s, void *object) {}
926static inline int slab_pad_check(struct kmem_cache *s, struct page *page) 966static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
927 { return 1; } 967 { return 1; }
928static inline int check_object(struct kmem_cache *s, struct page *page, 968static inline int check_object(struct kmem_cache *s, struct page *page,
929 void *object, int active) { return 1; } 969 void *object, int active) { return 1; }
930static inline void set_track(struct kmem_cache *s, void *object, 970static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
931 enum track_item alloc, void *addr) {}
932static inline void kmem_cache_open_debug_check(struct kmem_cache *s) {} 971static inline void kmem_cache_open_debug_check(struct kmem_cache *s) {}
933#define slub_debug 0 972#define slub_debug 0
934#endif 973#endif
@@ -965,13 +1004,9 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 static void setup_object(struct kmem_cache *s, struct page *page,
 				void *object)
 {
-	if (SlabDebug(page)) {
-		init_object(s, object, 0);
-		init_tracking(s, object);
-	}
-
+	setup_object_debug(s, page, object);
 	if (unlikely(s->ctor))
-		s->ctor(object, s, SLAB_CTOR_CONSTRUCTOR);
+		s->ctor(object, s, 0);
 }
 
 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
@@ -1030,15 +1065,12 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 {
 	int pages = 1 << s->order;
 
-	if (unlikely(SlabDebug(page) || s->dtor)) {
+	if (unlikely(SlabDebug(page))) {
 		void *p;
 
 		slab_pad_check(s, page);
-		for_each_object(p, s, page_address(page)) {
-			if (s->dtor)
-				s->dtor(p, s, 0);
+		for_each_object(p, s, page_address(page))
 			check_object(s, page, p, 0);
-		}
 	}
 
 	mod_zone_page_state(page_zone(page),
@@ -1138,11 +1170,12 @@ static void remove_partial(struct kmem_cache *s,
  *
  * Must hold list_lock.
  */
-static int lock_and_del_slab(struct kmem_cache_node *n, struct page *page)
+static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page)
 {
 	if (slab_trylock(page)) {
 		list_del(&page->lru);
 		n->nr_partial--;
+		SetSlabFrozen(page);
 		return 1;
 	}
 	return 0;
@@ -1166,7 +1199,7 @@ static struct page *get_partial_node(struct kmem_cache_node *n)
 
 	spin_lock(&n->list_lock);
 	list_for_each_entry(page, &n->partial, lru)
-		if (lock_and_del_slab(n, page))
+		if (lock_and_freeze_slab(n, page))
 			goto out;
 	page = NULL;
 out:
@@ -1245,10 +1278,11 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
  *
  * On exit the slab lock will have been dropped.
  */
-static void putback_slab(struct kmem_cache *s, struct page *page)
+static void unfreeze_slab(struct kmem_cache *s, struct page *page)
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 
+	ClearSlabFrozen(page);
 	if (page->inuse) {
 
 		if (page->freelist)
@@ -1299,9 +1333,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, int cpu)
1299 page->inuse--; 1333 page->inuse--;
1300 } 1334 }
1301 s->cpu_slab[cpu] = NULL; 1335 s->cpu_slab[cpu] = NULL;
1302 ClearPageActive(page); 1336 unfreeze_slab(s, page);
1303
1304 putback_slab(s, page);
1305} 1337}
1306 1338
1307static void flush_slab(struct kmem_cache *s, struct page *page, int cpu) 1339static void flush_slab(struct kmem_cache *s, struct page *page, int cpu)
@@ -1392,9 +1424,7 @@ another_slab:
 new_slab:
 	page = get_partial(s, gfpflags, node);
 	if (page) {
-have_slab:
 		s->cpu_slab[cpu] = page;
-		SetPageActive(page);
 		goto load_freelist;
 	}
 
@@ -1424,17 +1454,15 @@ have_slab:
 			flush_slab(s, s->cpu_slab[cpu], cpu);
 		}
 		slab_lock(page);
-		goto have_slab;
+		SetSlabFrozen(page);
+		s->cpu_slab[cpu] = page;
+		goto load_freelist;
 	}
 	return NULL;
 debug:
 	object = page->freelist;
-	if (!alloc_object_checks(s, page, object))
+	if (!alloc_debug_processing(s, page, object, addr))
 		goto another_slab;
-	if (s->flags & SLAB_STORE_USER)
-		set_track(s, object, TRACK_ALLOC, addr);
-	trace(s, page, object, 1);
-	init_object(s, object, 1);
 
 	page->inuse++;
 	page->freelist = object[page->offset];
@@ -1511,11 +1539,7 @@ checks_ok:
1511 page->freelist = object; 1539 page->freelist = object;
1512 page->inuse--; 1540 page->inuse--;
1513 1541
1514 if (unlikely(PageActive(page))) 1542 if (unlikely(SlabFrozen(page)))
1515 /*
1516 * Cpu slabs are never on partial lists and are
1517 * never freed.
1518 */
1519 goto out_unlock; 1543 goto out_unlock;
1520 1544
1521 if (unlikely(!page->inuse)) 1545 if (unlikely(!page->inuse))
@@ -1545,14 +1569,8 @@ slab_empty:
 	return;
 
 debug:
-	if (!free_object_checks(s, page, x))
+	if (!free_debug_processing(s, page, x, addr))
 		goto out_unlock;
-	if (!PageActive(page) && !page->freelist)
-		remove_full(s, page);
-	if (s->flags & SLAB_STORE_USER)
-		set_track(s, x, TRACK_FREE, addr);
-	trace(s, page, object, 0);
-	init_object(s, object, 0);
 	goto checks_ok;
 }
 
@@ -1789,7 +1807,7 @@ static struct kmem_cache_node * __init early_kmem_cache_node_alloc(gfp_t gfpflag
1789 page->freelist = get_freepointer(kmalloc_caches, n); 1807 page->freelist = get_freepointer(kmalloc_caches, n);
1790 page->inuse++; 1808 page->inuse++;
1791 kmalloc_caches->node[node] = n; 1809 kmalloc_caches->node[node] = n;
1792 init_object(kmalloc_caches, n, 1); 1810 setup_object_debug(kmalloc_caches, page, n);
1793 init_kmem_cache_node(n); 1811 init_kmem_cache_node(n);
1794 atomic_long_inc(&n->nr_slabs); 1812 atomic_long_inc(&n->nr_slabs);
1795 add_partial(n, page); 1813 add_partial(n, page);
@@ -1871,7 +1889,7 @@ static int calculate_sizes(struct kmem_cache *s)
 	 * then we should never poison the object itself.
 	 */
 	if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
-			!s->ctor && !s->dtor)
+			!s->ctor)
 		s->flags |= __OBJECT_POISON;
 	else
 		s->flags &= ~__OBJECT_POISON;
@@ -1901,7 +1919,7 @@ static int calculate_sizes(struct kmem_cache *s)
 
 #ifdef CONFIG_SLUB_DEBUG
 	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
-			s->ctor || s->dtor)) {
+			s->ctor)) {
 		/*
 		 * Relocate free pointer after the object if it is not
 		 * permitted to overwrite the first word of the object on
@@ -1970,13 +1988,11 @@ static int calculate_sizes(struct kmem_cache *s)
 static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 		const char *name, size_t size,
 		size_t align, unsigned long flags,
-		void (*ctor)(void *, struct kmem_cache *, unsigned long),
-		void (*dtor)(void *, struct kmem_cache *, unsigned long))
+		void (*ctor)(void *, struct kmem_cache *, unsigned long))
 {
 	memset(s, 0, kmem_size);
 	s->name = name;
 	s->ctor = ctor;
-	s->dtor = dtor;
 	s->objsize = size;
 	s->flags = flags;
 	s->align = align;
@@ -2161,7 +2177,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
 
 	down_write(&slub_lock);
 	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
-			flags, NULL, NULL))
+			flags, NULL))
 		goto panic;
 
 	list_add(&s->list, &slab_caches);
@@ -2463,7 +2479,7 @@ static int slab_unmergeable(struct kmem_cache *s)
 	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
 		return 1;
 
-	if (s->ctor || s->dtor)
+	if (s->ctor)
 		return 1;
 
 	return 0;
@@ -2471,15 +2487,14 @@ static int slab_unmergeable(struct kmem_cache *s)
 
 static struct kmem_cache *find_mergeable(size_t size,
 		size_t align, unsigned long flags,
-		void (*ctor)(void *, struct kmem_cache *, unsigned long),
-		void (*dtor)(void *, struct kmem_cache *, unsigned long))
+		void (*ctor)(void *, struct kmem_cache *, unsigned long))
 {
 	struct list_head *h;
 
 	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
 		return NULL;
 
-	if (ctor || dtor)
+	if (ctor)
 		return NULL;
 
 	size = ALIGN(size, sizeof(void *));
@@ -2521,8 +2536,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 {
 	struct kmem_cache *s;
 
+	BUG_ON(dtor);
 	down_write(&slub_lock);
-	s = find_mergeable(size, align, flags, ctor, dtor);
+	s = find_mergeable(size, align, flags, ctor);
 	if (s) {
 		s->refcount++;
 		/*
@@ -2536,7 +2552,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	} else {
 		s = kmalloc(kmem_size, GFP_KERNEL);
 		if (s && kmem_cache_open(s, GFP_KERNEL, name,
-				size, align, flags, ctor, dtor)) {
+				size, align, flags, ctor)) {
 			if (sysfs_slab_add(s)) {
 				kfree(s);
 				goto err;
@@ -3177,17 +3193,6 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
 }
 SLAB_ATTR_RO(ctor);
 
-static ssize_t dtor_show(struct kmem_cache *s, char *buf)
-{
-	if (s->dtor) {
-		int n = sprint_symbol(buf, (unsigned long)s->dtor);
-
-		return n + sprintf(buf + n, "\n");
-	}
-	return 0;
-}
-SLAB_ATTR_RO(dtor);
-
 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
 {
 	return sprintf(buf, "%d\n", s->refcount - 1);
@@ -3419,7 +3424,6 @@ static struct attribute * slab_attrs[] = {
3419 &partial_attr.attr, 3424 &partial_attr.attr,
3420 &cpu_slabs_attr.attr, 3425 &cpu_slabs_attr.attr,
3421 &ctor_attr.attr, 3426 &ctor_attr.attr,
3422 &dtor_attr.attr,
3423 &aliases_attr.attr, 3427 &aliases_attr.attr,
3424 &align_attr.attr, 3428 &align_attr.attr,
3425 &sanity_checks_attr.attr, 3429 &sanity_checks_attr.attr,
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index faa2a521dea..d3a9c536825 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -311,7 +311,7 @@ struct vm_struct *remove_vm_area(void *addr)
 	return v;
 }
 
-void __vunmap(void *addr, int deallocate_pages)
+static void __vunmap(void *addr, int deallocate_pages)
 {
 	struct vm_struct *area;
 
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 15419dd682f..8400525177a 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -87,7 +87,7 @@ extern int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc,
 			unsigned char *node);
 extern void ipxrtr_del_routes(struct ipx_interface *intrfc);
 extern int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
-			struct iovec *iov, int len, int noblock);
+			struct iovec *iov, size_t len, int noblock);
 extern int ipxrtr_route_skb(struct sk_buff *skb);
 extern struct ipx_route *ipxrtr_lookup(__be32 net);
 extern int ipxrtr_ioctl(unsigned int cmd, void __user *arg);
diff --git a/net/socket.c b/net/socket.c
index 98a8f67abbf..f4530196a70 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -261,8 +261,7 @@ static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct socket_alloc *ei = (struct socket_alloc *)foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(&ei->vfs_inode);
+	inode_init_once(&ei->vfs_inode);
 }
 
 static int init_inodecache(void)
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index a2f1893bde5..5887457dc93 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -828,19 +828,17 @@ init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct rpc_inode *rpci = (struct rpc_inode *) foo;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		inode_init_once(&rpci->vfs_inode);
-		rpci->private = NULL;
-		rpci->nreaders = 0;
-		rpci->nwriters = 0;
-		INIT_LIST_HEAD(&rpci->in_upcall);
-		INIT_LIST_HEAD(&rpci->pipe);
-		rpci->pipelen = 0;
-		init_waitqueue_head(&rpci->waitq);
-		INIT_DELAYED_WORK(&rpci->queue_timeout,
-				rpc_timeout_upcall_queue);
-		rpci->ops = NULL;
-	}
+	inode_init_once(&rpci->vfs_inode);
+	rpci->private = NULL;
+	rpci->nreaders = 0;
+	rpci->nwriters = 0;
+	INIT_LIST_HEAD(&rpci->in_upcall);
+	INIT_LIST_HEAD(&rpci->pipe);
+	rpci->pipelen = 0;
+	init_waitqueue_head(&rpci->waitq);
+	INIT_DELAYED_WORK(&rpci->queue_timeout,
+			rpc_timeout_upcall_queue);
+	rpci->ops = NULL;
 }
 
 int register_rpc_pipefs(void)
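
The net/socket.c and net/sunrpc/rpc_pipe.c hunks drop the SLAB_CTOR_CONSTRUCTOR test: with slab destructors removed, a constructor now runs exactly once, when the object's slab page is first set up, so the guard is dead code and an init-once constructor can initialize unconditionally. A hedged sketch of the resulting constructor shape (demo_inode and its fields are invented for illustration; the three-argument ctor signature matches this kernel era):

    struct kmem_cache;    /* opaque to the constructor, as in the kernel API */

    struct demo_inode {
        int nreaders;
        int nwriters;
    };

    /* Called once per object when its slab is created -- no flags check. */
    static void demo_init_once(void *foo, struct kmem_cache *cachep,
                               unsigned long flags)
    {
        struct demo_inode *di = foo;

        di->nreaders = 0;   /* was guarded by SLAB_CTOR_CONSTRUCTOR */
        di->nwriters = 0;
    }
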