 146 files changed, 983 insertions(+), 945 deletions(-)
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl
index 644c3884fab..0a441f73261 100644
--- a/Documentation/DocBook/kernel-locking.tmpl
+++ b/Documentation/DocBook/kernel-locking.tmpl
@@ -551,10 +551,12 @@
 <function>spin_lock_irqsave()</function>, which is a superset
 of all other spinlock primitives.
 </para>
+
 <table>
 <title>Table of Locking Requirements</title>
 <tgroup cols="11">
 <tbody>
+
 <row>
 <entry></entry>
 <entry>IRQ Handler A</entry>
@@ -576,97 +578,128 @@
 
 <row>
 <entry>IRQ Handler B</entry>
-<entry>spin_lock_irqsave</entry>
+<entry>SLIS</entry>
 <entry>None</entry>
 </row>
 
 <row>
 <entry>Softirq A</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SL</entry>
 </row>
 
 <row>
 <entry>Softirq B</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SL</entry>
+<entry>SL</entry>
 </row>
 
 <row>
 <entry>Tasklet A</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SL</entry>
+<entry>SL</entry>
 <entry>None</entry>
 </row>
 
 <row>
 <entry>Tasklet B</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>SL</entry>
 <entry>None</entry>
 </row>
 
 <row>
 <entry>Timer A</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>SL</entry>
 <entry>None</entry>
 </row>
 
 <row>
 <entry>Timer B</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
-<entry>spin_lock</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>SL</entry>
+<entry>SL</entry>
 <entry>None</entry>
 </row>
 
 <row>
 <entry>User Context A</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
 <entry>None</entry>
 </row>
 
 <row>
 <entry>User Context B</entry>
+<entry>SLI</entry>
+<entry>SLI</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>SLBH</entry>
+<entry>DI</entry>
+<entry>None</entry>
+</row>
+
+</tbody>
+</tgroup>
+</table>
+
+<table>
+<title>Legend for Locking Requirements Table</title>
+<tgroup cols="2">
+<tbody>
+
+<row>
+<entry>SLIS</entry>
+<entry>spin_lock_irqsave</entry>
+</row>
+<row>
+<entry>SLI</entry>
 <entry>spin_lock_irq</entry>
-<entry>spin_lock_irq</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
-<entry>spin_lock_bh</entry>
+</row>
+<row>
+<entry>SL</entry>
+<entry>spin_lock</entry>
+</row>
+<row>
+<entry>SLBH</entry>
 <entry>spin_lock_bh</entry>
+</row>
+<row>
+<entry>DI</entry>
 <entry>down_interruptible</entry>
-<entry>None</entry>
 </row>
 
 </tbody>
 </tgroup>
 </table>
+
 </sect1>
 </chapter>
 
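The table reads: for each pair of contexts that share data, take the lock named in the cell, with the abbreviations defined by the new legend. As a quick illustration of the two most common cells (a sketch, not from the patch; the lock and handler names are invented):

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(mylock);	/* hypothetical lock shared between contexts */

/* User context sharing data with a softirq, tasklet or timer: SLBH */
static void user_context_path(void)
{
	spin_lock_bh(&mylock);
	/* ... touch shared data ... */
	spin_unlock_bh(&mylock);
}

/* Two different IRQ handlers sharing data: SLIS */
static irqreturn_t irq_handler_a(int irq, void *dev_id)
{
	unsigned long flags;

	spin_lock_irqsave(&mylock, flags);
	/* ... touch shared data ... */
	spin_unlock_irqrestore(&mylock, flags);
	return IRQ_HANDLED;
}
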
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 498ff31f3aa..5c8695a3d13 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -328,21 +328,20 @@ Who: Adrian Bunk <bunk@stusta.de>
 
 ---------------------------
 
-What:	libata.spindown_compat module parameter
+What:	libata spindown skipping and warning
 When:	Dec 2008
-Why:	halt(8) synchronizes caches for and spins down libata disks
-	because libata didn't use to spin down disk on system halt
-	(only synchronized caches).
-	Spin down on system halt is now implemented and can be tested
-	using sysfs node /sys/class/scsi_disk/h:c:i:l/manage_start_stop.
+Why:	Some halt(8) implementations synchronize caches for and spin
+	down libata disks because libata didn't use to spin down disk on
+	system halt (only synchronized caches).
+	Spin down on system halt is now implemented.  sysfs node
+	/sys/class/scsi_disk/h:c:i:l/manage_start_stop is present if
+	spin down support is available.
 	Because issuing spin down command to an already spun down disk
-	makes some disks spin up just to spin down again, the old
-	behavior needs to be maintained till userspace tool is updated
-	to check the sysfs node and not to spin down disks with the
-	node set to one.
-	This module parameter is to give userspace tool the time to
-	get updated and should be removed after userspace is
-	reasonably updated.
+	makes some disks spin up just to spin down again, libata tracks
+	device spindown status to skip the extra spindown command and
+	warn about it.
+	This is to give userspace tools the time to get updated and will
+	be removed after userspace is reasonably updated.
 Who:	Tejun Heo <htejun@gmail.com>
 
 ---------------------------
diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt
index e8be0abb346..36af58eba13 100644
--- a/Documentation/gpio.txt
+++ b/Documentation/gpio.txt
@@ -111,7 +111,9 @@ setting up a platform_device using the GPIO, is mark its direction:
 
 The return value is zero for success, else a negative errno.  It should
 be checked, since the get/set calls don't have error returns and since
-misconfiguration is possible.  (These calls could sleep.)
+misconfiguration is possible.  You should normally issue these calls from
+a task context.  However, for spinlock-safe GPIOs it's OK to use them
+before tasking is enabled, as part of early board setup.
 
 For output GPIOs, the value provided becomes the initial output value.
 This helps avoid signal glitching during system startup.
@@ -197,7 +199,9 @@ However, many platforms don't currently support this mechanism.
 
 Passing invalid GPIO numbers to gpio_request() will fail, as will requesting
 GPIOs that have already been claimed with that call.  The return value of
-gpio_request() must be checked.  (These calls could sleep.)
+gpio_request() must be checked.  You should normally issue these calls from
+a task context.  However, for spinlock-safe GPIOs it's OK to request GPIOs
+before tasking is enabled, as part of early board setup.
 
 These calls serve two basic purposes.  One is marking the signals which
 are actually in use as GPIOs, for better diagnostics; systems may have
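For illustration, a minimal sketch of the early-setup case the new wording allows, with the return values checked as the text requires (the GPIO number and label are invented for the example):

#include <asm/gpio.h>

static int __init board_init_status_led(void)
{
	int err = gpio_request(42, "status-led");	/* must be checked */

	if (err)
		return err;
	err = gpio_direction_output(42, 0);	/* also returns -errno */
	if (err)
		gpio_free(42);
	return err;
}
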
diff --git a/Documentation/networking/netdevices.txt b/Documentation/networking/netdevices.txt
index 847cedb238f..ce1361f9524 100644
--- a/Documentation/networking/netdevices.txt
+++ b/Documentation/networking/netdevices.txt
@@ -49,7 +49,7 @@ dev->hard_start_xmit:
 	for this and return -1 when the spin lock fails.
 	The locking there should also properly protect against
 	set_multicast_list
-	Context: BHs disabled
+	Context: Process with BHs disabled or BH (timer).
 	Notes: netif_queue_stopped() is guaranteed false
 	       Interrupts must be enabled when calling hard_start_xmit.
 	       (Interrupts must also be enabled when enabling the BH handler.)
diff --git a/Documentation/vm/slabinfo.c b/Documentation/vm/slabinfo.c
index 686a8e04a4f..d4f21ffd140 100644
--- a/Documentation/vm/slabinfo.c
+++ b/Documentation/vm/slabinfo.c
@@ -242,6 +242,9 @@ void decode_numa_list(int *numa, char *t)
 
 	memset(numa, 0, MAX_NODES * sizeof(int));
 
+	if (!t)
+		return;
+
 	while (*t == 'N') {
 		t++;
 		node = strtoul(t, &t, 10);
@@ -259,11 +262,17 @@ void decode_numa_list(int *numa, char *t)
 
 void slab_validate(struct slabinfo *s)
 {
+	if (strcmp(s->name, "*") == 0)
+		return;
+
 	set_obj(s, "validate", 1);
 }
 
 void slab_shrink(struct slabinfo *s)
 {
+	if (strcmp(s->name, "*") == 0)
+		return;
+
 	set_obj(s, "shrink", 1);
 }
 
@@ -386,7 +395,9 @@ void report(struct slabinfo *s)
 {
 	if (strcmp(s->name, "*") == 0)
 		return;
-	printf("\nSlabcache: %-20s Aliases: %2d Order : %2d\n", s->name, s->aliases, s->order);
+
+	printf("\nSlabcache: %-20s Aliases: %2d Order : %2d Objects: %d\n",
+		s->name, s->aliases, s->order, s->objects);
 	if (s->hwcache_align)
 		printf("** Hardware cacheline aligned\n");
 	if (s->cache_dma)
@@ -545,6 +556,9 @@ int slab_empty(struct slabinfo *s)
 
 void slab_debug(struct slabinfo *s)
 {
+	if (strcmp(s->name, "*") == 0)
+		return;
+
 	if (sanity && !s->sanity_checks) {
 		set_obj(s, "sanity", 1);
 	}
@@ -791,11 +805,11 @@ void totals(void)
 
 	store_size(b1, total_size);store_size(b2, total_waste);
 	store_size(b3, total_waste * 100 / total_used);
-	printf("Memory used: %6s   # Loss   : %6s   MRatio: %6s%%\n", b1, b2, b3);
+	printf("Memory used: %6s   # Loss   : %6s   MRatio:%6s%%\n", b1, b2, b3);
 
 	store_size(b1, total_objects);store_size(b2, total_partobj);
 	store_size(b3, total_partobj * 100 / total_objects);
-	printf("# Objects  : %6s   # PartObj: %6s   ORatio: %6s%%\n", b1, b2, b3);
+	printf("# Objects  : %6s   # PartObj: %6s   ORatio:%6s%%\n", b1, b2, b3);
 
 	printf("\n");
 	printf("Per Cache    Average Min Max Total\n");
@@ -818,7 +832,7 @@ void totals(void)
 	store_size(b1, avg_ppart);store_size(b2, min_ppart);
 	store_size(b3, max_ppart);
 	store_size(b4, total_partial * 100 / total_slabs);
-	printf("%%PartSlab %10s%% %10s%% %10s%% %10s%%\n",
+	printf("%%PartSlab%10s%% %10s%% %10s%% %10s%%\n",
 		b1, b2, b3, b4);
 
 	store_size(b1, avg_partobj);store_size(b2, min_partobj);
@@ -830,7 +844,7 @@ void totals(void)
 	store_size(b1, avg_ppartobj);store_size(b2, min_ppartobj);
 	store_size(b3, max_ppartobj);
 	store_size(b4, total_partobj * 100 / total_objects);
-	printf("%% PartObj %10s%% %10s%% %10s%% %10s%%\n",
+	printf("%% PartObj%10s%% %10s%% %10s%% %10s%%\n",
 		b1, b2, b3, b4);
 
 	store_size(b1, avg_size);store_size(b2, min_size);
@@ -1100,6 +1114,8 @@ void output_slabs(void)
 			ops(slab);
 		else if (show_slab)
 			slabcache(slab);
+		else if (show_report)
+			report(slab);
 	}
 }
 
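For context on the recurring strcmp guard: slabinfo carries a synthetic record named "*" that aggregates totals across all caches, and it presumably has no /sys/slab directory behind it, so operations that write sysfs attributes through set_obj() now skip it first. A sketch of the pattern with a hypothetical helper name:

static int is_total_record(const struct slabinfo *s)
{
	return strcmp(s->name, "*") == 0;	/* aggregate pseudo-record */
}
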
diff --git a/MAINTAINERS b/MAINTAINERS
index bbeb5b6b5b0..4c3277cb925 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2689,13 +2689,13 @@ L:	i2c@lm-sensors.org
 S:	Maintained
 
 PARALLEL PORT SUPPORT
-L:	linux-parport@lists.infradead.org
+L:	linux-parport@lists.infradead.org (subscribers-only)
 S:	Orphan
 
 PARIDE DRIVERS FOR PARALLEL PORT IDE DEVICES
 P:	Tim Waugh
 M:	tim@cyberelk.net
-L:	linux-parport@lists.infradead.org
+L:	linux-parport@lists.infradead.org (subscribers-only)
 W:	http://www.torque.net/linux-pp.html
 S:	Maintained
 
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -491,7 +491,7 @@ endif
 include $(srctree)/arch/$(ARCH)/Makefile
 
 ifdef CONFIG_FRAME_POINTER
-CFLAGS		+= -fno-omit-frame-pointer -fno-optimize-sibling-calls
+CFLAGS		+= -fno-omit-frame-pointer $(call cc-option,-fno-optimize-sibling-calls,)
 else
 CFLAGS		+= -fomit-frame-pointer
 endif
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 1a493050932..d80e5b1d686 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -560,14 +560,6 @@ endchoice
 
 source "mm/Kconfig"
 
-config LARGE_ALLOCS
-	bool "Allow allocating large blocks (> 1MB) of memory"
-	help
-	  Allow the slab memory allocator to keep chains for very large
-	  memory sizes - upto 32MB. You may need this if your system has
-	  a lot of RAM, and you need to able to allocate very large
-	  contiguous chunks. If unsure, say N.
-
 config BFIN_DMA_5XX
 	bool "Enable DMA Support"
 	depends on (BF533 || BF532 || BF531 || BF537 || BF536 || BF534 || BF561)
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index 114738a4558..74eef7111f2 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -102,14 +102,6 @@ config HIGHPTE
 	  with a lot of RAM, this can be wasteful of precious low memory.
 	  Setting this option will put user-space page tables in high memory.
 
-config LARGE_ALLOCS
-	bool "Allow allocating large blocks (> 1MB) of memory"
-	help
-	  Allow the slab memory allocator to keep chains for very large memory
-	  sizes - up to 32MB. You may need this if your system has a lot of
-	  RAM, and you need to able to allocate very large contiguous chunks.
-	  If unsure, say N.
-
 source "mm/Kconfig"
 
 choice
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index 6dc5e5d90fe..bd28f9f9b4b 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -34,7 +34,7 @@ CHECKFLAGS	+= -D__i386__
 CFLAGS += -pipe -msoft-float -mregparm=3 -freg-struct-return
 
 # prevent gcc from keeping the stack 16 byte aligned
-CFLAGS += -mpreferred-stack-boundary=4
+CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2)
 
 # CPU-specific tuning. Anything which can be shared with UML should go here.
 include $(srctree)/arch/i386/Makefile.cpu
diff --git a/arch/i386/kernel/cpu/mtrr/generic.c b/arch/i386/kernel/cpu/mtrr/generic.c
index 5367e32e040..c4ebb5126ef 100644
--- a/arch/i386/kernel/cpu/mtrr/generic.c
+++ b/arch/i386/kernel/cpu/mtrr/generic.c
@@ -78,7 +78,7 @@ static void __cpuinit print_fixed(unsigned base, unsigned step, const mtrr_type*
 }
 
 /* Grab all of the MTRR state for this CPU into *state */
-void __init get_mtrr_state(void)
+void get_mtrr_state(void)
 {
 	unsigned int i;
 	struct mtrr_var_range *vrs;
diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c
index 02a2f39e5e0..1cf466df330 100644
--- a/arch/i386/kernel/cpu/mtrr/main.c
+++ b/arch/i386/kernel/cpu/mtrr/main.c
@@ -639,7 +639,7 @@ static struct sysdev_driver mtrr_sysdev_driver = {
  * initialized (i.e. before smp_init()).
  *
  */
-void __init mtrr_bp_init(void)
+void mtrr_bp_init(void)
 {
 	init_ifs();
 
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index c9a7c9835ab..6299c080f6e 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -421,7 +421,7 @@ void flush_tlb_mm (struct mm_struct * mm)
 	}
 	if (!cpus_empty(cpu_mask))
 		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
-	check_pgt_cache();
+
 	preempt_enable();
 }
 
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index 823f73736bb..adc64a2bafb 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -470,14 +470,6 @@ config AVNET
 	default y
 	depends on (AVNET5282)
 
-config LARGE_ALLOCS
-	bool "Allow allocating large blocks (> 1MB) of memory"
-	help
-	  Allow the slab memory allocator to keep chains for very large
-	  memory sizes - upto 32MB. You may need this if your system has
-	  a lot of RAM, and you need to able to allocate very large
-	  contiguous chunks. If unsure, say N.
-
 config 4KSTACKS
 	bool "Use 4Kb for kernel stacks instead of 8Kb"
 	default y
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index a93f328a731..7150730e2ff 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -71,9 +71,7 @@ spufs_init_once(void *p, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct spufs_inode_info *ei = p;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR) {
-		inode_init_once(&ei->vfs_inode);
-	}
+	inode_init_once(&ei->vfs_inode);
 }
 
 static struct inode *
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index 6b9a06e4254..2d63d768996 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -1030,7 +1030,7 @@ void __devinit setup_sparc64_timer(void)
 	clockevents_register_device(sevt);
 }
 
-#define SPARC64_NSEC_PER_CYC_SHIFT	32UL
+#define SPARC64_NSEC_PER_CYC_SHIFT	10UL
 
 static struct clocksource clocksource_tick = {
 	.rating		= 100,
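The shift matters because the cycles-to-nanoseconds conversion multiplies before shifting: with mult = (NSEC_PER_SEC << shift) / freq, a shift of 32 makes mult overflow the clocksource's 32-bit mult field for any clock below 1 GHz, and even above that the 64-bit cycles * mult product wraps after only a few seconds' worth of cycles. A back-of-envelope sketch (the 1.5 GHz clock rate is an assumption for illustration):

#include <linux/types.h>

/* ns = (cycles * mult) >> shift, mult = (NSEC_PER_SEC << shift) / freq.
 * At freq ~1.5 GHz:
 *   shift = 32 -> mult ~2.9e9; cycles * mult wraps u64 after
 *                 2^64 / mult ~= 6.4e9 cycles, i.e. ~4 seconds.
 *   shift = 10 -> mult ~683; wrap only after ~2.7e16 cycles (~200 days).
 */
static inline u64 cyc_to_ns(u64 cycles, u32 mult, u32 shift)
{
	return (cycles * mult) >> shift;
}
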
diff --git a/arch/v850/Kconfig b/arch/v850/Kconfig
index 5f54c1236c1..ace479ab273 100644
--- a/arch/v850/Kconfig
+++ b/arch/v850/Kconfig
@@ -240,14 +240,6 @@ menu "Processor type and features"
 config RESET_GUARD
 	bool "Reset Guard"
 
-config LARGE_ALLOCS
-	bool "Allow allocating large blocks (> 1MB) of memory"
-	help
-	  Allow the slab memory allocator to keep chains for very large
-	  memory sizes - upto 32MB. You may need this if your system has
-	  a lot of RAM, and you need to able to allocate very large
-	  contiguous chunks. If unsure, say N.
-
 source "mm/Kconfig"
 
 endmenu
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 8fcd6a15517..a2efae8a4c4 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -40,19 +40,19 @@ static nodemask_t nodes_found_map = NODE_MASK_NONE;
 #define NID_INVAL	-1
 
 /* maps to convert between proximity domain and logical node ID */
-int __cpuinitdata pxm_to_node_map[MAX_PXM_DOMAINS]
+static int pxm_to_node_map[MAX_PXM_DOMAINS]
 			= { [0 ... MAX_PXM_DOMAINS - 1] = NID_INVAL };
-int __cpuinitdata node_to_pxm_map[MAX_NUMNODES]
+static int node_to_pxm_map[MAX_NUMNODES]
 			= { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
 
-int __cpuinit pxm_to_node(int pxm)
+int pxm_to_node(int pxm)
 {
 	if (pxm < 0)
 		return NID_INVAL;
 	return pxm_to_node_map[pxm];
 }
 
-int __cpuinit node_to_pxm(int node)
+int node_to_pxm(int node)
 {
 	if (node < 0)
 		return PXM_INVAL;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index d5939e659cb..d3ea7f55283 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -101,12 +101,6 @@ int libata_noacpi = 1;
 module_param_named(noacpi, libata_noacpi, int, 0444);
 MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");
 
-int ata_spindown_compat = 1;
-module_param_named(spindown_compat, ata_spindown_compat, int, 0644);
-MODULE_PARM_DESC(spindown_compat, "Enable backward compatible spindown "
-		 "behavior. Will be removed. More info can be found in "
-		 "Documentation/feature-removal-schedule.txt\n");
-
 MODULE_AUTHOR("Jeff Garzik");
 MODULE_DESCRIPTION("Library module for ATA devices");
 MODULE_LICENSE("GPL");
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index b6a1de8fad5..242c43eef80 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -893,7 +893,7 @@ int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
 	return queue_depth;
 }
 
-/* XXX: for ata_spindown_compat */
+/* XXX: for spindown warning */
 static void ata_delayed_done_timerfn(unsigned long arg)
 {
 	struct scsi_cmnd *scmd = (void *)arg;
@@ -901,7 +901,7 @@ static void ata_delayed_done_timerfn(unsigned long arg)
 	scmd->scsi_done(scmd);
 }
 
-/* XXX: for ata_spindown_compat */
+/* XXX: for spindown warning */
 static void ata_delayed_done(struct scsi_cmnd *scmd)
 {
 	static struct timer_list timer;
@@ -966,8 +966,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
 	 * removed. Read Documentation/feature-removal-schedule.txt
 	 * for more info.
 	 */
-	if (ata_spindown_compat &&
-	    (qc->dev->flags & ATA_DFLAG_SPUNDOWN) &&
+	if ((qc->dev->flags & ATA_DFLAG_SPUNDOWN) &&
 	    (system_state == SYSTEM_HALT ||
 	     system_state == SYSTEM_POWER_OFF)) {
 		static unsigned long warned = 0;
@@ -1395,7 +1394,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
 		}
 	}
 
-	/* XXX: track spindown state for spindown_compat */
+	/* XXX: track spindown state for spindown skipping and warning */
 	if (unlikely(qc->tf.command == ATA_CMD_STANDBY ||
 		     qc->tf.command == ATA_CMD_STANDBYNOW1))
 		qc->dev->flags |= ATA_DFLAG_SPUNDOWN;
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 13cb0c9af68..5e246665842 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -58,7 +58,6 @@ extern int atapi_enabled;
 extern int atapi_dmadir;
 extern int libata_fua;
 extern int libata_noacpi;
-extern int ata_spindown_compat;
 extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
 extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
 			   u64 block, u32 n_block, unsigned int tf_flags,
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 4cea3ef7522..1a49c777fa6 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -229,7 +229,6 @@ struct nv_host_priv {
 #define NV_ADMA_CHECK_INTR(GCTL, PORT)	((GCTL) & ( 1 << (19 + (12 * (PORT)))))
 
 static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
-static void nv_remove_one (struct pci_dev *pdev);
 #ifdef CONFIG_PM
 static int nv_pci_device_resume(struct pci_dev *pdev);
 #endif
@@ -288,12 +287,6 @@ static const struct pci_device_id nv_pci_tbl[] = {
 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
-	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
-		PCI_ANY_ID, PCI_ANY_ID,
-		PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
-	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
-		PCI_ANY_ID, PCI_ANY_ID,
-		PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },
 
 	{ } /* terminate list */
 };
@@ -306,7 +299,7 @@ static struct pci_driver nv_pci_driver = {
 	.suspend		= ata_pci_device_suspend,
 	.resume			= nv_pci_device_resume,
 #endif
-	.remove			= nv_remove_one,
+	.remove			= ata_pci_remove_one,
 };
 
 static struct scsi_host_template nv_sht = {
@@ -1613,15 +1606,6 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 				  IRQF_SHARED, ppi[0]->sht);
 }
 
-static void nv_remove_one (struct pci_dev *pdev)
-{
-	struct ata_host *host = dev_get_drvdata(&pdev->dev);
-	struct nv_host_priv *hpriv = host->private_data;
-
-	ata_pci_remove_one(pdev);
-	kfree(hpriv);
-}
-
 #ifdef CONFIG_PM
 static int nv_pci_device_resume(struct pci_dev *pdev)
 {
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index d105d2c189d..ac4f43c4993 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -441,7 +441,7 @@ static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
 		return -ENOMEM;
 	}
 
-	rc = pcim_iomap_regions(pdev, 0x1f, DRV_NAME);
+	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
 	if (rc) {
 		dev_printk(KERN_ERR, &pdev->dev, "failed to request/iomap "
 			   "PCI BARs (errno=%d)\n", rc);
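The second argument to pcim_iomap_regions() is a bitmask of BARs to request and iomap, so the change widens the request from BARs 0-4 to BARs 0-5, presumably because the vt6421 needs the sixth region as well. Spelled out:

/* 0x1f = 0b011111 -> BARs 0-4 only
 * 0x3f = 0b111111 -> BARs 0-5
 * equivalently: (1 << 0) | (1 << 1) | ... | (1 << 5)
 */
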
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 3dba5733ed1..74002945b71 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -940,9 +940,6 @@ static void ltree_entry_ctor(void *obj, struct kmem_cache *cache,
 {
 	struct ltree_entry *le = obj;
 
-	if (flags & SLAB_CTOR_CONSTRUCTOR)
-		return;
-
 	le->users = 0;
 	init_rwsem(&le->mutex);
 }
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index a9ea67e75c1..16a6edfeba4 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -333,11 +333,9 @@ struct e1000_adapter {
 	struct e1000_tx_ring test_tx_ring;
 	struct e1000_rx_ring test_rx_ring;
 
-
 	int msg_enable;
-#ifdef CONFIG_PCI_MSI
 	boolean_t have_msi;
-#endif
+
 	/* to not mess up cache alignment, always add to the bottom */
 	boolean_t tso_force;
 	boolean_t smart_power_down;	/* phy smart power down */
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 637ae8f6879..49be393e1c1 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -158,9 +158,7 @@ static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
 static int e1000_set_mac(struct net_device *netdev, void *p);
 static irqreturn_t e1000_intr(int irq, void *data);
-#ifdef CONFIG_PCI_MSI
 static irqreturn_t e1000_intr_msi(int irq, void *data);
-#endif
 static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
                                     struct e1000_tx_ring *tx_ring);
 #ifdef CONFIG_E1000_NAPI
@@ -300,31 +298,26 @@ module_exit(e1000_exit_module);
 static int e1000_request_irq(struct e1000_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
-	int flags, err = 0;
+	void (*handler) = &e1000_intr;
+	int irq_flags = IRQF_SHARED;
+	int err;
 
-	flags = IRQF_SHARED;
-#ifdef CONFIG_PCI_MSI
 	if (adapter->hw.mac_type >= e1000_82571) {
-		adapter->have_msi = TRUE;
-		if ((err = pci_enable_msi(adapter->pdev))) {
-			DPRINTK(PROBE, ERR,
-			 "Unable to allocate MSI interrupt Error: %d\n", err);
-			adapter->have_msi = FALSE;
+		adapter->have_msi = !pci_enable_msi(adapter->pdev);
+		if (adapter->have_msi) {
+			handler = &e1000_intr_msi;
+			irq_flags = 0;
 		}
 	}
-	if (adapter->have_msi) {
-		flags &= ~IRQF_SHARED;
-		err = request_irq(adapter->pdev->irq, &e1000_intr_msi, flags,
-		                  netdev->name, netdev);
-		if (err)
-			DPRINTK(PROBE, ERR,
-			 "Unable to allocate interrupt Error: %d\n", err);
-	} else
-#endif
-	if ((err = request_irq(adapter->pdev->irq, &e1000_intr, flags,
-	                       netdev->name, netdev)))
+
+	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
+	                  netdev);
+	if (err) {
+		if (adapter->have_msi)
+			pci_disable_msi(adapter->pdev);
 		DPRINTK(PROBE, ERR,
 		        "Unable to allocate interrupt Error: %d\n", err);
+	}
 
 	return err;
 }
@@ -335,10 +328,8 @@ static void e1000_free_irq(struct e1000_adapter *adapter)
 
 	free_irq(adapter->pdev->irq, netdev);
 
-#ifdef CONFIG_PCI_MSI
 	if (adapter->have_msi)
 		pci_disable_msi(adapter->pdev);
-#endif
 }
 
 /**
@@ -3744,7 +3735,6 @@ e1000_update_stats(struct e1000_adapter *adapter)
 
 	spin_unlock_irqrestore(&adapter->stats_lock, flags);
 }
-#ifdef CONFIG_PCI_MSI
 
 /**
  * e1000_intr_msi - Interrupt Handler
@@ -3810,7 +3800,6 @@ e1000_intr_msi(int irq, void *data)
 
 	return IRQ_HANDLED;
 }
-#endif
 
 /**
  * e1000_intr - Interrupt Handler
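With the CONFIG_PCI_MSI ifdefs gone (the PCI core provides stubs when MSI is disabled), the refactor collapses the MSI and legacy cases into a single request_irq() call by choosing the handler and flags up front. Roughly this pattern (a generic sketch under assumed names, not the driver's actual code):

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t legacy_handler(int irq, void *data);	/* placeholder */
static irqreturn_t msi_handler(int irq, void *data);	/* placeholder */

static int request_irq_with_msi(struct pci_dev *pdev, void *ctx)
{
	irq_handler_t handler = legacy_handler;
	unsigned long irq_flags = IRQF_SHARED;
	int have_msi = !pci_enable_msi(pdev);	/* returns 0 on success */
	int err;

	if (have_msi) {
		handler = msi_handler;
		irq_flags = 0;		/* MSI vectors are never shared */
	}
	err = request_irq(pdev->irq, handler, irq_flags, "example", ctx);
	if (err && have_msi)
		pci_disable_msi(pdev);	/* roll back on failure */
	return err;
}
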
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index b666a0cc064..f5b3cba23fc 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1025,6 +1025,15 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	dev->trans_start = jiffies;
 
+	/* The powerpc-specific eieio() is used, as wmb() has too strong
+	 * semantics (it requires synchronization between cacheable and
+	 * uncacheable mappings, which eieio doesn't provide and which we
+	 * don't need), thus requiring a more expensive sync instruction.  At
+	 * some point, the set of architecture-independent barrier functions
+	 * should be expanded to include weaker barriers.
+	 */
+
+	eieio();
 	txbdp->status = status;
 
 	/* If this was the last BD in the ring, the next one */
@@ -1301,6 +1310,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
 	bdp->length = 0;
 
 	/* Mark the buffer empty */
+	eieio();
 	bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT);
 
 	return skb;
@@ -1484,6 +1494,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 	bdp = priv->cur_rx;
 
 	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
+		rmb();
 		skb = priv->rx_skbuff[priv->skb_currx];
 
 		if (!(bdp->status &
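Both sides of the descriptor ring get a barrier here: the producer orders its descriptor writes before flipping the status/ownership bit (eieio() on powerpc, cheaper than wmb() per the comment above), and the consumer orders the status read before reading the rest of the descriptor (rmb()). Schematically (a sketch with a generic descriptor type, not the driver's structures):

struct dma_desc {
	u32 status;		/* ownership bit + flags, seen by hardware */
	u32 length;
	u32 buf;
};

static void hand_to_hw(struct dma_desc *d, u32 ready)
{
	/* fill d->length, d->buf, ... first */
	eieio();		/* writes above complete before the flip */
	d->status = ready;	/* device may now use the descriptor */
}

static bool take_from_hw(struct dma_desc *d, u32 empty)
{
	if (d->status & empty)
		return false;	/* still owned by the device */
	rmb();			/* read status before the payload */
	/* ... consume d->length, d->buf ... */
	return true;
}
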
diff --git a/drivers/net/ibm_emac/ibm_emac_core.c b/drivers/net/ibm_emac/ibm_emac_core.c
index 50035ebd4f5..f752e5fc65b 100644
--- a/drivers/net/ibm_emac/ibm_emac_core.c
+++ b/drivers/net/ibm_emac/ibm_emac_core.c
@@ -926,7 +926,7 @@ static int emac_link_differs(struct ocp_enet_private *dev)
 	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
 	int speed, pause, asym_pause;
 
-	if (r & (EMAC_MR1_MF_1000 | EMAC_MR1_MF_1000GPCS))
+	if (r & EMAC_MR1_MF_1000)
 		speed = SPEED_1000;
 	else if (r & EMAC_MR1_MF_100)
 		speed = SPEED_100;
diff --git a/drivers/net/ibm_emac/ibm_emac_mal.c b/drivers/net/ibm_emac/ibm_emac_mal.c
index 6c0f071e405..cabd9846a5e 100644
--- a/drivers/net/ibm_emac/ibm_emac_mal.c
+++ b/drivers/net/ibm_emac/ibm_emac_mal.c
@@ -59,8 +59,7 @@ int __init mal_register_commac(struct ibm_ocp_mal *mal,
 	return 0;
 }
 
-void __exit mal_unregister_commac(struct ibm_ocp_mal *mal,
-				  struct mal_commac *commac)
+void mal_unregister_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac)
 {
 	unsigned long flags;
 	local_irq_save(flags);
diff --git a/drivers/net/ibm_emac/ibm_emac_mal.h b/drivers/net/ibm_emac/ibm_emac_mal.h
index 407d2acbf7c..64bc338acc6 100644
--- a/drivers/net/ibm_emac/ibm_emac_mal.h
+++ b/drivers/net/ibm_emac/ibm_emac_mal.h
@@ -223,8 +223,7 @@ void mal_exit(void) __exit;
 
 int mal_register_commac(struct ibm_ocp_mal *mal,
 			struct mal_commac *commac) __init;
-void mal_unregister_commac(struct ibm_ocp_mal *mal,
-			   struct mal_commac *commac) __exit;
+void mal_unregister_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac);
 int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, unsigned long size);
 
 /* Returns BD ring offset for a particular channel
diff --git a/drivers/net/ibm_emac/ibm_emac_phy.c b/drivers/net/ibm_emac/ibm_emac_phy.c
index 9074f76ee2b..e57862b34ca 100644
--- a/drivers/net/ibm_emac/ibm_emac_phy.c
+++ b/drivers/net/ibm_emac/ibm_emac_phy.c
@@ -22,6 +22,7 @@
 
 #include <asm/ocp.h>
 
+#include "ibm_emac_core.h"
 #include "ibm_emac_phy.h"
 
 static inline int phy_read(struct mii_phy *phy, int reg)
@@ -34,11 +35,39 @@ static inline void phy_write(struct mii_phy *phy, int reg, int val)
 	phy->mdio_write(phy->dev, phy->address, reg, val);
 }
 
-int mii_reset_phy(struct mii_phy *phy)
+/*
+ * polls MII_BMCR until BMCR_RESET bit clears or operation times out.
+ *
+ * returns:
+ *	>= 0 => success, value in BMCR returned to caller
+ *	-EBUSY => failure, RESET bit never cleared
+ *	otherwise => failure, lower level PHY read failed
+ */
+static int mii_spin_reset_complete(struct mii_phy *phy)
 {
 	int val;
 	int limit = 10000;
 
+	while (limit--) {
+		val = phy_read(phy, MII_BMCR);
+		if (val >= 0 && !(val & BMCR_RESET))
+			return val;	/* success */
+		udelay(10);
+	}
+	if (val & BMCR_RESET)
+		val = -EBUSY;
+
+	if (net_ratelimit())
+		printk(KERN_ERR "emac%d: PHY reset timeout (%d)\n",
+		       ((struct ocp_enet_private *)phy->dev->priv)->def->index,
+		       val);
+	return val;
+}
+
+int mii_reset_phy(struct mii_phy *phy)
+{
+	int val;
+
 	val = phy_read(phy, MII_BMCR);
 	val &= ~BMCR_ISOLATE;
 	val |= BMCR_RESET;
@@ -46,16 +75,11 @@ int mii_reset_phy(struct mii_phy *phy)
 
 	udelay(300);
 
-	while (limit--) {
-		val = phy_read(phy, MII_BMCR);
-		if (val >= 0 && (val & BMCR_RESET) == 0)
-			break;
-		udelay(10);
-	}
-	if ((val & BMCR_ISOLATE) && limit > 0)
+	val = mii_spin_reset_complete(phy);
+	if (val >= 0 && (val & BMCR_ISOLATE))
 		phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE);
 
-	return limit <= 0;
+	return val < 0;
 }
 
 static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
@@ -102,8 +126,14 @@ static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
 	}
 
 	/* Start/Restart aneg */
-	ctl = phy_read(phy, MII_BMCR);
-	ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+	/* on some PHYs (e.g. National DP83843) a write to MII_ADVERTISE
+	 * causes BMCR_RESET to be set on the next read of MII_BMCR, which
+	 * if not checked for causes the PHY to be reset below */
+	ctl = mii_spin_reset_complete(phy);
+	if (ctl < 0)
+		return ctl;
+
+	ctl |= BMCR_ANENABLE | BMCR_ANRESTART;
 	phy_write(phy, MII_BMCR, ctl);
 
 	return 0;
@@ -118,13 +148,13 @@ static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
 	phy->duplex = fd;
 	phy->pause = phy->asym_pause = 0;
 
+	/* First reset the PHY */
+	mii_reset_phy(phy);
+
 	ctl = phy_read(phy, MII_BMCR);
 	if (ctl < 0)
 		return ctl;
-	ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE);
-
-	/* First reset the PHY */
-	phy_write(phy, MII_BMCR, ctl | BMCR_RESET);
+	ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE | BMCR_SPEED1000);
 
 	/* Select speed & duplex */
 	switch (speed) {
diff --git a/drivers/net/ibm_emac/ibm_emac_rgmii.c b/drivers/net/ibm_emac/ibm_emac_rgmii.c
index 53d281cb9a1..9dbb5e5936c 100644
--- a/drivers/net/ibm_emac/ibm_emac_rgmii.c
+++ b/drivers/net/ibm_emac/ibm_emac_rgmii.c
@@ -162,7 +162,7 @@ void rgmii_set_speed(struct ocp_device *ocpdev, int input, int speed)
 	out_be32(&dev->base->ssr, ssr);
 }
 
-void __exit __rgmii_fini(struct ocp_device *ocpdev, int input)
+void __rgmii_fini(struct ocp_device *ocpdev, int input)
 {
 	struct ibm_ocp_rgmii *dev = ocp_get_drvdata(ocpdev);
 	BUG_ON(!dev || dev->users == 0);
diff --git a/drivers/net/ibm_emac/ibm_emac_rgmii.h b/drivers/net/ibm_emac/ibm_emac_rgmii.h
index 117ea486c2c..971e45815c6 100644
--- a/drivers/net/ibm_emac/ibm_emac_rgmii.h
+++ b/drivers/net/ibm_emac/ibm_emac_rgmii.h
@@ -37,7 +37,7 @@ struct ibm_ocp_rgmii {
 #ifdef CONFIG_IBM_EMAC_RGMII
 int rgmii_attach(void *emac) __init;
 
-void __rgmii_fini(struct ocp_device *ocpdev, int input) __exit;
+void __rgmii_fini(struct ocp_device *ocpdev, int input);
 static inline void rgmii_fini(struct ocp_device *ocpdev, int input)
 {
 	if (ocpdev)
diff --git a/drivers/net/ibm_emac/ibm_emac_tah.c b/drivers/net/ibm_emac/ibm_emac_tah.c
index e287b451bb4..3c2d5ba522a 100644
--- a/drivers/net/ibm_emac/ibm_emac_tah.c
+++ b/drivers/net/ibm_emac/ibm_emac_tah.c
@@ -63,7 +63,7 @@ int __init tah_attach(void *emac)
 	return 0;
 }
 
-void __exit __tah_fini(struct ocp_device *ocpdev)
+void __tah_fini(struct ocp_device *ocpdev)
 {
 	struct tah_regs *p = ocp_get_drvdata(ocpdev);
 	BUG_ON(!p);
diff --git a/drivers/net/ibm_emac/ibm_emac_tah.h b/drivers/net/ibm_emac/ibm_emac_tah.h
index 38153945a24..ccf64915e1e 100644
--- a/drivers/net/ibm_emac/ibm_emac_tah.h
+++ b/drivers/net/ibm_emac/ibm_emac_tah.h
@@ -55,7 +55,7 @@ struct tah_regs {
 #ifdef CONFIG_IBM_EMAC_TAH
 int tah_attach(void *emac) __init;
 
-void __tah_fini(struct ocp_device *ocpdev) __exit;
+void __tah_fini(struct ocp_device *ocpdev);
 static inline void tah_fini(struct ocp_device *ocpdev)
 {
 	if (ocpdev)
diff --git a/drivers/net/ibm_emac/ibm_emac_zmii.c b/drivers/net/ibm_emac/ibm_emac_zmii.c
index 37dc8f34286..2c0fdb0cabf 100644
--- a/drivers/net/ibm_emac/ibm_emac_zmii.c
+++ b/drivers/net/ibm_emac/ibm_emac_zmii.c
@@ -215,7 +215,7 @@ void __zmii_set_speed(struct ocp_device *ocpdev, int input, int speed)
 	out_be32(&dev->base->ssr, ssr);
 }
 
-void __exit __zmii_fini(struct ocp_device *ocpdev, int input)
+void __zmii_fini(struct ocp_device *ocpdev, int input)
 {
 	struct ibm_ocp_zmii *dev = ocp_get_drvdata(ocpdev);
 	BUG_ON(!dev || dev->users == 0);
diff --git a/drivers/net/ibm_emac/ibm_emac_zmii.h b/drivers/net/ibm_emac/ibm_emac_zmii.h
index 972e3a44a09..fad6d8bf983 100644
--- a/drivers/net/ibm_emac/ibm_emac_zmii.h
+++ b/drivers/net/ibm_emac/ibm_emac_zmii.h
@@ -40,7 +40,7 @@ struct ibm_ocp_zmii {
 #ifdef CONFIG_IBM_EMAC_ZMII
 int zmii_attach(void *emac) __init;
 
-void __zmii_fini(struct ocp_device *ocpdev, int input) __exit;
+void __zmii_fini(struct ocp_device *ocpdev, int input);
 static inline void zmii_fini(struct ocp_device *ocpdev, int input)
 {
 	if (ocpdev)
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index c8e90861f86..3569d5b0338 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -193,8 +193,6 @@ struct ixgb_adapter {
 	u16 msg_enable;
 	struct ixgb_hw_stats stats;
 	uint32_t alloc_rx_buff_failed;
-#ifdef CONFIG_PCI_MSI
 	boolean_t have_msi;
-#endif
 };
 #endif /* _IXGB_H_ */
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index 6d2b059371f..991c8833e23 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c | |||
@@ -227,7 +227,7 @@ int | |||
227 | ixgb_up(struct ixgb_adapter *adapter) | 227 | ixgb_up(struct ixgb_adapter *adapter) |
228 | { | 228 | { |
229 | struct net_device *netdev = adapter->netdev; | 229 | struct net_device *netdev = adapter->netdev; |
230 | int err; | 230 | int err, irq_flags = IRQF_SHARED; |
231 | int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; | 231 | int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; |
232 | struct ixgb_hw *hw = &adapter->hw; | 232 | struct ixgb_hw *hw = &adapter->hw; |
233 | 233 | ||
@@ -246,26 +246,21 @@ ixgb_up(struct ixgb_adapter *adapter) | |||
246 | /* disable interrupts and get the hardware into a known state */ | 246 | /* disable interrupts and get the hardware into a known state */ |
247 | IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff); | 247 | IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff); |
248 | 248 | ||
249 | #ifdef CONFIG_PCI_MSI | 249 | /* only enable MSI if bus is in PCI-X mode */ |
250 | { | 250 | if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) { |
251 | boolean_t pcix = (IXGB_READ_REG(&adapter->hw, STATUS) & | 251 | err = pci_enable_msi(adapter->pdev); |
252 | IXGB_STATUS_PCIX_MODE) ? TRUE : FALSE; | 252 | if (!err) { |
253 | adapter->have_msi = TRUE; | 253 | adapter->have_msi = 1; |
254 | 254 | irq_flags = 0; | |
255 | if (!pcix) | 255 | } |
256 | adapter->have_msi = FALSE; | ||
257 | else if((err = pci_enable_msi(adapter->pdev))) { | ||
258 | DPRINTK(PROBE, ERR, | ||
259 | "Unable to allocate MSI interrupt Error: %d\n", err); | ||
260 | adapter->have_msi = FALSE; | ||
261 | /* proceed to try to request regular interrupt */ | 256 | /* proceed to try to request regular interrupt */ |
262 | } | 257 | } |
263 | } | ||
264 | 258 | ||
265 | #endif | 259 | err = request_irq(adapter->pdev->irq, &ixgb_intr, irq_flags, |
266 | if((err = request_irq(adapter->pdev->irq, &ixgb_intr, | 260 | netdev->name, netdev); |
267 | IRQF_SHARED | IRQF_SAMPLE_RANDOM, | 261 | if (err) { |
268 | netdev->name, netdev))) { | 262 | if (adapter->have_msi) |
263 | pci_disable_msi(adapter->pdev); | ||
269 | DPRINTK(PROBE, ERR, | 264 | DPRINTK(PROBE, ERR, |
270 | "Unable to allocate interrupt Error: %d\n", err); | 265 | "Unable to allocate interrupt Error: %d\n", err); |
271 | return err; | 266 | return err; |
@@ -307,11 +302,10 @@ ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog) | |||
307 | 302 | ||
308 | ixgb_irq_disable(adapter); | 303 | ixgb_irq_disable(adapter); |
309 | free_irq(adapter->pdev->irq, netdev); | 304 | free_irq(adapter->pdev->irq, netdev); |
310 | #ifdef CONFIG_PCI_MSI | 305 | |
311 | if(adapter->have_msi == TRUE) | 306 | if (adapter->have_msi) |
312 | pci_disable_msi(adapter->pdev); | 307 | pci_disable_msi(adapter->pdev); |
313 | 308 | ||
314 | #endif | ||
315 | if(kill_watchdog) | 309 | if(kill_watchdog) |
316 | del_timer_sync(&adapter->watchdog_timer); | 310 | del_timer_sync(&adapter->watchdog_timer); |
317 | #ifdef CONFIG_IXGB_NAPI | 311 | #ifdef CONFIG_IXGB_NAPI |
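The ixgb hunks above remove the CONFIG_PCI_MSI conditionals entirely: pci_enable_msi() is stubbed to return an error when MSI support is compiled out, so the legacy-IRQ fallback doubles as the !CONFIG_PCI_MSI path. A minimal sketch of the resulting pattern, hedged rather than a definitive implementation (bus_mode_allows_msi() and my_intr() are illustrative stand-ins, not names from the patch):

	int err, irq_flags = IRQF_SHARED;

	if (bus_mode_allows_msi(adapter)) {		/* hypothetical predicate, e.g. the PCI-X check */
		if (!pci_enable_msi(adapter->pdev)) {	/* fails harmlessly when MSI is compiled out */
			adapter->have_msi = 1;
			irq_flags = 0;			/* MSI vectors are never shared */
		}
	}

	err = request_irq(adapter->pdev->irq, &my_intr, irq_flags,
			  netdev->name, netdev);
	if (err && adapter->have_msi)
		pci_disable_msi(adapter->pdev);		/* unwind MSI before bailing out */

Note the unwind on request_irq() failure, which the original #ifdef version lacked.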
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index cf0e96adfe4..a3689245776 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c | |||
@@ -1216,7 +1216,7 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max) | |||
1216 | /* Window = 1 */ | 1216 | /* Window = 1 */ |
1217 | writel(consumer, | 1217 | writel(consumer, |
1218 | NETXEN_CRB_NORMALIZE(adapter, | 1218 | NETXEN_CRB_NORMALIZE(adapter, |
1219 | recv_crb_registers[ctxid]. | 1219 | recv_crb_registers[adapter->portnum]. |
1220 | crb_rcv_status_consumer)); | 1220 | crb_rcv_status_consumer)); |
1221 | } | 1221 | } |
1222 | 1222 | ||
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 104e20456e6..832fd69a0e5 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -40,7 +40,6 @@ | |||
40 | #include <linux/if_vlan.h> | 40 | #include <linux/if_vlan.h> |
41 | #include <linux/prefetch.h> | 41 | #include <linux/prefetch.h> |
42 | #include <linux/mii.h> | 42 | #include <linux/mii.h> |
43 | #include <linux/dmi.h> | ||
44 | 43 | ||
45 | #include <asm/irq.h> | 44 | #include <asm/irq.h> |
46 | 45 | ||
@@ -151,8 +150,6 @@ static const char *yukon2_name[] = { | |||
151 | "FE", /* 0xb7 */ | 150 | "FE", /* 0xb7 */ |
152 | }; | 151 | }; |
153 | 152 | ||
154 | static int dmi_blacklisted; | ||
155 | |||
156 | /* Access to external PHY */ | 153 | /* Access to external PHY */ |
157 | static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val) | 154 | static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val) |
158 | { | 155 | { |
@@ -307,10 +304,13 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) | |||
307 | PHY_M_EC_MAC_S_MSK); | 304 | PHY_M_EC_MAC_S_MSK); |
308 | ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ); | 305 | ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ); |
309 | 306 | ||
307 | /* on PHY 88E1040 Rev.D0 (and newer) the downshift control changed */ | ||
310 | if (hw->chip_id == CHIP_ID_YUKON_EC) | 308 | if (hw->chip_id == CHIP_ID_YUKON_EC) |
309 | /* set downshift counter to 3x and enable downshift */ | ||
311 | ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA; | 310 | ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA; |
312 | else | 311 | else |
313 | ectrl |= PHY_M_EC_M_DSC(2) | PHY_M_EC_S_DSC(3); | 312 | /* set master & slave downshift counter to 1x */ |
313 | ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1); | ||
314 | 314 | ||
315 | gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl); | 315 | gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl); |
316 | } | 316 | } |
@@ -327,10 +327,12 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) | |||
327 | /* enable automatic crossover */ | 327 | /* enable automatic crossover */ |
328 | ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO); | 328 | ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO); |
329 | 329 | ||
330 | /* downshift control on PHY 88E1112 and 88E1149 was changed */ | ||
330 | if (sky2->autoneg == AUTONEG_ENABLE | 331 | if (sky2->autoneg == AUTONEG_ENABLE |
331 | && (hw->chip_id == CHIP_ID_YUKON_XL | 332 | && (hw->chip_id == CHIP_ID_YUKON_XL |
332 | || hw->chip_id == CHIP_ID_YUKON_EC_U | 333 | || hw->chip_id == CHIP_ID_YUKON_EC_U |
333 | || hw->chip_id == CHIP_ID_YUKON_EX)) { | 334 | || hw->chip_id == CHIP_ID_YUKON_EX)) { |
335 | /* set downshift counter to 3x and enable downshift */ | ||
334 | ctrl &= ~PHY_M_PC_DSC_MSK; | 336 | ctrl &= ~PHY_M_PC_DSC_MSK; |
335 | ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA; | 337 | ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA; |
336 | } | 338 | } |
@@ -842,10 +844,12 @@ static inline struct tx_ring_info *tx_le_re(struct sky2_port *sky2, | |||
842 | /* Update chip's next pointer */ | 844 | /* Update chip's next pointer */ |
843 | static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx) | 845 | static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx) |
844 | { | 846 | { |
845 | q = Y2_QADDR(q, PREF_UNIT_PUT_IDX); | 847 | /* Make sure writes to descriptors are complete before we tell hardware */ |
846 | wmb(); | 848 | wmb(); |
847 | sky2_write16(hw, q, idx); | 849 | sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx); |
848 | sky2_read16(hw, q); | 850 | |
851 | /* Synchronize I/O, since the next processor may write to the tail */ | ||
852 | mmiowb(); | ||
849 | } | 853 | } |
850 | 854 | ||
851 | 855 | ||
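The rewritten sky2_put_idx() is the canonical descriptor-ring doorbell sequence: wmb() orders the descriptor writes in coherent memory ahead of the index write, and mmiowb() orders that MMIO write against a subsequent spin_unlock() so a second CPU entering the same path cannot have its doorbell reach the device first. A condensed sketch of the ordering contract (the le/map descriptor names are illustrative):

	/* 1. fill the descriptor in coherent DMA memory */
	le->addr   = cpu_to_le32(map);
	le->length = cpu_to_le16(len);

	/* 2. make the descriptor globally visible before the doorbell */
	wmb();

	/* 3. tell the prefetch unit about the new put index */
	sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);

	/* 4. order the MMIO write against the caller's spin_unlock(),
	 * so another CPU taking the lock cannot overtake this doorbell */
	mmiowb();

The old read-back flush is dropped in favour of mmiowb(), which is cheaper on the architectures that need explicit MMIO ordering.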
@@ -977,6 +981,7 @@ stopped: | |||
977 | 981 | ||
978 | /* reset the Rx prefetch unit */ | 982 | /* reset the Rx prefetch unit */ |
979 | sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); | 983 | sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); |
984 | mmiowb(); | ||
980 | } | 985 | } |
981 | 986 | ||
982 | /* Clean out receive buffer area, assumes receiver hardware stopped */ | 987 | /* Clean out receive buffer area, assumes receiver hardware stopped */ |
@@ -1196,7 +1201,7 @@ static int sky2_rx_start(struct sky2_port *sky2) | |||
1196 | } | 1201 | } |
1197 | 1202 | ||
1198 | /* Tell chip about available buffers */ | 1203 | /* Tell chip about available buffers */ |
1199 | sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put); | 1204 | sky2_put_idx(hw, rxq, sky2->rx_put); |
1200 | return 0; | 1205 | return 0; |
1201 | nomem: | 1206 | nomem: |
1202 | sky2_rx_clean(sky2); | 1207 | sky2_rx_clean(sky2); |
@@ -1538,6 +1543,8 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done) | |||
1538 | } | 1543 | } |
1539 | 1544 | ||
1540 | sky2->tx_cons = idx; | 1545 | sky2->tx_cons = idx; |
1546 | smp_mb(); | ||
1547 | |||
1541 | if (tx_avail(sky2) > MAX_SKB_TX_LE + 4) | 1548 | if (tx_avail(sky2) > MAX_SKB_TX_LE + 4) |
1542 | netif_wake_queue(dev); | 1549 | netif_wake_queue(dev); |
1543 | } | 1550 | } |
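The new smp_mb() pairs with the transmit path: the consumer must publish the updated tx_cons before re-checking ring occupancy, and the producer must re-read it after stopping the queue, otherwise both sides can observe stale state and the queue stalls forever. Conceptually (a sketch of the pairing, not the driver's exact xmit code):

	/* consumer (status IRQ / NAPI) */
	sky2->tx_cons = idx;
	smp_mb();				/* publish tx_cons before the avail check */
	if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
		netif_wake_queue(dev);

	/* producer (hard_start_xmit), conceptually */
	if (tx_avail(sky2) < needed) {
		netif_stop_queue(dev);
		smp_mb();			/* then re-read the latest tx_cons */
		if (tx_avail(sky2) >= needed)
			netif_wake_queue(dev);	/* lost the race; undo the stop */
	}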
@@ -1577,13 +1584,6 @@ static int sky2_down(struct net_device *dev) | |||
1577 | imask &= ~portirq_msk[port]; | 1584 | imask &= ~portirq_msk[port]; |
1578 | sky2_write32(hw, B0_IMSK, imask); | 1585 | sky2_write32(hw, B0_IMSK, imask); |
1579 | 1586 | ||
1580 | /* | ||
1581 | * Both ports share the NAPI poll on port 0, so if necessary undo the | ||
1582 | * the disable that is done in dev_close. | ||
1583 | */ | ||
1584 | if (sky2->port == 0 && hw->ports > 1) | ||
1585 | netif_poll_enable(dev); | ||
1586 | |||
1587 | sky2_gmac_reset(hw, port); | 1587 | sky2_gmac_reset(hw, port); |
1588 | 1588 | ||
1589 | /* Stop transmitter */ | 1589 | /* Stop transmitter */ |
@@ -2139,8 +2139,10 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do) | |||
2139 | switch (le->opcode & ~HW_OWNER) { | 2139 | switch (le->opcode & ~HW_OWNER) { |
2140 | case OP_RXSTAT: | 2140 | case OP_RXSTAT: |
2141 | skb = sky2_receive(dev, length, status); | 2141 | skb = sky2_receive(dev, length, status); |
2142 | if (!skb) | 2142 | if (unlikely(!skb)) { |
2143 | sky2->net_stats.rx_dropped++; | ||
2143 | goto force_update; | 2144 | goto force_update; |
2145 | } | ||
2144 | 2146 | ||
2145 | skb->protocol = eth_type_trans(skb, dev); | 2147 | skb->protocol = eth_type_trans(skb, dev); |
2146 | sky2->net_stats.rx_packets++; | 2148 | sky2->net_stats.rx_packets++; |
@@ -2221,6 +2223,7 @@ force_update: | |||
2221 | 2223 | ||
2222 | /* Fully processed status ring so clear irq */ | 2224 | /* Fully processed status ring so clear irq */ |
2223 | sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); | 2225 | sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); |
2226 | mmiowb(); | ||
2224 | 2227 | ||
2225 | exit_loop: | 2228 | exit_loop: |
2226 | if (buf_write[0]) { | 2229 | if (buf_write[0]) { |
@@ -2341,6 +2344,12 @@ static void sky2_mac_intr(struct sky2_hw *hw, unsigned port) | |||
2341 | printk(KERN_INFO PFX "%s: mac interrupt status 0x%x\n", | 2344 | printk(KERN_INFO PFX "%s: mac interrupt status 0x%x\n", |
2342 | dev->name, status); | 2345 | dev->name, status); |
2343 | 2346 | ||
2347 | if (status & GM_IS_RX_CO_OV) | ||
2348 | gma_read16(hw, port, GM_RX_IRQ_SRC); | ||
2349 | |||
2350 | if (status & GM_IS_TX_CO_OV) | ||
2351 | gma_read16(hw, port, GM_TX_IRQ_SRC); | ||
2352 | |||
2344 | if (status & GM_IS_RX_FF_OR) { | 2353 | if (status & GM_IS_RX_FF_OR) { |
2345 | ++sky2->net_stats.rx_fifo_errors; | 2354 | ++sky2->net_stats.rx_fifo_errors; |
2346 | sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO); | 2355 | sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO); |
@@ -2439,6 +2448,7 @@ static int sky2_poll(struct net_device *dev0, int *budget) | |||
2439 | if (work_done < work_limit) { | 2448 | if (work_done < work_limit) { |
2440 | netif_rx_complete(dev0); | 2449 | netif_rx_complete(dev0); |
2441 | 2450 | ||
2451 | /* end of interrupt; the re-enable read also acts as I/O synchronization */ | ||
2442 | sky2_read32(hw, B0_Y2_SP_LISR); | 2452 | sky2_read32(hw, B0_Y2_SP_LISR); |
2443 | return 0; | 2453 | return 0; |
2444 | } else { | 2454 | } else { |
@@ -2534,17 +2544,6 @@ static int __devinit sky2_init(struct sky2_hw *hw) | |||
2534 | return -EOPNOTSUPP; | 2544 | return -EOPNOTSUPP; |
2535 | } | 2545 | } |
2536 | 2546 | ||
2537 | |||
2538 | /* Some Gigabyte motherboards have 88e8056 but cause problems | ||
2539 | * There is some unresolved hardware related problem that causes | ||
2540 | * descriptor errors and receive data corruption. | ||
2541 | */ | ||
2542 | if (hw->chip_id == CHIP_ID_YUKON_EC_U && dmi_blacklisted) { | ||
2543 | dev_err(&hw->pdev->dev, | ||
2544 | "88E8056 on this motherboard not supported\n"); | ||
2545 | return -EOPNOTSUPP; | ||
2546 | } | ||
2547 | |||
2548 | hw->pmd_type = sky2_read8(hw, B2_PMD_TYP); | 2547 | hw->pmd_type = sky2_read8(hw, B2_PMD_TYP); |
2549 | hw->ports = 1; | 2548 | hw->ports = 1; |
2550 | t8 = sky2_read8(hw, B2_Y2_HW_RES); | 2549 | t8 = sky2_read8(hw, B2_Y2_HW_RES); |
@@ -3910,24 +3909,8 @@ static struct pci_driver sky2_driver = { | |||
3910 | .shutdown = sky2_shutdown, | 3909 | .shutdown = sky2_shutdown, |
3911 | }; | 3910 | }; |
3912 | 3911 | ||
3913 | static struct dmi_system_id __initdata broken_dmi_table[] = { | ||
3914 | { | ||
3915 | .ident = "Gigabyte 965P-S3", | ||
3916 | .matches = { | ||
3917 | DMI_MATCH(DMI_SYS_VENDOR, "Gigabyte Technology Co., Ltd."), | ||
3918 | DMI_MATCH(DMI_PRODUCT_NAME, "965P-S3"), | ||
3919 | |||
3920 | }, | ||
3921 | }, | ||
3922 | { } | ||
3923 | }; | ||
3924 | |||
3925 | static int __init sky2_init_module(void) | 3912 | static int __init sky2_init_module(void) |
3926 | { | 3913 | { |
3927 | /* Look for sick motherboards */ | ||
3928 | if (dmi_check_system(broken_dmi_table)) | ||
3929 | dmi_blacklisted = 1; | ||
3930 | |||
3931 | return pci_register_driver(&sky2_driver); | 3914 | return pci_register_driver(&sky2_driver); |
3932 | } | 3915 | } |
3933 | 3916 | ||
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index 108adbf5b5e..c3964c3d89d 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c | |||
@@ -430,7 +430,8 @@ spider_net_prepare_rx_descr(struct spider_net_card *card, | |||
430 | /* and we need to have it 128 byte aligned, therefore we allocate a | 430 | /* and we need to have it 128 byte aligned, therefore we allocate a |
431 | * bit more */ | 431 | * bit more */ |
432 | /* allocate an skb */ | 432 | /* allocate an skb */ |
433 | descr->skb = dev_alloc_skb(bufsize + SPIDER_NET_RXBUF_ALIGN - 1); | 433 | descr->skb = netdev_alloc_skb(card->netdev, |
434 | bufsize + SPIDER_NET_RXBUF_ALIGN - 1); | ||
434 | if (!descr->skb) { | 435 | if (!descr->skb) { |
435 | if (netif_msg_rx_err(card) && net_ratelimit()) | 436 | if (netif_msg_rx_err(card) && net_ratelimit()) |
436 | pr_err("Not enough memory to allocate rx buffer\n"); | 437 | pr_err("Not enough memory to allocate rx buffer\n"); |
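netdev_alloc_skb() behaves like dev_alloc_skb() but associates the buffer with the receiving device at allocation time (it sets skb->dev and gives the stack a per-device allocation hook). The over-allocation in the hunk exists so the data area can then be pushed up to a SPIDER_NET_RXBUF_ALIGN boundary; roughly, and as a sketch of the refill path rather than the full function:

	descr->skb = netdev_alloc_skb(card->netdev,
				      bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
	if (!descr->skb)
		return -ENOMEM;

	/* move skb->data up to the next 128-byte boundary */
	offset = ((unsigned long)descr->skb->data) &
		 (SPIDER_NET_RXBUF_ALIGN - 1);
	if (offset)
		skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);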
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 95ce8f49e38..4e4c10a7fd3 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig | |||
@@ -59,7 +59,7 @@ comment "RTC interfaces" | |||
59 | depends on RTC_CLASS | 59 | depends on RTC_CLASS |
60 | 60 | ||
61 | config RTC_INTF_SYSFS | 61 | config RTC_INTF_SYSFS |
62 | boolean "sysfs" | 62 | boolean "/sys/class/rtc/rtcN (sysfs)" |
63 | depends on RTC_CLASS && SYSFS | 63 | depends on RTC_CLASS && SYSFS |
64 | default RTC_CLASS | 64 | default RTC_CLASS |
65 | help | 65 | help |
@@ -70,7 +70,7 @@ config RTC_INTF_SYSFS | |||
70 | will be called rtc-sysfs. | 70 | will be called rtc-sysfs. |
71 | 71 | ||
72 | config RTC_INTF_PROC | 72 | config RTC_INTF_PROC |
73 | boolean "proc" | 73 | boolean "/proc/driver/rtc (procfs for rtc0)" |
74 | depends on RTC_CLASS && PROC_FS | 74 | depends on RTC_CLASS && PROC_FS |
75 | default RTC_CLASS | 75 | default RTC_CLASS |
76 | help | 76 | help |
@@ -82,7 +82,7 @@ config RTC_INTF_PROC | |||
82 | will be called rtc-proc. | 82 | will be called rtc-proc. |
83 | 83 | ||
84 | config RTC_INTF_DEV | 84 | config RTC_INTF_DEV |
85 | boolean "dev" | 85 | boolean "/dev/rtcN (character devices)" |
86 | depends on RTC_CLASS | 86 | depends on RTC_CLASS |
87 | default RTC_CLASS | 87 | default RTC_CLASS |
88 | help | 88 | help |
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c index 60a8a4bb8bd..a2f84f16958 100644 --- a/drivers/rtc/rtc-omap.c +++ b/drivers/rtc/rtc-omap.c | |||
@@ -371,7 +371,7 @@ static int __devinit omap_rtc_probe(struct platform_device *pdev) | |||
371 | goto fail; | 371 | goto fail; |
372 | } | 372 | } |
373 | platform_set_drvdata(pdev, rtc); | 373 | platform_set_drvdata(pdev, rtc); |
374 | dev_set_devdata(&rtc->dev, mem); | 374 | dev_set_drvdata(&rtc->dev, mem); |
375 | 375 | ||
376 | /* clear pending irqs, and set 1/second periodic, | 376 | /* clear pending irqs, and set 1/second periodic, |
377 | * which we'll use instead of update irqs | 377 | * which we'll use instead of update irqs |
@@ -453,7 +453,7 @@ static int __devexit omap_rtc_remove(struct platform_device *pdev) | |||
453 | free_irq(omap_rtc_timer, rtc); | 453 | free_irq(omap_rtc_timer, rtc); |
454 | free_irq(omap_rtc_alarm, rtc); | 454 | free_irq(omap_rtc_alarm, rtc); |
455 | 455 | ||
456 | release_resource(dev_get_devdata(&rtc->dev)); | 456 | release_resource(dev_get_drvdata(&rtc->dev)); |
457 | rtc_device_unregister(rtc); | 457 | rtc_device_unregister(rtc); |
458 | return 0; | 458 | return 0; |
459 | } | 459 | } |
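The driver-core accessors are dev_set_drvdata()/dev_get_drvdata(); the _devdata spellings in the old code are not part of that API, so this is a build fix as much as a rename. The two calls must pair on the same struct device, in outline:

	/* probe: remember the MEM resource on the rtc class device */
	dev_set_drvdata(&rtc->dev, mem);

	/* remove: fetch it back from the same device and release it */
	release_resource(dev_get_drvdata(&rtc->dev));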
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c index 48e259a0167..c84dab083a8 100644 --- a/drivers/serial/8250.c +++ b/drivers/serial/8250.c | |||
@@ -894,7 +894,7 @@ static void autoconfig_16550a(struct uart_8250_port *up) | |||
894 | quot = serial_dl_read(up); | 894 | quot = serial_dl_read(up); |
895 | quot <<= 3; | 895 | quot <<= 3; |
896 | 896 | ||
897 | status1 = serial_in(up, 0x04); /* EXCR1 */ | 897 | status1 = serial_in(up, 0x04); /* EXCR2 */ |
898 | status1 &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */ | 898 | status1 &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */ |
899 | status1 |= 0x10; /* 1.625 divisor for baud_base --> 921600 */ | 899 | status1 |= 0x10; /* 1.625 divisor for baud_base --> 921600 */ |
900 | serial_outp(up, 0x04, status1); | 900 | serial_outp(up, 0x04, status1); |
@@ -2617,7 +2617,22 @@ void serial8250_suspend_port(int line) | |||
2617 | */ | 2617 | */ |
2618 | void serial8250_resume_port(int line) | 2618 | void serial8250_resume_port(int line) |
2619 | { | 2619 | { |
2620 | uart_resume_port(&serial8250_reg, &serial8250_ports[line].port); | 2620 | struct uart_8250_port *up = &serial8250_ports[line]; |
2621 | |||
2622 | if (up->capabilities & UART_NATSEMI) { | ||
2623 | unsigned char tmp; | ||
2624 | |||
2625 | /* Ensure it's still in high speed mode */ | ||
2626 | serial_outp(up, UART_LCR, 0xE0); | ||
2627 | |||
2628 | tmp = serial_in(up, 0x04); /* EXCR2 */ | ||
2629 | tmp &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */ | ||
2630 | tmp |= 0x10; /* 1.625 divisor for baud_base --> 921600 */ | ||
2631 | serial_outp(up, 0x04, tmp); | ||
2632 | |||
2633 | serial_outp(up, UART_LCR, 0); | ||
2634 | } | ||
2635 | uart_resume_port(&serial8250_reg, &up->port); | ||
2621 | } | 2636 | } |
2622 | 2637 | ||
2623 | /* | 2638 | /* |
@@ -2694,7 +2709,7 @@ static int serial8250_resume(struct platform_device *dev) | |||
2694 | struct uart_8250_port *up = &serial8250_ports[i]; | 2709 | struct uart_8250_port *up = &serial8250_ports[i]; |
2695 | 2710 | ||
2696 | if (up->port.type != PORT_UNKNOWN && up->port.dev == &dev->dev) | 2711 | if (up->port.type != PORT_UNKNOWN && up->port.dev == &dev->dev) |
2697 | uart_resume_port(&serial8250_reg, &up->port); | 2712 | serial8250_resume_port(i); |
2698 | } | 2713 | } |
2699 | 2714 | ||
2700 | return 0; | 2715 | return 0; |
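serial8250_resume() now routes through serial8250_resume_port() so ports flagged UART_NATSEMI get their extended registers reprogrammed after a suspend cycle clears them. The sequence, annotated (magic numbers exactly as in the hunk):

	serial_outp(up, UART_LCR, 0xE0);	/* unlock the extended register bank */

	tmp = serial_in(up, 0x04);		/* EXCR2 */
	tmp &= ~0xB0;				/* clear LOCK, mask out PRESL[01] */
	tmp |= 0x10;				/* 1.625 divisor: baud_base -> 921600 */
	serial_outp(up, 0x04, tmp);

	serial_outp(up, UART_LCR, 0);		/* back to the normal register map */

This mirrors the probe-time setup in autoconfig_16550a(), whose comment the earlier 8250 hunk corrects (register 0x04 in this bank is EXCR2, not EXCR1), so a resume restores exactly what probe configured.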
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c index 6202995e821..9d3105b64a7 100644 --- a/drivers/serial/icom.c +++ b/drivers/serial/icom.c | |||
@@ -69,33 +69,40 @@ | |||
69 | 69 | ||
70 | static const struct pci_device_id icom_pci_table[] = { | 70 | static const struct pci_device_id icom_pci_table[] = { |
71 | { | 71 | { |
72 | .vendor = PCI_VENDOR_ID_IBM, | 72 | .vendor = PCI_VENDOR_ID_IBM, |
73 | .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_1, | 73 | .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_1, |
74 | .subvendor = PCI_ANY_ID, | 74 | .subvendor = PCI_ANY_ID, |
75 | .subdevice = PCI_ANY_ID, | 75 | .subdevice = PCI_ANY_ID, |
76 | .driver_data = ADAPTER_V1, | 76 | .driver_data = ADAPTER_V1, |
77 | }, | 77 | }, |
78 | { | 78 | { |
79 | .vendor = PCI_VENDOR_ID_IBM, | 79 | .vendor = PCI_VENDOR_ID_IBM, |
80 | .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2, | 80 | .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2, |
81 | .subvendor = PCI_VENDOR_ID_IBM, | 81 | .subvendor = PCI_VENDOR_ID_IBM, |
82 | .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX, | 82 | .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX, |
83 | .driver_data = ADAPTER_V2, | 83 | .driver_data = ADAPTER_V2, |
84 | }, | 84 | }, |
85 | { | 85 | { |
86 | .vendor = PCI_VENDOR_ID_IBM, | 86 | .vendor = PCI_VENDOR_ID_IBM, |
87 | .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2, | 87 | .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2, |
88 | .subvendor = PCI_VENDOR_ID_IBM, | 88 | .subvendor = PCI_VENDOR_ID_IBM, |
89 | .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM, | 89 | .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM, |
90 | .driver_data = ADAPTER_V2, | 90 | .driver_data = ADAPTER_V2, |
91 | }, | 91 | }, |
92 | { | 92 | { |
93 | .vendor = PCI_VENDOR_ID_IBM, | 93 | .vendor = PCI_VENDOR_ID_IBM, |
94 | .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2, | 94 | .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2, |
95 | .subvendor = PCI_VENDOR_ID_IBM, | 95 | .subvendor = PCI_VENDOR_ID_IBM, |
96 | .subdevice = PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL, | 96 | .subdevice = PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL, |
97 | .driver_data = ADAPTER_V2, | 97 | .driver_data = ADAPTER_V2, |
98 | }, | 98 | }, |
99 | { | ||
100 | .vendor = PCI_VENDOR_ID_IBM, | ||
101 | .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2, | ||
102 | .subvendor = PCI_VENDOR_ID_IBM, | ||
103 | .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE, | ||
104 | .driver_data = ADAPTER_V2, | ||
105 | }, | ||
99 | {} | 106 | {} |
100 | }; | 107 | }; |
101 | 108 | ||
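The new entry follows the standard pci_device_id idiom: match on vendor/device, narrow by subsystem IDs, and carry an adapter version in driver_data; the table stays zero-terminated and is typically exported for module autoloading. A generic sketch (not the full icom table):

	static const struct pci_device_id example_pci_table[] = {
		{
			.vendor      = PCI_VENDOR_ID_IBM,
			.device      = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2,
			.subvendor   = PCI_VENDOR_ID_IBM,
			.subdevice   = PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE,
			.driver_data = ADAPTER_V2,	/* looked up via ent->driver_data in probe */
		},
		{}	/* terminating all-zero entry */
	};
	MODULE_DEVICE_TABLE(pci, example_pci_table);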
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index 2460b82a1d9..f46fe95f69f 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c | |||
@@ -368,9 +368,14 @@ static const char *vgacon_startup(void) | |||
368 | #endif | 368 | #endif |
369 | } | 369 | } |
370 | 370 | ||
371 | /* SCREEN_INFO initialized? */ | ||
372 | if ((ORIG_VIDEO_MODE == 0) && | ||
373 | (ORIG_VIDEO_LINES == 0) && | ||
374 | (ORIG_VIDEO_COLS == 0)) | ||
375 | goto no_vga; | ||
376 | |||
371 | /* VGA16 modes are not handled by VGACON */ | 377 | /* VGA16 modes are not handled by VGACON */ |
372 | if ((ORIG_VIDEO_MODE == 0x00) || /* SCREEN_INFO not initialized */ | 378 | if ((ORIG_VIDEO_MODE == 0x0D) || /* 320x200/4 */ |
373 | (ORIG_VIDEO_MODE == 0x0D) || /* 320x200/4 */ | ||
374 | (ORIG_VIDEO_MODE == 0x0E) || /* 640x200/4 */ | 379 | (ORIG_VIDEO_MODE == 0x0E) || /* 640x200/4 */ |
375 | (ORIG_VIDEO_MODE == 0x10) || /* 640x350/4 */ | 380 | (ORIG_VIDEO_MODE == 0x10) || /* 640x350/4 */ |
376 | (ORIG_VIDEO_MODE == 0x12) || /* 640x480/4 */ | 381 | (ORIG_VIDEO_MODE == 0x12) || /* 640x480/4 */ |
diff --git a/fs/adfs/super.c b/fs/adfs/super.c index 30c29650849..de2ed5ca335 100644 --- a/fs/adfs/super.c +++ b/fs/adfs/super.c | |||
@@ -232,8 +232,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag | |||
232 | { | 232 | { |
233 | struct adfs_inode_info *ei = (struct adfs_inode_info *) foo; | 233 | struct adfs_inode_info *ei = (struct adfs_inode_info *) foo; |
234 | 234 | ||
235 | if (flags & SLAB_CTOR_CONSTRUCTOR) | 235 | inode_init_once(&ei->vfs_inode); |
236 | inode_init_once(&ei->vfs_inode); | ||
237 | } | 236 | } |
238 | 237 | ||
239 | static int init_inodecache(void) | 238 | static int init_inodecache(void) |
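This hunk is the first of a long mechanical sweep below (affs, afs, befs, bfs, block_dev, cifs, coda, and so on): slab constructors are now invoked exactly once, when an object's slab page is allocated, so the SLAB_CTOR_CONSTRUCTOR flag was removed and the guard around the constructor body is dead code. The canonical constructor shape after the sweep:

	static void init_once(void *foo, struct kmem_cache *cachep,
			      unsigned long flags)
	{
		struct adfs_inode_info *ei = foo;

		/* runs once per object lifetime; objects returned to the
		 * cache and handed out again are NOT re-constructed */
		inode_init_once(&ei->vfs_inode);
	}

This once-only guarantee is also why constructors may safely contain one-time initializers such as mutex_init() and INIT_LIST_HEAD().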
diff --git a/fs/affs/super.c b/fs/affs/super.c index beff7d21e6e..b800d451cd6 100644 --- a/fs/affs/super.c +++ b/fs/affs/super.c | |||
@@ -87,11 +87,9 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag | |||
87 | { | 87 | { |
88 | struct affs_inode_info *ei = (struct affs_inode_info *) foo; | 88 | struct affs_inode_info *ei = (struct affs_inode_info *) foo; |
89 | 89 | ||
90 | if (flags & SLAB_CTOR_CONSTRUCTOR) { | 90 | init_MUTEX(&ei->i_link_lock); |
91 | init_MUTEX(&ei->i_link_lock); | 91 | init_MUTEX(&ei->i_ext_lock); |
92 | init_MUTEX(&ei->i_ext_lock); | 92 | inode_init_once(&ei->vfs_inode); |
93 | inode_init_once(&ei->vfs_inode); | ||
94 | } | ||
95 | } | 93 | } |
96 | 94 | ||
97 | static int init_inodecache(void) | 95 | static int init_inodecache(void) |
diff --git a/fs/afs/super.c b/fs/afs/super.c index 370cecc910d..8d47ad88a09 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c | |||
@@ -451,17 +451,15 @@ static void afs_i_init_once(void *_vnode, struct kmem_cache *cachep, | |||
451 | { | 451 | { |
452 | struct afs_vnode *vnode = _vnode; | 452 | struct afs_vnode *vnode = _vnode; |
453 | 453 | ||
454 | if (flags & SLAB_CTOR_CONSTRUCTOR) { | 454 | memset(vnode, 0, sizeof(*vnode)); |
455 | memset(vnode, 0, sizeof(*vnode)); | 455 | inode_init_once(&vnode->vfs_inode); |
456 | inode_init_once(&vnode->vfs_inode); | 456 | init_waitqueue_head(&vnode->update_waitq); |
457 | init_waitqueue_head(&vnode->update_waitq); | 457 | mutex_init(&vnode->permits_lock); |
458 | mutex_init(&vnode->permits_lock); | 458 | mutex_init(&vnode->validate_lock); |
459 | mutex_init(&vnode->validate_lock); | 459 | spin_lock_init(&vnode->writeback_lock); |
460 | spin_lock_init(&vnode->writeback_lock); | 460 | spin_lock_init(&vnode->lock); |
461 | spin_lock_init(&vnode->lock); | 461 | INIT_LIST_HEAD(&vnode->writebacks); |
462 | INIT_LIST_HEAD(&vnode->writebacks); | 462 | INIT_WORK(&vnode->cb_broken_work, afs_broken_callback_work); |
463 | INIT_WORK(&vnode->cb_broken_work, afs_broken_callback_work); | ||
464 | } | ||
465 | } | 463 | } |
466 | 464 | ||
467 | /* | 465 | /* |
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index fe96108a788..a5c5171c282 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c | |||
@@ -292,10 +292,8 @@ befs_destroy_inode(struct inode *inode) | |||
292 | static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) | 292 | static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) |
293 | { | 293 | { |
294 | struct befs_inode_info *bi = (struct befs_inode_info *) foo; | 294 | struct befs_inode_info *bi = (struct befs_inode_info *) foo; |
295 | 295 | ||
296 | if (flags & SLAB_CTOR_CONSTRUCTOR) { | 296 | inode_init_once(&bi->vfs_inode); |
297 | inode_init_once(&bi->vfs_inode); | ||
298 | } | ||
299 | } | 297 | } |
300 | 298 | ||
301 | static void | 299 | static void |
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c index edc08d89aab..58c7bd9f530 100644 --- a/fs/bfs/inode.c +++ b/fs/bfs/inode.c | |||
@@ -248,8 +248,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag | |||
248 | { | 248 | { |
249 | struct bfs_inode_info *bi = foo; | 249 | struct bfs_inode_info *bi = foo; |
250 | 250 | ||
251 | if (flags & SLAB_CTOR_CONSTRUCTOR) | 251 | inode_init_once(&bi->vfs_inode); |
252 | inode_init_once(&bi->vfs_inode); | ||
253 | } | 252 | } |
254 | 253 | ||
255 | static int init_inodecache(void) | 254 | static int init_inodecache(void) |
diff --git a/fs/block_dev.c b/fs/block_dev.c index 74289924087..ea1480a16f5 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -458,17 +458,15 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag | |||
458 | struct bdev_inode *ei = (struct bdev_inode *) foo; | 458 | struct bdev_inode *ei = (struct bdev_inode *) foo; |
459 | struct block_device *bdev = &ei->bdev; | 459 | struct block_device *bdev = &ei->bdev; |
460 | 460 | ||
461 | if (flags & SLAB_CTOR_CONSTRUCTOR) { | 461 | memset(bdev, 0, sizeof(*bdev)); |
462 | memset(bdev, 0, sizeof(*bdev)); | 462 | mutex_init(&bdev->bd_mutex); |
463 | mutex_init(&bdev->bd_mutex); | 463 | sema_init(&bdev->bd_mount_sem, 1); |
464 | sema_init(&bdev->bd_mount_sem, 1); | 464 | INIT_LIST_HEAD(&bdev->bd_inodes); |
465 | INIT_LIST_HEAD(&bdev->bd_inodes); | 465 | INIT_LIST_HEAD(&bdev->bd_list); |
466 | INIT_LIST_HEAD(&bdev->bd_list); | ||
467 | #ifdef CONFIG_SYSFS | 466 | #ifdef CONFIG_SYSFS |
468 | INIT_LIST_HEAD(&bdev->bd_holder_list); | 467 | INIT_LIST_HEAD(&bdev->bd_holder_list); |
469 | #endif | 468 | #endif |
470 | inode_init_once(&ei->vfs_inode); | 469 | inode_init_once(&ei->vfs_inode); |
471 | } | ||
472 | } | 470 | } |
473 | 471 | ||
474 | static inline void __bd_forget(struct inode *inode) | 472 | static inline void __bd_forget(struct inode *inode) |
diff --git a/fs/buffer.c b/fs/buffer.c index aecd057cd0e..49590d590d7 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -981,7 +981,8 @@ grow_dev_page(struct block_device *bdev, sector_t block, | |||
981 | struct page *page; | 981 | struct page *page; |
982 | struct buffer_head *bh; | 982 | struct buffer_head *bh; |
983 | 983 | ||
984 | page = find_or_create_page(inode->i_mapping, index, GFP_NOFS); | 984 | page = find_or_create_page(inode->i_mapping, index, |
985 | mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS); | ||
985 | if (!page) | 986 | if (!page) |
986 | return NULL; | 987 | return NULL; |
987 | 988 | ||
@@ -2898,8 +2899,9 @@ static void recalc_bh_state(void) | |||
2898 | 2899 | ||
2899 | struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) | 2900 | struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) |
2900 | { | 2901 | { |
2901 | struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags); | 2902 | struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); |
2902 | if (ret) { | 2903 | if (ret) { |
2904 | INIT_LIST_HEAD(&ret->b_assoc_buffers); | ||
2903 | get_cpu_var(bh_accounting).nr++; | 2905 | get_cpu_var(bh_accounting).nr++; |
2904 | recalc_bh_state(); | 2906 | recalc_bh_state(); |
2905 | put_cpu_var(bh_accounting); | 2907 | put_cpu_var(bh_accounting); |
@@ -2918,17 +2920,6 @@ void free_buffer_head(struct buffer_head *bh) | |||
2918 | } | 2920 | } |
2919 | EXPORT_SYMBOL(free_buffer_head); | 2921 | EXPORT_SYMBOL(free_buffer_head); |
2920 | 2922 | ||
2921 | static void | ||
2922 | init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags) | ||
2923 | { | ||
2924 | if (flags & SLAB_CTOR_CONSTRUCTOR) { | ||
2925 | struct buffer_head * bh = (struct buffer_head *)data; | ||
2926 | |||
2927 | memset(bh, 0, sizeof(*bh)); | ||
2928 | INIT_LIST_HEAD(&bh->b_assoc_buffers); | ||
2929 | } | ||
2930 | } | ||
2931 | |||
2932 | static void buffer_exit_cpu(int cpu) | 2923 | static void buffer_exit_cpu(int cpu) |
2933 | { | 2924 | { |
2934 | int i; | 2925 | int i; |
@@ -2955,12 +2946,8 @@ void __init buffer_init(void) | |||
2955 | { | 2946 | { |
2956 | int nrpages; | 2947 | int nrpages; |
2957 | 2948 | ||
2958 | bh_cachep = kmem_cache_create("buffer_head", | 2949 | bh_cachep = KMEM_CACHE(buffer_head, |
2959 | sizeof(struct buffer_head), 0, | 2950 | SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD); |
2960 | (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC| | ||
2961 | SLAB_MEM_SPREAD), | ||
2962 | init_buffer_head, | ||
2963 | NULL); | ||
2964 | 2951 | ||
2965 | /* | 2952 | /* |
2966 | * Limit the bh occupancy to 10% of ZONE_NORMAL | 2953 | * Limit the bh occupancy to 10% of ZONE_NORMAL |
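Two related simplifications here: kmem_cache_zalloc() zeroes the object at allocation (replacing the constructor's memset), and KMEM_CACHE() derives the cache name, size, and alignment from the struct itself. Under the assumptions of this kernel's API (a ctor and a dtor argument to kmem_cache_create()), the macro expands roughly to:

	bh_cachep = kmem_cache_create("buffer_head",
				      sizeof(struct buffer_head),
				      __alignof__(struct buffer_head),
				      SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD,
				      NULL, NULL);

The one piece of per-object state that still needs explicit setup, b_assoc_buffers, moves into alloc_buffer_head(), because zeroing does not produce a valid empty list_head (its pointers must point back at itself).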
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 8568e100953..d38c69b591c 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
@@ -701,10 +701,8 @@ cifs_init_once(void *inode, struct kmem_cache * cachep, unsigned long flags) | |||
701 | { | 701 | { |
702 | struct cifsInodeInfo *cifsi = inode; | 702 | struct cifsInodeInfo *cifsi = inode; |
703 | 703 | ||
704 | if (flags & SLAB_CTOR_CONSTRUCTOR) { | 704 | inode_init_once(&cifsi->vfs_inode); |
705 | inode_init_once(&cifsi->vfs_inode); | 705 | INIT_LIST_HEAD(&cifsi->lockList); |
706 | INIT_LIST_HEAD(&cifsi->lockList); | ||
707 | } | ||
708 | } | 706 | } |
709 | 707 | ||
710 | static int | 708 | static int |
diff --git a/fs/coda/inode.c b/fs/coda/inode.c index 0aaff3651d1..dbff1bd4fb9 100644 --- a/fs/coda/inode.c +++ b/fs/coda/inode.c | |||
@@ -62,8 +62,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag | |||
62 | { | 62 | { |
63 | struct coda_inode_info *ei = (struct coda_inode_info *) foo; | 63 | struct coda_inode_info *ei = (struct coda_inode_info *) foo; |
64 | 64 | ||
65 | if (flags & SLAB_CTOR_CONSTRUCTOR) | 65 | inode_init_once(&ei->vfs_inode); |
66 | inode_init_once(&ei->vfs_inode); | ||
67 | } | 66 | } |
68 | 67 | ||
69 | int coda_init_inodecache(void) | 68 | int coda_init_inodecache(void) |
diff --git a/fs/compat.c b/fs/compat.c index 7b21b0a8259..1de2331db84 100644 --- a/fs/compat.c +++ b/fs/compat.c | |||
@@ -2230,21 +2230,16 @@ asmlinkage long compat_sys_signalfd(int ufd, | |||
2230 | asmlinkage long compat_sys_timerfd(int ufd, int clockid, int flags, | 2230 | asmlinkage long compat_sys_timerfd(int ufd, int clockid, int flags, |
2231 | const struct compat_itimerspec __user *utmr) | 2231 | const struct compat_itimerspec __user *utmr) |
2232 | { | 2232 | { |
2233 | long res; | ||
2234 | struct itimerspec t; | 2233 | struct itimerspec t; |
2235 | struct itimerspec __user *ut; | 2234 | struct itimerspec __user *ut; |
2236 | 2235 | ||
2237 | res = -EFAULT; | ||
2238 | if (get_compat_itimerspec(&t, utmr)) | 2236 | if (get_compat_itimerspec(&t, utmr)) |
2239 | goto err_exit; | 2237 | return -EFAULT; |
2240 | ut = compat_alloc_user_space(sizeof(*ut)); | 2238 | ut = compat_alloc_user_space(sizeof(*ut)); |
2241 | if (copy_to_user(ut, &t, sizeof(t)) ) | 2239 | if (copy_to_user(ut, &t, sizeof(t))) |
2242 | goto err_exit; | 2240 | return -EFAULT; |
2243 | 2241 | ||
2244 | res = sys_timerfd(ufd, clockid, flags, ut); | 2242 | return sys_timerfd(ufd, clockid, flags, ut); |
2245 | err_exit: | ||
2246 | return res; | ||
2247 | } | 2243 | } |
2248 | 2244 | ||
2249 | #endif /* CONFIG_TIMERFD */ | 2245 | #endif /* CONFIG_TIMERFD */ |
2250 | |||
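This is the standard shape of a 32-bit compat syscall shim: convert the user's 32-bit structure to the native layout in kernel memory, stage it in scratch space on the user stack via compat_alloc_user_space(), and hand the resulting __user pointer to the native syscall. The same code as the hunk, with comments added:

	struct itimerspec t;
	struct itimerspec __user *ut;

	if (get_compat_itimerspec(&t, utmr))		/* 32-bit layout -> native, in kernel space */
		return -EFAULT;
	ut = compat_alloc_user_space(sizeof(*ut));	/* scratch area carved below the user SP */
	if (copy_to_user(ut, &t, sizeof(t)))
		return -EFAULT;
	return sys_timerfd(ufd, clockid, flags, ut);	/* native syscall does its own copy_from_user */

The cleanup only drops the res/goto plumbing in favour of direct returns; behaviour is unchanged.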
diff --git a/fs/dquot.c b/fs/dquot.c index 3a995841de9..8819d281500 100644 --- a/fs/dquot.c +++ b/fs/dquot.c | |||
@@ -1421,7 +1421,7 @@ int vfs_quota_off(struct super_block *sb, int type) | |||
1421 | /* If quota was reenabled in the meantime, we have | 1421 | /* If quota was reenabled in the meantime, we have |
1422 | * nothing to do */ | 1422 | * nothing to do */ |
1423 | if (!sb_has_quota_enabled(sb, cnt)) { | 1423 | if (!sb_has_quota_enabled(sb, cnt)) { |
1424 | mutex_lock(&toputinode[cnt]->i_mutex); | 1424 | mutex_lock_nested(&toputinode[cnt]->i_mutex, I_MUTEX_QUOTA); |
1425 | toputinode[cnt]->i_flags &= ~(S_IMMUTABLE | | 1425 | toputinode[cnt]->i_flags &= ~(S_IMMUTABLE | |
1426 | S_NOATIME | S_NOQUOTA); | 1426 | S_NOATIME | S_NOQUOTA); |
1427 | truncate_inode_pages(&toputinode[cnt]->i_data, 0); | 1427 | truncate_inode_pages(&toputinode[cnt]->i_data, 0); |
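mutex_lock_nested() changes nothing at runtime; it tells lockdep that this i_mutex acquisition belongs to the I_MUTEX_QUOTA subclass, so holding another inode's i_mutex (a different subclass) at the same time is not flagged as recursive locking. Illustrative pattern, not the full quota-off path:

	/* a directory inode's i_mutex may already be held ... */
	mutex_lock(&dir->i_mutex);

	/* ... taking the quota file's i_mutex is legitimate, but lockdep
	 * must be told it belongs to a distinct class */
	mutex_lock_nested(&quota_inode->i_mutex, I_MUTEX_QUOTA);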
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index 8cbf3f69ebe..606128f5c92 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c | |||
@@ -583,8 +583,7 @@ inode_info_init_once(void *vptr, struct kmem_cache *cachep, unsigned long flags) | |||
583 | { | 583 | { |
584 | struct ecryptfs_inode_info *ei = (struct ecryptfs_inode_info *)vptr; | 584 | struct ecryptfs_inode_info *ei = (struct ecryptfs_inode_info *)vptr; |
585 | 585 | ||
586 | if (flags & SLAB_CTOR_CONSTRUCTOR) | 586 | inode_init_once(&ei->vfs_inode); |
587 | inode_init_once(&ei->vfs_inode); | ||
588 | } | 587 | } |
589 | 588 | ||
590 | static struct ecryptfs_cache_info { | 589 | static struct ecryptfs_cache_info { |
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index 0770c4b66f5..88ea6697908 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c | |||
@@ -364,18 +364,14 @@ static int fill_zeros_to_end_of_page(struct page *page, unsigned int to) | |||
364 | { | 364 | { |
365 | struct inode *inode = page->mapping->host; | 365 | struct inode *inode = page->mapping->host; |
366 | int end_byte_in_page; | 366 | int end_byte_in_page; |
367 | char *page_virt; | ||
368 | 367 | ||
369 | if ((i_size_read(inode) / PAGE_CACHE_SIZE) != page->index) | 368 | if ((i_size_read(inode) / PAGE_CACHE_SIZE) != page->index) |
370 | goto out; | 369 | goto out; |
371 | end_byte_in_page = i_size_read(inode) % PAGE_CACHE_SIZE; | 370 | end_byte_in_page = i_size_read(inode) % PAGE_CACHE_SIZE; |
372 | if (to > end_byte_in_page) | 371 | if (to > end_byte_in_page) |
373 | end_byte_in_page = to; | 372 | end_byte_in_page = to; |
374 | page_virt = kmap_atomic(page, KM_USER0); | 373 | zero_user_page(page, end_byte_in_page, |
375 | memset((page_virt + end_byte_in_page), 0, | 374 | PAGE_CACHE_SIZE - end_byte_in_page, KM_USER0); |
376 | (PAGE_CACHE_SIZE - end_byte_in_page)); | ||
377 | kunmap_atomic(page_virt, KM_USER0); | ||
378 | flush_dcache_page(page); | ||
379 | out: | 375 | out: |
380 | return 0; | 376 | return 0; |
381 | } | 377 | } |
@@ -740,7 +736,6 @@ int write_zeros(struct file *file, pgoff_t index, int start, int num_zeros) | |||
740 | { | 736 | { |
741 | int rc = 0; | 737 | int rc = 0; |
742 | struct page *tmp_page; | 738 | struct page *tmp_page; |
743 | char *tmp_page_virt; | ||
744 | 739 | ||
745 | tmp_page = ecryptfs_get1page(file, index); | 740 | tmp_page = ecryptfs_get1page(file, index); |
746 | if (IS_ERR(tmp_page)) { | 741 | if (IS_ERR(tmp_page)) { |
@@ -757,10 +752,7 @@ int write_zeros(struct file *file, pgoff_t index, int start, int num_zeros) | |||
757 | page_cache_release(tmp_page); | 752 | page_cache_release(tmp_page); |
758 | goto out; | 753 | goto out; |
759 | } | 754 | } |
760 | tmp_page_virt = kmap_atomic(tmp_page, KM_USER0); | 755 | zero_user_page(tmp_page, start, num_zeros, KM_USER0); |
761 | memset(((char *)tmp_page_virt + start), 0, num_zeros); | ||
762 | kunmap_atomic(tmp_page_virt, KM_USER0); | ||
763 | flush_dcache_page(tmp_page); | ||
764 | rc = ecryptfs_commit_write(file, tmp_page, start, start + num_zeros); | 756 | rc = ecryptfs_commit_write(file, tmp_page, start, start + num_zeros); |
765 | if (rc < 0) { | 757 | if (rc < 0) { |
766 | ecryptfs_printk(KERN_ERR, "Error attempting to write zero's " | 758 | ecryptfs_printk(KERN_ERR, "Error attempting to write zero's " |
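Both hunks replace the same four-step open-coded sequence with zero_user_page(). The helper is, to a first approximation (a sketch of its intent, not the exact kernel definition):

	static inline void zero_user_page(struct page *page, unsigned offset,
					  unsigned size, enum km_type km)
	{
		void *addr = kmap_atomic(page, km);	/* temporary kernel mapping */
		memset(addr + offset, 0, size);		/* zero [offset, offset+size) */
		flush_dcache_page(page);		/* keep user-visible aliases coherent */
		kunmap_atomic(addr, km);
	}

Centralizing it avoids the classic bug of forgetting flush_dcache_page() on architectures with aliasing data caches.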
diff --git a/fs/efs/super.c b/fs/efs/super.c index ba7a8b9da0c..e0a6839e68a 100644 --- a/fs/efs/super.c +++ b/fs/efs/super.c | |||
@@ -72,8 +72,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag | |||
72 | { | 72 | { |
73 | struct efs_inode_info *ei = (struct efs_inode_info *) foo; | 73 | struct efs_inode_info *ei = (struct efs_inode_info *) foo; |
74 | 74 | ||
75 | if (flags & SLAB_CTOR_CONSTRUCTOR) | 75 | inode_init_once(&ei->vfs_inode); |
76 | inode_init_once(&ei->vfs_inode); | ||
77 | } | 76 | } |
78 | 77 | ||
79 | static int init_inodecache(void) | 78 | static int init_inodecache(void) |
diff --git a/fs/exec.c b/fs/exec.c --- a/fs/exec.c +++ b/fs/exec.c | |||
@@ -60,7 +60,7 @@ | |||
60 | #endif | 60 | #endif |
61 | 61 | ||
62 | int core_uses_pid; | 62 | int core_uses_pid; |
63 | char core_pattern[128] = "core"; | 63 | char core_pattern[CORENAME_MAX_SIZE] = "core"; |
64 | int suid_dumpable = 0; | 64 | int suid_dumpable = 0; |
65 | 65 | ||
66 | EXPORT_SYMBOL(suid_dumpable); | 66 | EXPORT_SYMBOL(suid_dumpable); |
@@ -1264,8 +1264,6 @@ int set_binfmt(struct linux_binfmt *new) | |||
1264 | 1264 | ||
1265 | EXPORT_SYMBOL(set_binfmt); | 1265 | EXPORT_SYMBOL(set_binfmt); |
1266 | 1266 | ||
1267 | #define CORENAME_MAX_SIZE 64 | ||
1268 | |||
1269 | /* format_corename will inspect the pattern parameter, and output a | 1267 | /* format_corename will inspect the pattern parameter, and output a |
1270 | * name into corename, which must have space for at least | 1268 | * name into corename, which must have space for at least |
1271 | * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator. | 1269 | * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator. |
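These two exec.c hunks are one fix: core_pattern was a 128-byte sysctl string while format_corename() expanded into a CORENAME_MAX_SIZE (64) buffer, so a long pattern could be accepted and then silently truncated at expansion time. Sizing the array with the shared constant (whose #define moves out of exec.c into a header, not shown here) restores the invariant:

	/* one shared bound for pattern storage and expansion */
	char core_pattern[CORENAME_MAX_SIZE] = "core";
	...
	char corename[CORENAME_MAX_SIZE + 1];	/* format_corename() output, NUL included */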
diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 685a1c28717..16337bff027 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c | |||
@@ -160,13 +160,11 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag | |||
160 | { | 160 | { |
161 | struct ext2_inode_info *ei = (struct ext2_inode_info *) foo; | 161 | struct ext2_inode_info *ei = (struct ext2_inode_info *) foo; |
162 | 162 | ||
163 | if (flags & SLAB_CTOR_CONSTRUCTOR) { | 163 | rwlock_init(&ei->i_meta_lock); |
164 | rwlock_init(&ei->i_meta_lock); | ||
165 | #ifdef CONFIG_EXT2_FS_XATTR | 164 | #ifdef CONFIG_EXT2_FS_XATTR |
166 | init_rwsem(&ei->xattr_sem); | 165 | init_rwsem(&ei->xattr_sem); |
167 | #endif | 166 | #endif |
168 | inode_init_once(&ei->vfs_inode); | 167 | inode_init_once(&ei->vfs_inode); |
169 | } | ||
170 | } | 168 | } |
171 | 169 | ||
172 | static int init_inodecache(void) | 170 | static int init_inodecache(void) |
diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 54d3c904125..6e3062913a9 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c | |||
@@ -466,14 +466,12 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag | |||
466 | { | 466 | { |
467 | struct ext3_inode_info *ei = (struct ext3_inode_info *) foo; | 467 | struct ext3_inode_info *ei = (struct ext3_inode_info *) foo; |
468 | 468 | ||
469 | if (flags & SLAB_CTOR_CONSTRUCTOR) { | 469 | INIT_LIST_HEAD(&ei->i_orphan); |
470 | INIT_LIST_HEAD(&ei->i_orphan); | ||
471 | #ifdef CONFIG_EXT3_FS_XATTR | 470 | #ifdef CONFIG_EXT3_FS_XATTR |
472 | init_rwsem(&ei->xattr_sem); | 471 | init_rwsem(&ei->xattr_sem); |
473 | #endif | 472 | #endif |
474 | mutex_init(&ei->truncate_mutex); | 473 | mutex_init(&ei->truncate_mutex); |
475 | inode_init_once(&ei->vfs_inode); | 474 | inode_init_once(&ei->vfs_inode); |
476 | } | ||
477 | } | 475 | } |
478 | 476 | ||
479 | static int init_inodecache(void) | 477 | static int init_inodecache(void) |
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 71912693235..cb9afdd0e26 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
@@ -517,14 +517,12 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag | |||
517 | { | 517 | { |
518 | struct ext4_inode_info *ei = (struct ext4_inode_info *) foo; | 518 | struct ext4_inode_info *ei = (struct ext4_inode_info *) foo; |
519 | 519 | ||
520 | if (flags & SLAB_CTOR_CONSTRUCTOR) { | 520 | INIT_LIST_HEAD(&ei->i_orphan); |
521 | INIT_LIST_HEAD(&ei->i_orphan); | ||
522 | #ifdef CONFIG_EXT4DEV_FS_XATTR | 521 | #ifdef CONFIG_EXT4DEV_FS_XATTR |
523 | init_rwsem(&ei->xattr_sem); | 522 | init_rwsem(&ei->xattr_sem); |
524 | #endif | 523 | #endif |
525 | mutex_init(&ei->truncate_mutex); | 524 | mutex_init(&ei->truncate_mutex); |
526 | inode_init_once(&ei->vfs_inode); | 525 | inode_init_once(&ei->vfs_inode); |
527 | } | ||
528 | } | 526 | } |
529 | 527 | ||
530 | static int init_inodecache(void) | 528 | static int init_inodecache(void) |
diff --git a/fs/fat/cache.c b/fs/fat/cache.c index 1959143c1d2..3c9c8a15ec7 100644 --- a/fs/fat/cache.c +++ b/fs/fat/cache.c | |||
@@ -40,8 +40,7 @@ static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags) | |||
40 | { | 40 | { |
41 | struct fat_cache *cache = (struct fat_cache *)foo; | 41 | struct fat_cache *cache = (struct fat_cache *)foo; |
42 | 42 | ||
43 | if (flags & SLAB_CTOR_CONSTRUCTOR) | 43 | INIT_LIST_HEAD(&cache->cache_list); |
44 | INIT_LIST_HEAD(&cache->cache_list); | ||
45 | } | 44 | } |
46 | 45 | ||
47 | int __init fat_cache_init(void) | 46 | int __init fat_cache_init(void) |
diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 2c55e8dce79..479722d8966 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c | |||
@@ -500,14 +500,12 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag | |||
500 | { | 500 | { |
501 | struct msdos_inode_info *ei = (struct msdos_inode_info *)foo; | 501 | struct msdos_inode_info *ei = (struct msdos_inode_info *)foo; |
502 | 502 | ||
503 | if (flags & SLAB_CTOR_CONSTRUCTOR) { | 503 | spin_lock_init(&ei->cache_lru_lock); |
504 | spin_lock_init(&ei->cache_lru_lock); | 504 | ei->nr_caches = 0; |
505 | ei->nr_caches = 0; | 505 | ei->cache_valid_id = FAT_CACHE_VALID + 1; |
506 | ei->cache_valid_id = FAT_CACHE_VALID + 1; | 506 | INIT_LIST_HEAD(&ei->cache_lru); |
507 | INIT_LIST_HEAD(&ei->cache_lru); | 507 | INIT_HLIST_NODE(&ei->i_fat_hash); |
508 | INIT_HLIST_NODE(&ei->i_fat_hash); | 508 | inode_init_once(&ei->vfs_inode); |
509 | inode_init_once(&ei->vfs_inode); | ||
510 | } | ||
511 | } | 509 | } |
512 | 510 | ||
513 | static int __init fat_init_inodecache(void) | 511 | static int __init fat_init_inodecache(void) |
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 1397018ff47..c3a2ad0da43 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c | |||
@@ -687,8 +687,7 @@ static void fuse_inode_init_once(void *foo, struct kmem_cache *cachep, | |||
687 | { | 687 | { |
688 | struct inode * inode = foo; | 688 | struct inode * inode = foo; |
689 | 689 | ||
690 | if (flags & SLAB_CTOR_CONSTRUCTOR) | 690 | inode_init_once(inode); |
691 | inode_init_once(inode); | ||
692 | } | 691 | } |
693 | 692 | ||
694 | static int __init fuse_fs_init(void) | 693 | static int __init fuse_fs_init(void) |
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index e460487c055..787a0edef10 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c | |||
@@ -27,29 +27,27 @@ | |||
27 | static void gfs2_init_inode_once(void *foo, struct kmem_cache *cachep, unsigned long flags) | 27 | static void gfs2_init_inode_once(void *foo, struct kmem_cache *cachep, unsigned long flags) |
28 | { | 28 | { |
29 | struct gfs2_inode *ip = foo; | 29 | struct gfs2_inode *ip = foo; |
30 | if (flags & SLAB_CTOR_CONSTRUCTOR) { | 30 | |
31 | inode_init_once(&ip->i_inode); | 31 | inode_init_once(&ip->i_inode); |
32 | spin_lock_init(&ip->i_spin); | 32 | spin_lock_init(&ip->i_spin); |
33 | init_rwsem(&ip->i_rw_mutex); | 33 | init_rwsem(&ip->i_rw_mutex); |
34 | memset(ip->i_cache, 0, sizeof(ip->i_cache)); | 34 | memset(ip->i_cache, 0, sizeof(ip->i_cache)); |
35 | } | ||
36 | } | 35 | } |
37 | 36 | ||
38 | static void gfs2_init_glock_once(void *foo, struct kmem_cache *cachep, unsigned long flags) | 37 | static void gfs2_init_glock_once(void *foo, struct kmem_cache *cachep, unsigned long flags) |
39 | { | 38 | { |
40 | struct gfs2_glock *gl = foo; | 39 | struct gfs2_glock *gl = foo; |
41 | if (flags & SLAB_CTOR_CONSTRUCTOR) { | 40 | |
42 | INIT_HLIST_NODE(&gl->gl_list); | 41 | INIT_HLIST_NODE(&gl->gl_list); |
43 | spin_lock_init(&gl->gl_spin); | 42 | spin_lock_init(&gl->gl_spin); |
44 | INIT_LIST_HEAD(&gl->gl_holders); | 43 | INIT_LIST_HEAD(&gl->gl_holders); |
45 | INIT_LIST_HEAD(&gl->gl_waiters1); | 44 | INIT_LIST_HEAD(&gl->gl_waiters1); |
46 | INIT_LIST_HEAD(&gl->gl_waiters3); | 45 | INIT_LIST_HEAD(&gl->gl_waiters3); |
47 | gl->gl_lvb = NULL; | 46 | gl->gl_lvb = NULL; |
48 | atomic_set(&gl->gl_lvb_count, 0); | 47 | atomic_set(&gl->gl_lvb_count, 0); |
49 | INIT_LIST_HEAD(&gl->gl_reclaim); | 48 | INIT_LIST_HEAD(&gl->gl_reclaim); |
50 | INIT_LIST_HEAD(&gl->gl_ail_list); | 49 | INIT_LIST_HEAD(&gl->gl_ail_list); |
51 | atomic_set(&gl->gl_ail_count, 0); | 50 | atomic_set(&gl->gl_ail_count, 0); |
52 | } | ||
53 | } | 51 | } |
54 | 52 | ||
55 | /** | 53 | /** |
diff --git a/fs/hfs/super.c b/fs/hfs/super.c index 4f1888f16cf..92cf8751e42 100644 --- a/fs/hfs/super.c +++ b/fs/hfs/super.c | |||
@@ -434,8 +434,7 @@ static void hfs_init_once(void *p, struct kmem_cache *cachep, unsigned long flag | |||
434 | { | 434 | { |
435 | struct hfs_inode_info *i = p; | 435 | struct hfs_inode_info *i = p; |
436 | 436 | ||
437 | if (flags & SLAB_CTOR_CONSTRUCTOR) | 437 | inode_init_once(&i->vfs_inode); |
438 | inode_init_once(&i->vfs_inode); | ||
439 | } | 438 | } |
440 | 439 | ||
441 | static int __init init_hfs_fs(void) | 440 | static int __init init_hfs_fs(void) |
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index 37afbec8a76..ebd1b380cbb 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c | |||
@@ -470,8 +470,7 @@ static void hfsplus_init_once(void *p, struct kmem_cache *cachep, unsigned long | |||
470 | { | 470 | { |
471 | struct hfsplus_inode_info *i = p; | 471 | struct hfsplus_inode_info *i = p; |
472 | 472 | ||
473 | if (flags & SLAB_CTOR_CONSTRUCTOR) | 473 | inode_init_once(&i->vfs_inode); |
474 | inode_init_once(&i->vfs_inode); | ||
475 | } | 474 | } |
476 | 475 | ||
477 | static int __init init_hfsplus_fs(void) | 476 | static int __init init_hfsplus_fs(void) |
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c index 1b95f39fbc3..fca1165d719 100644 --- a/fs/hpfs/super.c +++ b/fs/hpfs/super.c | |||
@@ -176,11 +176,9 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag | |||
176 | { | 176 | { |
177 | struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo; | 177 | struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo; |
178 | 178 | ||
179 | if (flags & SLAB_CTOR_CONSTRUCTOR) { | 179 | mutex_init(&ei->i_mutex); |
180 | mutex_init(&ei->i_mutex); | 180 | mutex_init(&ei->i_parent_mutex); |
181 | mutex_init(&ei->i_parent_mutex); | 181 | inode_init_once(&ei->vfs_inode); |
182 | inode_init_once(&ei->vfs_inode); | ||
183 | } | ||
184 | } | 182 | } |
185 | 183 | ||
186 | static int init_inodecache(void) | 184 | static int init_inodecache(void) |
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 98959b87cdf..aa083dd34e9 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c | |||
@@ -556,8 +556,7 @@ static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags) | |||
556 | { | 556 | { |
557 | struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo; | 557 | struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo; |
558 | 558 | ||
559 | if (flags & SLAB_CTOR_CONSTRUCTOR) | 559 | inode_init_once(&ei->vfs_inode); |
560 | inode_init_once(&ei->vfs_inode); | ||
561 | } | 560 | } |
562 | 561 | ||
563 | const struct file_operations hugetlbfs_file_operations = { | 562 | const struct file_operations hugetlbfs_file_operations = { |
diff --git a/fs/inode.c b/fs/inode.c index df2ef15d03d..9a012cc5b6c 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -213,8 +213,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag | |||
213 | { | 213 | { |
214 | struct inode * inode = (struct inode *) foo; | 214 | struct inode * inode = (struct inode *) foo; |
215 | 215 | ||
216 | if (flags & SLAB_CTOR_CONSTRUCTOR) | 216 | inode_init_once(inode); |
217 | inode_init_once(inode); | ||
218 | } | 217 | } |
219 | 218 | ||
220 | /* | 219 | /* |
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index e99f7ff4ecb..5c3eecf7542 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c | |||
@@ -77,8 +77,7 @@ static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags | |||
77 | { | 77 | { |
78 | struct iso_inode_info *ei = foo; | 78 | struct iso_inode_info *ei = foo; |
79 | 79 | ||
80 | if (flags & SLAB_CTOR_CONSTRUCTOR) | 80 | inode_init_once(&ei->vfs_inode); |
81 | inode_init_once(&ei->vfs_inode); | ||
82 | } | 81 | } |
83 | 82 | ||
84 | static int init_inodecache(void) | 83 | static int init_inodecache(void) |
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c index 45368f8bbe7..6488af43bc9 100644 --- a/fs/jffs2/super.c +++ b/fs/jffs2/super.c | |||
@@ -47,10 +47,8 @@ static void jffs2_i_init_once(void * foo, struct kmem_cache * cachep, unsigned l | |||
47 | { | 47 | { |
48 | struct jffs2_inode_info *ei = (struct jffs2_inode_info *) foo; | 48 | struct jffs2_inode_info *ei = (struct jffs2_inode_info *) foo; |
49 | 49 | ||
50 | if (flags & SLAB_CTOR_CONSTRUCTOR) { | 50 | init_MUTEX(&ei->sem); |
51 | init_MUTEX(&ei->sem); | 51 | inode_init_once(&ei->vfs_inode); |
52 | inode_init_once(&ei->vfs_inode); | ||
53 | } | ||
54 | } | 52 | } |
55 | 53 | ||
56 | static int jffs2_sync_fs(struct super_block *sb, int wait) | 54 | static int jffs2_sync_fs(struct super_block *sb, int wait) |
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c index 6b3acb0b578..43d4f69afbe 100644 --- a/fs/jfs/jfs_metapage.c +++ b/fs/jfs/jfs_metapage.c | |||
@@ -184,16 +184,14 @@ static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags) | |||
184 | { | 184 | { |
185 | struct metapage *mp = (struct metapage *)foo; | 185 | struct metapage *mp = (struct metapage *)foo; |
186 | 186 | ||
187 | if (flags & SLAB_CTOR_CONSTRUCTOR) { | 187 | mp->lid = 0; |
188 | mp->lid = 0; | 188 | mp->lsn = 0; |
189 | mp->lsn = 0; | 189 | mp->flag = 0; |
190 | mp->flag = 0; | 190 | mp->data = NULL; |
191 | mp->data = NULL; | 191 | mp->clsn = 0; |
192 | mp->clsn = 0; | 192 | mp->log = NULL; |
193 | mp->log = NULL; | 193 | set_bit(META_free, &mp->flag); |
194 | set_bit(META_free, &mp->flag); | 194 | init_waitqueue_head(&mp->wait); |
195 | init_waitqueue_head(&mp->wait); | ||
196 | } | ||
197 | } | 195 | } |
198 | 196 | ||
199 | static inline struct metapage *alloc_metapage(gfp_t gfp_mask) | 197 | static inline struct metapage *alloc_metapage(gfp_t gfp_mask) |
diff --git a/fs/jfs/super.c b/fs/jfs/super.c index ea9dc3e65dc..20e4ac1c79a 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c | |||
@@ -752,20 +752,18 @@ static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags | |||
752 | { | 752 | { |
753 | struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo; | 753 | struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo; |
754 | 754 | ||
755 | if (flags & SLAB_CTOR_CONSTRUCTOR) { | 755 | memset(jfs_ip, 0, sizeof(struct jfs_inode_info)); |
756 | memset(jfs_ip, 0, sizeof(struct jfs_inode_info)); | 756 | INIT_LIST_HEAD(&jfs_ip->anon_inode_list); |
757 | INIT_LIST_HEAD(&jfs_ip->anon_inode_list); | 757 | init_rwsem(&jfs_ip->rdwrlock); |
758 | init_rwsem(&jfs_ip->rdwrlock); | 758 | mutex_init(&jfs_ip->commit_mutex); |
759 | mutex_init(&jfs_ip->commit_mutex); | 759 | init_rwsem(&jfs_ip->xattr_sem); |
760 | init_rwsem(&jfs_ip->xattr_sem); | 760 | spin_lock_init(&jfs_ip->ag_lock); |
761 | spin_lock_init(&jfs_ip->ag_lock); | 761 | jfs_ip->active_ag = -1; |
762 | jfs_ip->active_ag = -1; | ||
763 | #ifdef CONFIG_JFS_POSIX_ACL | 762 | #ifdef CONFIG_JFS_POSIX_ACL |
764 | jfs_ip->i_acl = JFS_ACL_NOT_CACHED; | 763 | jfs_ip->i_acl = JFS_ACL_NOT_CACHED; |
765 | jfs_ip->i_default_acl = JFS_ACL_NOT_CACHED; | 764 | jfs_ip->i_default_acl = JFS_ACL_NOT_CACHED; |
766 | #endif | 765 | #endif |
767 | inode_init_once(&jfs_ip->vfs_inode); | 766 | inode_init_once(&jfs_ip->vfs_inode); |
768 | } | ||
769 | } | 767 | } |
770 | 768 | ||
771 | static int __init init_jfs_fs(void) | 769 | static int __init init_jfs_fs(void) |
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c index f4d45d4d835..d070b18e539 100644 --- a/fs/lockd/clntlock.c +++ b/fs/lockd/clntlock.c | |||
@@ -153,7 +153,7 @@ nlmclnt_recovery(struct nlm_host *host) | |||
153 | if (!host->h_reclaiming++) { | 153 | if (!host->h_reclaiming++) { |
154 | nlm_get_host(host); | 154 | nlm_get_host(host); |
155 | __module_get(THIS_MODULE); | 155 | __module_get(THIS_MODULE); |
156 | if (kernel_thread(reclaimer, host, CLONE_KERNEL) < 0) | 156 | if (kernel_thread(reclaimer, host, CLONE_FS | CLONE_FILES) < 0) |
157 | module_put(THIS_MODULE); | 157 | module_put(THIS_MODULE); |
158 | } | 158 | } |
159 | } | 159 | } |
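CLONE_KERNEL is defined in <linux/sched.h> as CLONE_FS | CLONE_FILES | CLONE_SIGHAND, so the change is precisely "stop sharing signal handlers", presumably so the reclaimer thread can manage its own signal state without affecting its parent:

	/* before: shared fs, files, AND signal handlers with the caller */
	kernel_thread(reclaimer, host, CLONE_KERNEL);

	/* after: same sharing minus CLONE_SIGHAND */
	kernel_thread(reclaimer, host, CLONE_FS | CLONE_FILES);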
diff --git a/fs/lockd/host.c b/fs/lockd/host.c index ad21c0713ef..96070bff93f 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c | |||
@@ -221,7 +221,7 @@ nlm_bind_host(struct nlm_host *host) | |||
221 | host->h_nextrebind - jiffies); | 221 | host->h_nextrebind - jiffies); |
222 | } | 222 | } |
223 | } else { | 223 | } else { |
224 | unsigned long increment = nlmsvc_timeout * HZ; | 224 | unsigned long increment = nlmsvc_timeout; |
225 | struct rpc_timeout timeparms = { | 225 | struct rpc_timeout timeparms = { |
226 | .to_initval = increment, | 226 | .to_initval = increment, |
227 | .to_increment = increment, | 227 | .to_increment = increment, |
diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c index 9702956d206..5316e307a49 100644 --- a/fs/lockd/xdr.c +++ b/fs/lockd/xdr.c | |||
@@ -586,10 +586,6 @@ static struct rpc_version nlm_version3 = { | |||
586 | .procs = nlm_procedures, | 586 | .procs = nlm_procedures, |
587 | }; | 587 | }; |
588 | 588 | ||
589 | #ifdef CONFIG_LOCKD_V4 | ||
590 | extern struct rpc_version nlm_version4; | ||
591 | #endif | ||
592 | |||
593 | static struct rpc_version * nlm_versions[] = { | 589 | static struct rpc_version * nlm_versions[] = { |
594 | [1] = &nlm_version1, | 590 | [1] = &nlm_version1, |
595 | [3] = &nlm_version3, | 591 | [3] = &nlm_version3, |
diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c index ce1efdbe1b3..846fc1d639d 100644 --- a/fs/lockd/xdr4.c +++ b/fs/lockd/xdr4.c | |||
@@ -123,7 +123,8 @@ static __be32 * | |||
123 | nlm4_decode_lock(__be32 *p, struct nlm_lock *lock) | 123 | nlm4_decode_lock(__be32 *p, struct nlm_lock *lock) |
124 | { | 124 | { |
125 | struct file_lock *fl = &lock->fl; | 125 | struct file_lock *fl = &lock->fl; |
126 | __s64 len, start, end; | 126 | __u64 len, start; |
127 | __s64 end; | ||
127 | 128 | ||
128 | if (!(p = xdr_decode_string_inplace(p, &lock->caller, | 129 | if (!(p = xdr_decode_string_inplace(p, &lock->caller, |
129 | &lock->len, NLM_MAXSTRLEN)) | 130 | &lock->len, NLM_MAXSTRLEN)) |
@@ -417,7 +418,8 @@ nlm4clt_decode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp) | |||
417 | if (resp->status == nlm_lck_denied) { | 418 | if (resp->status == nlm_lck_denied) { |
418 | struct file_lock *fl = &resp->lock.fl; | 419 | struct file_lock *fl = &resp->lock.fl; |
419 | u32 excl; | 420 | u32 excl; |
420 | s64 start, end, len; | 421 | __u64 start, len; |
422 | __s64 end; | ||
421 | 423 | ||
422 | memset(&resp->lock, 0, sizeof(resp->lock)); | 424 | memset(&resp->lock, 0, sizeof(resp->lock)); |
423 | locks_init_lock(fl); | 425 | locks_init_lock(fl); |
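NLMv4 carries 64-bit unsigned offsets and lengths on the wire; decoding them into s64 made ranges at or above 2^63 look negative. Keeping start/len as __u64 and only the computed end as __s64 preserves the existing "negative end means lock to EOF" convention. Roughly (a sketch of the decode logic, not the full helper):

	__u64 len, start;
	__s64 end;

	...
	end = start + len - 1;			/* as __s64: wraps negative past OFFSET_MAX */
	fl->fl_start = (loff_t)start;
	if (len == 0 || end < 0)
		fl->fl_end = OFFSET_MAX;	/* whole-file lock */
	else
		fl->fl_end = (loff_t)end;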
diff --git a/fs/locks.c b/fs/locks.c index 8ec16ab5ef7..431a8b871fc 100644 --- a/fs/locks.c +++ b/fs/locks.c | |||
@@ -203,9 +203,6 @@ static void init_once(void *foo, struct kmem_cache *cache, unsigned long flags) | |||
203 | { | 203 | { |
204 | struct file_lock *lock = (struct file_lock *) foo; | 204 | struct file_lock *lock = (struct file_lock *) foo; |
205 | 205 | ||
206 | if (!(flags & SLAB_CTOR_CONSTRUCTOR)) | ||
207 | return; | ||
208 | |||
209 | locks_init_lock(lock); | 206 | locks_init_lock(lock); |
210 | } | 207 | } |
211 | 208 | ||
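
This hunk is the first of a long series below (minix, ncpfs, nfs, ntfs, ocfs2/dlmfs, openpromfs, proc, qnx4, reiserfs, romfs, smbfs, sysv, udf, ufs, xfs) dropping the SLAB_CTOR_CONSTRUCTOR test: with the flag removed from slab.h (see the include/linux/slab.h hunk later in this section), a cache constructor is only ever invoked when an object is first constructed, so the guard was dead code. A hedged sketch of the resulting contract, using a hypothetical cache:

    /* Runs once per object when its slab page is populated; never at
     * free or reuse time (destructors are going away too -- see the
     * slub_def.h hunk below dropping ->dtor). */
    static void init_once(void *foo, struct kmem_cache *cachep,
                          unsigned long flags)
    {
        struct foo_info *ei = foo;          /* hypothetical type */
        inode_init_once(&ei->vfs_inode);
    }

    cachep = kmem_cache_create("foo_cache", sizeof(struct foo_info),
                               0, SLAB_RECLAIM_ACCOUNT, init_once, NULL);
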
diff --git a/fs/minix/inode.c b/fs/minix/inode.c index 2f4d43a2a31..be4044614ac 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c | |||
@@ -73,8 +73,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) | |||
73 | { | 73 | { |
74 | struct minix_inode_info *ei = (struct minix_inode_info *) foo; | 74 | struct minix_inode_info *ei = (struct minix_inode_info *) foo; |
75 | 75 | ||
76 | if (flags & SLAB_CTOR_CONSTRUCTOR) | 76 | inode_init_once(&ei->vfs_inode); |
77 | inode_init_once(&ei->vfs_inode); | ||
78 | } | 77 | } |
79 | 78 | ||
80 | static int init_inodecache(void) | 79 | static int init_inodecache(void) |
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c index c29f00ad495..cf06eb9f050 100644 --- a/fs/ncpfs/inode.c +++ b/fs/ncpfs/inode.c | |||
@@ -60,10 +60,8 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) | |||
60 | { | 60 | { |
61 | struct ncp_inode_info *ei = (struct ncp_inode_info *) foo; | 61 | struct ncp_inode_info *ei = (struct ncp_inode_info *) foo; |
62 | 62 | ||
63 | if (flags & SLAB_CTOR_CONSTRUCTOR) { | 63 | mutex_init(&ei->open_mutex); |
64 | mutex_init(&ei->open_mutex); | 64 | inode_init_once(&ei->vfs_inode); |
65 | inode_init_once(&ei->vfs_inode); | ||
66 | } | ||
67 | } | 65 | } |
68 | 66 | ||
69 | static int init_inodecache(void) | 67 | static int init_inodecache(void) |
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h index db3d7919c60..c2bb14e053e 100644 --- a/fs/nfs/callback.h +++ b/fs/nfs/callback.h | |||
@@ -24,7 +24,7 @@ enum nfs4_callback_opnum { | |||
24 | }; | 24 | }; |
25 | 25 | ||
26 | struct cb_compound_hdr_arg { | 26 | struct cb_compound_hdr_arg { |
27 | int taglen; | 27 | unsigned int taglen; |
28 | const char *tag; | 28 | const char *tag; |
29 | unsigned int callback_ident; | 29 | unsigned int callback_ident; |
30 | unsigned nops; | 30 | unsigned nops; |
@@ -32,7 +32,7 @@ struct cb_compound_hdr_arg { | |||
32 | 32 | ||
33 | struct cb_compound_hdr_res { | 33 | struct cb_compound_hdr_res { |
34 | __be32 *status; | 34 | __be32 *status; |
35 | int taglen; | 35 | unsigned int taglen; |
36 | const char *tag; | 36 | const char *tag; |
37 | __be32 *nops; | 37 | __be32 *nops; |
38 | }; | 38 | }; |
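
The tag length counts the bytes of an XDR opaque, which is unsigned on the wire; keeping it signed lets a negative value slip past a `taglen > maxlen` bound check and then become a huge size_t when used as a length. Sketch of the check this protects (the limit name is taken to be this file's CB_OP_TAGLEN_MAXSZ; treat it as an assumption):

    uint32_t taglen = ntohl(*p++);
    if (taglen > CB_OP_TAGLEN_MAXSZ)  /* safe now that taglen is unsigned */
        return htonl(NFS4ERR_RESOURCE);
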
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 841c99a9b11..7f37d1bea83 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c | |||
@@ -226,7 +226,7 @@ restart: | |||
226 | spin_unlock(&clp->cl_lock); | 226 | spin_unlock(&clp->cl_lock); |
227 | } | 227 | } |
228 | 228 | ||
229 | int nfs_do_expire_all_delegations(void *ptr) | 229 | static int nfs_do_expire_all_delegations(void *ptr) |
230 | { | 230 | { |
231 | struct nfs_client *clp = ptr; | 231 | struct nfs_client *clp = ptr; |
232 | struct nfs_delegation *delegation; | 232 | struct nfs_delegation *delegation; |
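
Another recurring pattern in this section (also in dir.c, nfs4proc.c, nfs4state.c, and write.c below): functions with no users outside their own file get internal linkage. Besides shrinking the symbol table, this is exactly the class of issue sparse flags (wording approximate):

    /* sparse: warning: symbol 'nfs_do_expire_all_delegations' was not
     * declared. Should it be static? */
    static int nfs_do_expire_all_delegations(void *ptr);
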
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 3df42881655..ac92e45432a 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c | |||
@@ -607,7 +607,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir) | |||
607 | return res; | 607 | return res; |
608 | } | 608 | } |
609 | 609 | ||
610 | loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int origin) | 610 | static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int origin) |
611 | { | 611 | { |
612 | mutex_lock(&filp->f_path.dentry->d_inode->i_mutex); | 612 | mutex_lock(&filp->f_path.dentry->d_inode->i_mutex); |
613 | switch (origin) { | 613 | switch (origin) { |
@@ -633,7 +633,7 @@ out: | |||
633 | * All directory operations under NFS are synchronous, so fsync() | 633 | * All directory operations under NFS are synchronous, so fsync() |
634 | * is a dummy operation. | 634 | * is a dummy operation. |
635 | */ | 635 | */ |
636 | int nfs_fsync_dir(struct file *filp, struct dentry *dentry, int datasync) | 636 | static int nfs_fsync_dir(struct file *filp, struct dentry *dentry, int datasync) |
637 | { | 637 | { |
638 | dfprintk(VFS, "NFS: fsync_dir(%s/%s) datasync %d\n", | 638 | dfprintk(VFS, "NFS: fsync_dir(%s/%s) datasync %d\n", |
639 | dentry->d_parent->d_name.name, dentry->d_name.name, | 639 | dentry->d_parent->d_name.name, dentry->d_name.name, |
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 2a3fd957320..2b26ad7c977 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c | |||
@@ -1164,21 +1164,19 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) | |||
1164 | { | 1164 | { |
1165 | struct nfs_inode *nfsi = (struct nfs_inode *) foo; | 1165 | struct nfs_inode *nfsi = (struct nfs_inode *) foo; |
1166 | 1166 | ||
1167 | if (flags & SLAB_CTOR_CONSTRUCTOR) { | 1167 | inode_init_once(&nfsi->vfs_inode); |
1168 | inode_init_once(&nfsi->vfs_inode); | 1168 | spin_lock_init(&nfsi->req_lock); |
1169 | spin_lock_init(&nfsi->req_lock); | 1169 | INIT_LIST_HEAD(&nfsi->dirty); |
1170 | INIT_LIST_HEAD(&nfsi->dirty); | 1170 | INIT_LIST_HEAD(&nfsi->commit); |
1171 | INIT_LIST_HEAD(&nfsi->commit); | 1171 | INIT_LIST_HEAD(&nfsi->open_files); |
1172 | INIT_LIST_HEAD(&nfsi->open_files); | 1172 | INIT_LIST_HEAD(&nfsi->access_cache_entry_lru); |
1173 | INIT_LIST_HEAD(&nfsi->access_cache_entry_lru); | 1173 | INIT_LIST_HEAD(&nfsi->access_cache_inode_lru); |
1174 | INIT_LIST_HEAD(&nfsi->access_cache_inode_lru); | 1174 | INIT_RADIX_TREE(&nfsi->nfs_page_tree, GFP_ATOMIC); |
1175 | INIT_RADIX_TREE(&nfsi->nfs_page_tree, GFP_ATOMIC); | 1175 | atomic_set(&nfsi->data_updates, 0); |
1176 | atomic_set(&nfsi->data_updates, 0); | 1176 | nfsi->ndirty = 0; |
1177 | nfsi->ndirty = 0; | 1177 | nfsi->ncommit = 0; |
1178 | nfsi->ncommit = 0; | 1178 | nfsi->npages = 0; |
1179 | nfsi->npages = 0; | 1179 | nfs4_init_once(nfsi); |
1180 | nfs4_init_once(nfsi); | ||
1181 | } | ||
1182 | } | 1180 | } |
1183 | 1181 | ||
1184 | static int __init nfs_init_inodecache(void) | 1182 | static int __init nfs_init_inodecache(void) |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index d6a30e96578..648e0ac0f90 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -790,7 +790,7 @@ out: | |||
790 | return -EACCES; | 790 | return -EACCES; |
791 | } | 791 | } |
792 | 792 | ||
793 | int nfs4_recover_expired_lease(struct nfs_server *server) | 793 | static int nfs4_recover_expired_lease(struct nfs_server *server) |
794 | { | 794 | { |
795 | struct nfs_client *clp = server->nfs_client; | 795 | struct nfs_client *clp = server->nfs_client; |
796 | int ret; | 796 | int ret; |
@@ -2748,7 +2748,7 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout) | |||
2748 | /* This is the error handling routine for processes that are allowed | 2748 | /* This is the error handling routine for processes that are allowed |
2749 | * to sleep. | 2749 | * to sleep. |
2750 | */ | 2750 | */ |
2751 | int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception) | 2751 | static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception) |
2752 | { | 2752 | { |
2753 | struct nfs_client *clp = server->nfs_client; | 2753 | struct nfs_client *clp = server->nfs_client; |
2754 | int ret = errorcode; | 2754 | int ret = errorcode; |
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 5fffbdfa971..8ed79d5c54f 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
@@ -104,7 +104,7 @@ struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp) | |||
104 | return cred; | 104 | return cred; |
105 | } | 105 | } |
106 | 106 | ||
107 | struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp) | 107 | static struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp) |
108 | { | 108 | { |
109 | struct nfs4_state_owner *sp; | 109 | struct nfs4_state_owner *sp; |
110 | 110 | ||
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 938f3716678..8003c91ccb9 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c | |||
@@ -646,10 +646,10 @@ static int encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg) | |||
646 | { | 646 | { |
647 | __be32 *p; | 647 | __be32 *p; |
648 | 648 | ||
649 | RESERVE_SPACE(8+sizeof(arg->stateid->data)); | 649 | RESERVE_SPACE(8+NFS4_STATEID_SIZE); |
650 | WRITE32(OP_CLOSE); | 650 | WRITE32(OP_CLOSE); |
651 | WRITE32(arg->seqid->sequence->counter); | 651 | WRITE32(arg->seqid->sequence->counter); |
652 | WRITEMEM(arg->stateid->data, sizeof(arg->stateid->data)); | 652 | WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE); |
653 | 653 | ||
654 | return 0; | 654 | return 0; |
655 | } | 655 | } |
@@ -793,17 +793,17 @@ static int encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args) | |||
793 | WRITE64(nfs4_lock_length(args->fl)); | 793 | WRITE64(nfs4_lock_length(args->fl)); |
794 | WRITE32(args->new_lock_owner); | 794 | WRITE32(args->new_lock_owner); |
795 | if (args->new_lock_owner){ | 795 | if (args->new_lock_owner){ |
796 | RESERVE_SPACE(40); | 796 | RESERVE_SPACE(4+NFS4_STATEID_SIZE+20); |
797 | WRITE32(args->open_seqid->sequence->counter); | 797 | WRITE32(args->open_seqid->sequence->counter); |
798 | WRITEMEM(args->open_stateid->data, sizeof(args->open_stateid->data)); | 798 | WRITEMEM(args->open_stateid->data, NFS4_STATEID_SIZE); |
799 | WRITE32(args->lock_seqid->sequence->counter); | 799 | WRITE32(args->lock_seqid->sequence->counter); |
800 | WRITE64(args->lock_owner.clientid); | 800 | WRITE64(args->lock_owner.clientid); |
801 | WRITE32(4); | 801 | WRITE32(4); |
802 | WRITE32(args->lock_owner.id); | 802 | WRITE32(args->lock_owner.id); |
803 | } | 803 | } |
804 | else { | 804 | else { |
805 | RESERVE_SPACE(20); | 805 | RESERVE_SPACE(NFS4_STATEID_SIZE+4); |
806 | WRITEMEM(args->lock_stateid->data, sizeof(args->lock_stateid->data)); | 806 | WRITEMEM(args->lock_stateid->data, NFS4_STATEID_SIZE); |
807 | WRITE32(args->lock_seqid->sequence->counter); | 807 | WRITE32(args->lock_seqid->sequence->counter); |
808 | } | 808 | } |
809 | 809 | ||
@@ -830,11 +830,11 @@ static int encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *args) | |||
830 | { | 830 | { |
831 | __be32 *p; | 831 | __be32 *p; |
832 | 832 | ||
833 | RESERVE_SPACE(44); | 833 | RESERVE_SPACE(12+NFS4_STATEID_SIZE+16); |
834 | WRITE32(OP_LOCKU); | 834 | WRITE32(OP_LOCKU); |
835 | WRITE32(nfs4_lock_type(args->fl, 0)); | 835 | WRITE32(nfs4_lock_type(args->fl, 0)); |
836 | WRITE32(args->seqid->sequence->counter); | 836 | WRITE32(args->seqid->sequence->counter); |
837 | WRITEMEM(args->stateid->data, sizeof(args->stateid->data)); | 837 | WRITEMEM(args->stateid->data, NFS4_STATEID_SIZE); |
838 | WRITE64(args->fl->fl_start); | 838 | WRITE64(args->fl->fl_start); |
839 | WRITE64(nfs4_lock_length(args->fl)); | 839 | WRITE64(nfs4_lock_length(args->fl)); |
840 | 840 | ||
@@ -966,9 +966,9 @@ static inline void encode_claim_delegate_cur(struct xdr_stream *xdr, const struc | |||
966 | { | 966 | { |
967 | __be32 *p; | 967 | __be32 *p; |
968 | 968 | ||
969 | RESERVE_SPACE(4+sizeof(stateid->data)); | 969 | RESERVE_SPACE(4+NFS4_STATEID_SIZE); |
970 | WRITE32(NFS4_OPEN_CLAIM_DELEGATE_CUR); | 970 | WRITE32(NFS4_OPEN_CLAIM_DELEGATE_CUR); |
971 | WRITEMEM(stateid->data, sizeof(stateid->data)); | 971 | WRITEMEM(stateid->data, NFS4_STATEID_SIZE); |
972 | encode_string(xdr, name->len, name->name); | 972 | encode_string(xdr, name->len, name->name); |
973 | } | 973 | } |
974 | 974 | ||
@@ -996,9 +996,9 @@ static int encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_confirmargs *arg) | |||
996 | { | 996 | { |
997 | __be32 *p; | 997 | __be32 *p; |
998 | 998 | ||
999 | RESERVE_SPACE(8+sizeof(arg->stateid->data)); | 999 | RESERVE_SPACE(4+NFS4_STATEID_SIZE+4); |
1000 | WRITE32(OP_OPEN_CONFIRM); | 1000 | WRITE32(OP_OPEN_CONFIRM); |
1001 | WRITEMEM(arg->stateid->data, sizeof(arg->stateid->data)); | 1001 | WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE); |
1002 | WRITE32(arg->seqid->sequence->counter); | 1002 | WRITE32(arg->seqid->sequence->counter); |
1003 | 1003 | ||
1004 | return 0; | 1004 | return 0; |
@@ -1008,9 +1008,9 @@ static int encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_closeargs *arg) | |||
1008 | { | 1008 | { |
1009 | __be32 *p; | 1009 | __be32 *p; |
1010 | 1010 | ||
1011 | RESERVE_SPACE(8+sizeof(arg->stateid->data)); | 1011 | RESERVE_SPACE(4+NFS4_STATEID_SIZE+4); |
1012 | WRITE32(OP_OPEN_DOWNGRADE); | 1012 | WRITE32(OP_OPEN_DOWNGRADE); |
1013 | WRITEMEM(arg->stateid->data, sizeof(arg->stateid->data)); | 1013 | WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE); |
1014 | WRITE32(arg->seqid->sequence->counter); | 1014 | WRITE32(arg->seqid->sequence->counter); |
1015 | encode_share_access(xdr, arg->open_flags); | 1015 | encode_share_access(xdr, arg->open_flags); |
1016 | return 0; | 1016 | return 0; |
@@ -1045,12 +1045,12 @@ static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context *ctx) | |||
1045 | nfs4_stateid stateid; | 1045 | nfs4_stateid stateid; |
1046 | __be32 *p; | 1046 | __be32 *p; |
1047 | 1047 | ||
1048 | RESERVE_SPACE(16); | 1048 | RESERVE_SPACE(NFS4_STATEID_SIZE); |
1049 | if (ctx->state != NULL) { | 1049 | if (ctx->state != NULL) { |
1050 | nfs4_copy_stateid(&stateid, ctx->state, ctx->lockowner); | 1050 | nfs4_copy_stateid(&stateid, ctx->state, ctx->lockowner); |
1051 | WRITEMEM(stateid.data, sizeof(stateid.data)); | 1051 | WRITEMEM(stateid.data, NFS4_STATEID_SIZE); |
1052 | } else | 1052 | } else |
1053 | WRITEMEM(zero_stateid.data, sizeof(zero_stateid.data)); | 1053 | WRITEMEM(zero_stateid.data, NFS4_STATEID_SIZE); |
1054 | } | 1054 | } |
1055 | 1055 | ||
1056 | static int encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args) | 1056 | static int encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args) |
@@ -1079,10 +1079,10 @@ static int encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg | |||
1079 | int replen; | 1079 | int replen; |
1080 | __be32 *p; | 1080 | __be32 *p; |
1081 | 1081 | ||
1082 | RESERVE_SPACE(32+sizeof(nfs4_verifier)); | 1082 | RESERVE_SPACE(12+NFS4_VERIFIER_SIZE+20); |
1083 | WRITE32(OP_READDIR); | 1083 | WRITE32(OP_READDIR); |
1084 | WRITE64(readdir->cookie); | 1084 | WRITE64(readdir->cookie); |
1085 | WRITEMEM(readdir->verifier.data, sizeof(readdir->verifier.data)); | 1085 | WRITEMEM(readdir->verifier.data, NFS4_VERIFIER_SIZE); |
1086 | WRITE32(readdir->count >> 1); /* We're not doing readdirplus */ | 1086 | WRITE32(readdir->count >> 1); /* We're not doing readdirplus */ |
1087 | WRITE32(readdir->count); | 1087 | WRITE32(readdir->count); |
1088 | WRITE32(2); | 1088 | WRITE32(2); |
@@ -1190,9 +1190,9 @@ encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg) | |||
1190 | { | 1190 | { |
1191 | __be32 *p; | 1191 | __be32 *p; |
1192 | 1192 | ||
1193 | RESERVE_SPACE(4+sizeof(zero_stateid.data)); | 1193 | RESERVE_SPACE(4+NFS4_STATEID_SIZE); |
1194 | WRITE32(OP_SETATTR); | 1194 | WRITE32(OP_SETATTR); |
1195 | WRITEMEM(zero_stateid.data, sizeof(zero_stateid.data)); | 1195 | WRITEMEM(zero_stateid.data, NFS4_STATEID_SIZE); |
1196 | RESERVE_SPACE(2*4); | 1196 | RESERVE_SPACE(2*4); |
1197 | WRITE32(1); | 1197 | WRITE32(1); |
1198 | WRITE32(FATTR4_WORD0_ACL); | 1198 | WRITE32(FATTR4_WORD0_ACL); |
@@ -1220,9 +1220,9 @@ static int encode_setattr(struct xdr_stream *xdr, const struct nfs_setattrargs * | |||
1220 | int status; | 1220 | int status; |
1221 | __be32 *p; | 1221 | __be32 *p; |
1222 | 1222 | ||
1223 | RESERVE_SPACE(4+sizeof(arg->stateid.data)); | 1223 | RESERVE_SPACE(4+NFS4_STATEID_SIZE); |
1224 | WRITE32(OP_SETATTR); | 1224 | WRITE32(OP_SETATTR); |
1225 | WRITEMEM(arg->stateid.data, sizeof(arg->stateid.data)); | 1225 | WRITEMEM(arg->stateid.data, NFS4_STATEID_SIZE); |
1226 | 1226 | ||
1227 | if ((status = encode_attrs(xdr, arg->iap, server))) | 1227 | if ((status = encode_attrs(xdr, arg->iap, server))) |
1228 | return status; | 1228 | return status; |
@@ -1234,9 +1234,9 @@ static int encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclientid *setclientid) | |||
1234 | { | 1234 | { |
1235 | __be32 *p; | 1235 | __be32 *p; |
1236 | 1236 | ||
1237 | RESERVE_SPACE(4 + sizeof(setclientid->sc_verifier->data)); | 1237 | RESERVE_SPACE(4 + NFS4_VERIFIER_SIZE); |
1238 | WRITE32(OP_SETCLIENTID); | 1238 | WRITE32(OP_SETCLIENTID); |
1239 | WRITEMEM(setclientid->sc_verifier->data, sizeof(setclientid->sc_verifier->data)); | 1239 | WRITEMEM(setclientid->sc_verifier->data, NFS4_VERIFIER_SIZE); |
1240 | 1240 | ||
1241 | encode_string(xdr, setclientid->sc_name_len, setclientid->sc_name); | 1241 | encode_string(xdr, setclientid->sc_name_len, setclientid->sc_name); |
1242 | RESERVE_SPACE(4); | 1242 | RESERVE_SPACE(4); |
@@ -1253,10 +1253,10 @@ static int encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs_client *client_state) | |||
1253 | { | 1253 | { |
1254 | __be32 *p; | 1254 | __be32 *p; |
1255 | 1255 | ||
1256 | RESERVE_SPACE(12 + sizeof(client_state->cl_confirm.data)); | 1256 | RESERVE_SPACE(12 + NFS4_VERIFIER_SIZE); |
1257 | WRITE32(OP_SETCLIENTID_CONFIRM); | 1257 | WRITE32(OP_SETCLIENTID_CONFIRM); |
1258 | WRITE64(client_state->cl_clientid); | 1258 | WRITE64(client_state->cl_clientid); |
1259 | WRITEMEM(client_state->cl_confirm.data, sizeof(client_state->cl_confirm.data)); | 1259 | WRITEMEM(client_state->cl_confirm.data, NFS4_VERIFIER_SIZE); |
1260 | 1260 | ||
1261 | return 0; | 1261 | return 0; |
1262 | } | 1262 | } |
@@ -1284,10 +1284,10 @@ static int encode_delegreturn(struct xdr_stream *xdr, const nfs4_stateid *stateid) | |||
1284 | { | 1284 | { |
1285 | __be32 *p; | 1285 | __be32 *p; |
1286 | 1286 | ||
1287 | RESERVE_SPACE(20); | 1287 | RESERVE_SPACE(4+NFS4_STATEID_SIZE); |
1288 | 1288 | ||
1289 | WRITE32(OP_DELEGRETURN); | 1289 | WRITE32(OP_DELEGRETURN); |
1290 | WRITEMEM(stateid->data, sizeof(stateid->data)); | 1290 | WRITEMEM(stateid->data, NFS4_STATEID_SIZE); |
1291 | return 0; | 1291 | return 0; |
1292 | 1292 | ||
1293 | } | 1293 | } |
@@ -2494,7 +2494,7 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st | |||
2494 | int i; | 2494 | int i; |
2495 | dprintk("%s: using first %d of %d servers returned for location %d\n", __FUNCTION__, NFS4_FS_LOCATION_MAXSERVERS, m, res->nlocations); | 2495 | dprintk("%s: using first %d of %d servers returned for location %d\n", __FUNCTION__, NFS4_FS_LOCATION_MAXSERVERS, m, res->nlocations); |
2496 | for (i = loc->nservers; i < m; i++) { | 2496 | for (i = loc->nservers; i < m; i++) { |
2497 | int len; | 2497 | unsigned int len; |
2498 | char *data; | 2498 | char *data; |
2499 | status = decode_opaque_inline(xdr, &len, &data); | 2499 | status = decode_opaque_inline(xdr, &len, &data); |
2500 | if (unlikely(status != 0)) | 2500 | if (unlikely(status != 0)) |
@@ -2642,7 +2642,7 @@ static int decode_attr_nlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t | |||
2642 | return 0; | 2642 | return 0; |
2643 | } | 2643 | } |
2644 | 2644 | ||
2645 | static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_client *clp, int32_t *uid) | 2645 | static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_client *clp, uint32_t *uid) |
2646 | { | 2646 | { |
2647 | uint32_t len; | 2647 | uint32_t len; |
2648 | __be32 *p; | 2648 | __be32 *p; |
@@ -2667,7 +2667,7 @@ static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap, struct nf | |||
2667 | return 0; | 2667 | return 0; |
2668 | } | 2668 | } |
2669 | 2669 | ||
2670 | static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_client *clp, int32_t *gid) | 2670 | static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_client *clp, uint32_t *gid) |
2671 | { | 2671 | { |
2672 | uint32_t len; | 2672 | uint32_t len; |
2673 | __be32 *p; | 2673 | __be32 *p; |
@@ -2897,8 +2897,8 @@ static int decode_close(struct xdr_stream *xdr, struct nfs_closeres *res) | |||
2897 | status = decode_op_hdr(xdr, OP_CLOSE); | 2897 | status = decode_op_hdr(xdr, OP_CLOSE); |
2898 | if (status) | 2898 | if (status) |
2899 | return status; | 2899 | return status; |
2900 | READ_BUF(sizeof(res->stateid.data)); | 2900 | READ_BUF(NFS4_STATEID_SIZE); |
2901 | COPYMEM(res->stateid.data, sizeof(res->stateid.data)); | 2901 | COPYMEM(res->stateid.data, NFS4_STATEID_SIZE); |
2902 | return 0; | 2902 | return 0; |
2903 | } | 2903 | } |
2904 | 2904 | ||
@@ -3186,8 +3186,8 @@ static int decode_lock(struct xdr_stream *xdr, struct nfs_lock_res *res) | |||
3186 | 3186 | ||
3187 | status = decode_op_hdr(xdr, OP_LOCK); | 3187 | status = decode_op_hdr(xdr, OP_LOCK); |
3188 | if (status == 0) { | 3188 | if (status == 0) { |
3189 | READ_BUF(sizeof(res->stateid.data)); | 3189 | READ_BUF(NFS4_STATEID_SIZE); |
3190 | COPYMEM(res->stateid.data, sizeof(res->stateid.data)); | 3190 | COPYMEM(res->stateid.data, NFS4_STATEID_SIZE); |
3191 | } else if (status == -NFS4ERR_DENIED) | 3191 | } else if (status == -NFS4ERR_DENIED) |
3192 | return decode_lock_denied(xdr, NULL); | 3192 | return decode_lock_denied(xdr, NULL); |
3193 | return status; | 3193 | return status; |
@@ -3209,8 +3209,8 @@ static int decode_locku(struct xdr_stream *xdr, struct nfs_locku_res *res) | |||
3209 | 3209 | ||
3210 | status = decode_op_hdr(xdr, OP_LOCKU); | 3210 | status = decode_op_hdr(xdr, OP_LOCKU); |
3211 | if (status == 0) { | 3211 | if (status == 0) { |
3212 | READ_BUF(sizeof(res->stateid.data)); | 3212 | READ_BUF(NFS4_STATEID_SIZE); |
3213 | COPYMEM(res->stateid.data, sizeof(res->stateid.data)); | 3213 | COPYMEM(res->stateid.data, NFS4_STATEID_SIZE); |
3214 | } | 3214 | } |
3215 | return status; | 3215 | return status; |
3216 | } | 3216 | } |
@@ -3251,8 +3251,8 @@ static int decode_delegation(struct xdr_stream *xdr, struct nfs_openres *res) | |||
3251 | res->delegation_type = 0; | 3251 | res->delegation_type = 0; |
3252 | return 0; | 3252 | return 0; |
3253 | } | 3253 | } |
3254 | READ_BUF(20); | 3254 | READ_BUF(NFS4_STATEID_SIZE+4); |
3255 | COPYMEM(res->delegation.data, sizeof(res->delegation.data)); | 3255 | COPYMEM(res->delegation.data, NFS4_STATEID_SIZE); |
3256 | READ32(res->do_recall); | 3256 | READ32(res->do_recall); |
3257 | switch (delegation_type) { | 3257 | switch (delegation_type) { |
3258 | case NFS4_OPEN_DELEGATE_READ: | 3258 | case NFS4_OPEN_DELEGATE_READ: |
@@ -3275,8 +3275,8 @@ static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res) | |||
3275 | status = decode_op_hdr(xdr, OP_OPEN); | 3275 | status = decode_op_hdr(xdr, OP_OPEN); |
3276 | if (status) | 3276 | if (status) |
3277 | return status; | 3277 | return status; |
3278 | READ_BUF(sizeof(res->stateid.data)); | 3278 | READ_BUF(NFS4_STATEID_SIZE); |
3279 | COPYMEM(res->stateid.data, sizeof(res->stateid.data)); | 3279 | COPYMEM(res->stateid.data, NFS4_STATEID_SIZE); |
3280 | 3280 | ||
3281 | decode_change_info(xdr, &res->cinfo); | 3281 | decode_change_info(xdr, &res->cinfo); |
3282 | 3282 | ||
@@ -3302,8 +3302,8 @@ static int decode_open_confirm(struct xdr_stream *xdr, struct nfs_open_confirmres *res) | |||
3302 | status = decode_op_hdr(xdr, OP_OPEN_CONFIRM); | 3302 | status = decode_op_hdr(xdr, OP_OPEN_CONFIRM); |
3303 | if (status) | 3303 | if (status) |
3304 | return status; | 3304 | return status; |
3305 | READ_BUF(sizeof(res->stateid.data)); | 3305 | READ_BUF(NFS4_STATEID_SIZE); |
3306 | COPYMEM(res->stateid.data, sizeof(res->stateid.data)); | 3306 | COPYMEM(res->stateid.data, NFS4_STATEID_SIZE); |
3307 | return 0; | 3307 | return 0; |
3308 | } | 3308 | } |
3309 | 3309 | ||
@@ -3315,8 +3315,8 @@ static int decode_open_downgrade(struct xdr_stream *xdr, struct nfs_closeres *res) | |||
3315 | status = decode_op_hdr(xdr, OP_OPEN_DOWNGRADE); | 3315 | status = decode_op_hdr(xdr, OP_OPEN_DOWNGRADE); |
3316 | if (status) | 3316 | if (status) |
3317 | return status; | 3317 | return status; |
3318 | READ_BUF(sizeof(res->stateid.data)); | 3318 | READ_BUF(NFS4_STATEID_SIZE); |
3319 | COPYMEM(res->stateid.data, sizeof(res->stateid.data)); | 3319 | COPYMEM(res->stateid.data, NFS4_STATEID_SIZE); |
3320 | return 0; | 3320 | return 0; |
3321 | } | 3321 | } |
3322 | 3322 | ||
@@ -3590,9 +3590,9 @@ static int decode_setclientid(struct xdr_stream *xdr, struct nfs_client *clp) | |||
3590 | } | 3590 | } |
3591 | READ32(nfserr); | 3591 | READ32(nfserr); |
3592 | if (nfserr == NFS_OK) { | 3592 | if (nfserr == NFS_OK) { |
3593 | READ_BUF(8 + sizeof(clp->cl_confirm.data)); | 3593 | READ_BUF(8 + NFS4_VERIFIER_SIZE); |
3594 | READ64(clp->cl_clientid); | 3594 | READ64(clp->cl_clientid); |
3595 | COPYMEM(clp->cl_confirm.data, sizeof(clp->cl_confirm.data)); | 3595 | COPYMEM(clp->cl_confirm.data, NFS4_VERIFIER_SIZE); |
3596 | } else if (nfserr == NFSERR_CLID_INUSE) { | 3596 | } else if (nfserr == NFSERR_CLID_INUSE) { |
3597 | uint32_t len; | 3597 | uint32_t len; |
3598 | 3598 | ||
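
Throughout this file, stateid and verifier sizes are now spelled with the named protocol constants introduced in linux/nfs4.h (see that hunk below) instead of sizeof() on a struct field or bare totals. The magic numbers become auditable arithmetic; for example, the new-lock-owner branch of encode_lock reserves the same 40 bytes as before, but itemized:

    /* Old: RESERVE_SPACE(40). New, same total, self-documenting: */
    RESERVE_SPACE(4 + NFS4_STATEID_SIZE + 20);
        /*  4 : open_seqid                            */
        /* 16 : open_stateid (NFS4_STATEID_SIZE)      */
        /* 20 : lock_seqid(4) + clientid(8)           */
        /*      + owner-len(4) + owner-id(4)          */
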
diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 9a55807b2a7..7bd7cb95c03 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c | |||
@@ -79,7 +79,7 @@ void nfs_readdata_release(void *data) | |||
79 | static | 79 | static |
80 | int nfs_return_empty_page(struct page *page) | 80 | int nfs_return_empty_page(struct page *page) |
81 | { | 81 | { |
82 | memclear_highpage_flush(page, 0, PAGE_CACHE_SIZE); | 82 | zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0); |
83 | SetPageUptodate(page); | 83 | SetPageUptodate(page); |
84 | unlock_page(page); | 84 | unlock_page(page); |
85 | return 0; | 85 | return 0; |
@@ -103,10 +103,10 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data) | |||
103 | pglen = PAGE_CACHE_SIZE - base; | 103 | pglen = PAGE_CACHE_SIZE - base; |
104 | for (;;) { | 104 | for (;;) { |
105 | if (remainder <= pglen) { | 105 | if (remainder <= pglen) { |
106 | memclear_highpage_flush(*pages, base, remainder); | 106 | zero_user_page(*pages, base, remainder, KM_USER0); |
107 | break; | 107 | break; |
108 | } | 108 | } |
109 | memclear_highpage_flush(*pages, base, pglen); | 109 | zero_user_page(*pages, base, pglen, KM_USER0); |
110 | pages++; | 110 | pages++; |
111 | remainder -= pglen; | 111 | remainder -= pglen; |
112 | pglen = PAGE_CACHE_SIZE; | 112 | pglen = PAGE_CACHE_SIZE; |
@@ -130,7 +130,7 @@ static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode, | |||
130 | return PTR_ERR(new); | 130 | return PTR_ERR(new); |
131 | } | 131 | } |
132 | if (len < PAGE_CACHE_SIZE) | 132 | if (len < PAGE_CACHE_SIZE) |
133 | memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len); | 133 | zero_user_page(page, len, PAGE_CACHE_SIZE - len, KM_USER0); |
134 | 134 | ||
135 | nfs_list_add_request(new, &one_request); | 135 | nfs_list_add_request(new, &one_request); |
136 | if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE) | 136 | if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE) |
@@ -532,7 +532,7 @@ readpage_async_filler(void *data, struct page *page) | |||
532 | return PTR_ERR(new); | 532 | return PTR_ERR(new); |
533 | } | 533 | } |
534 | if (len < PAGE_CACHE_SIZE) | 534 | if (len < PAGE_CACHE_SIZE) |
535 | memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len); | 535 | zero_user_page(page, len, PAGE_CACHE_SIZE - len, KM_USER0); |
536 | nfs_pageio_add_request(desc->pgio, new); | 536 | nfs_pageio_add_request(desc->pgio, new); |
537 | return 0; | 537 | return 0; |
538 | } | 538 | } |
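
memclear_highpage_flush() gives way to the consolidated zero_user_page() helper, which takes an explicit kmap slot. A rough sketch of what the helper does (assumption: based on the 2.6.22-era definition in linux/highmem.h):

    static inline void zero_user_page(struct page *page, unsigned offset,
                                      unsigned size, enum km_type km)
    {
        void *addr = kmap_atomic(page, km);  /* map with caller's slot */
        memset(addr + offset, 0, size);
        flush_dcache_page(page);
        kunmap_atomic(addr, km);
    }

These call sites run in process context, where KM_USER0 is the conventional slot to pass.
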
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index de92b9509d9..b084c03ce49 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -58,7 +58,7 @@ struct nfs_write_data *nfs_commit_alloc(void) | |||
58 | return p; | 58 | return p; |
59 | } | 59 | } |
60 | 60 | ||
61 | void nfs_commit_rcu_free(struct rcu_head *head) | 61 | static void nfs_commit_rcu_free(struct rcu_head *head) |
62 | { | 62 | { |
63 | struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu); | 63 | struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu); |
64 | if (p && (p->pagevec != &p->page_array[0])) | 64 | if (p && (p->pagevec != &p->page_array[0])) |
@@ -168,7 +168,7 @@ static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int | |||
168 | if (count != nfs_page_length(page)) | 168 | if (count != nfs_page_length(page)) |
169 | return; | 169 | return; |
170 | if (count != PAGE_CACHE_SIZE) | 170 | if (count != PAGE_CACHE_SIZE) |
171 | memclear_highpage_flush(page, count, PAGE_CACHE_SIZE - count); | 171 | zero_user_page(page, count, PAGE_CACHE_SIZE - count, KM_USER0); |
172 | SetPageUptodate(page); | 172 | SetPageUptodate(page); |
173 | } | 173 | } |
174 | 174 | ||
@@ -922,7 +922,7 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned i | |||
922 | return 0; | 922 | return 0; |
923 | out_bad: | 923 | out_bad: |
924 | while (!list_empty(head)) { | 924 | while (!list_empty(head)) { |
925 | struct nfs_page *req = nfs_list_entry(head->next); | 925 | req = nfs_list_entry(head->next); |
926 | nfs_list_remove_request(req); | 926 | nfs_list_remove_request(req); |
927 | nfs_redirty_request(req); | 927 | nfs_redirty_request(req); |
928 | nfs_end_page_writeback(req->wb_page); | 928 | nfs_end_page_writeback(req->wb_page); |
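
Besides the zero_user_page() conversion (see the note after the fs/nfs/read.c section above), the out_bad path stops re-declaring `req`, which shadowed the function-scope variable of the same name. Apparently harmless here, but the classic hazard the fix removes looks like:

    struct nfs_page *req = first;       /* outer */
    ...
    while (!list_empty(head)) {
        struct nfs_page *req = next;    /* inner: outer req never updated */
        ...
    }
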
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c index 21d834e5ed7..4566b918255 100644 --- a/fs/ntfs/super.c +++ b/fs/ntfs/super.c | |||
@@ -3085,8 +3085,7 @@ static void ntfs_big_inode_init_once(void *foo, struct kmem_cache *cachep, | |||
3085 | { | 3085 | { |
3086 | ntfs_inode *ni = (ntfs_inode *)foo; | 3086 | ntfs_inode *ni = (ntfs_inode *)foo; |
3087 | 3087 | ||
3088 | if (flags & SLAB_CTOR_CONSTRUCTOR) | 3088 | inode_init_once(VFS_I(ni)); |
3089 | inode_init_once(VFS_I(ni)); | ||
3090 | } | 3089 | } |
3091 | 3090 | ||
3092 | /* | 3091 | /* |
diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c index 5671cf9d638..fd8cb1badc9 100644 --- a/fs/ocfs2/dlm/dlmfs.c +++ b/fs/ocfs2/dlm/dlmfs.c | |||
@@ -262,12 +262,10 @@ static void dlmfs_init_once(void *foo, | |||
262 | struct dlmfs_inode_private *ip = | 262 | struct dlmfs_inode_private *ip = |
263 | (struct dlmfs_inode_private *) foo; | 263 | (struct dlmfs_inode_private *) foo; |
264 | 264 | ||
265 | if (flags & SLAB_CTOR_CONSTRUCTOR) { | 265 | ip->ip_dlm = NULL; |
266 | ip->ip_dlm = NULL; | 266 | ip->ip_parent = NULL; |
267 | ip->ip_parent = NULL; | ||
268 | 267 | ||
269 | inode_init_once(&ip->ip_vfs_inode); | 268 | inode_init_once(&ip->ip_vfs_inode); |
270 | } | ||
271 | } | 269 | } |
272 | 270 | ||
273 | static struct inode *dlmfs_alloc_inode(struct super_block *sb) | 271 | static struct inode *dlmfs_alloc_inode(struct super_block *sb) |
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 7c5e3f5d663..86b559c7dce 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c | |||
@@ -937,31 +937,29 @@ static void ocfs2_inode_init_once(void *data, | |||
937 | { | 937 | { |
938 | struct ocfs2_inode_info *oi = data; | 938 | struct ocfs2_inode_info *oi = data; |
939 | 939 | ||
940 | if (flags & SLAB_CTOR_CONSTRUCTOR) { | 940 | oi->ip_flags = 0; |
941 | oi->ip_flags = 0; | 941 | oi->ip_open_count = 0; |
942 | oi->ip_open_count = 0; | 942 | spin_lock_init(&oi->ip_lock); |
943 | spin_lock_init(&oi->ip_lock); | 943 | ocfs2_extent_map_init(&oi->vfs_inode); |
944 | ocfs2_extent_map_init(&oi->vfs_inode); | 944 | INIT_LIST_HEAD(&oi->ip_io_markers); |
945 | INIT_LIST_HEAD(&oi->ip_io_markers); | 945 | oi->ip_created_trans = 0; |
946 | oi->ip_created_trans = 0; | 946 | oi->ip_last_trans = 0; |
947 | oi->ip_last_trans = 0; | 947 | oi->ip_dir_start_lookup = 0; |
948 | oi->ip_dir_start_lookup = 0; | ||
949 | 948 | ||
950 | init_rwsem(&oi->ip_alloc_sem); | 949 | init_rwsem(&oi->ip_alloc_sem); |
951 | mutex_init(&oi->ip_io_mutex); | 950 | mutex_init(&oi->ip_io_mutex); |
952 | 951 | ||
953 | oi->ip_blkno = 0ULL; | 952 | oi->ip_blkno = 0ULL; |
954 | oi->ip_clusters = 0; | 953 | oi->ip_clusters = 0; |
955 | 954 | ||
956 | ocfs2_lock_res_init_once(&oi->ip_rw_lockres); | 955 | ocfs2_lock_res_init_once(&oi->ip_rw_lockres); |
957 | ocfs2_lock_res_init_once(&oi->ip_meta_lockres); | 956 | ocfs2_lock_res_init_once(&oi->ip_meta_lockres); |
958 | ocfs2_lock_res_init_once(&oi->ip_data_lockres); | 957 | ocfs2_lock_res_init_once(&oi->ip_data_lockres); |
959 | ocfs2_lock_res_init_once(&oi->ip_open_lockres); | 958 | ocfs2_lock_res_init_once(&oi->ip_open_lockres); |
960 | 959 | ||
961 | ocfs2_metadata_cache_init(&oi->vfs_inode); | 960 | ocfs2_metadata_cache_init(&oi->vfs_inode); |
962 | 961 | ||
963 | inode_init_once(&oi->vfs_inode); | 962 | inode_init_once(&oi->vfs_inode); |
964 | } | ||
965 | } | 963 | } |
966 | 964 | ||
967 | static int ocfs2_initialize_mem_caches(void) | 965 | static int ocfs2_initialize_mem_caches(void) |
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c index 731a90e9f0c..e62397341c3 100644 --- a/fs/openpromfs/inode.c +++ b/fs/openpromfs/inode.c | |||
@@ -419,8 +419,7 @@ static void op_inode_init_once(void *data, struct kmem_cache * cachep, unsigned long flags) | |||
419 | { | 419 | { |
420 | struct op_inode_info *oi = (struct op_inode_info *) data; | 420 | struct op_inode_info *oi = (struct op_inode_info *) data; |
421 | 421 | ||
422 | if (flags & SLAB_CTOR_CONSTRUCTOR) | 422 | inode_init_once(&oi->vfs_inode); |
423 | inode_init_once(&oi->vfs_inode); | ||
424 | } | 423 | } |
425 | 424 | ||
426 | static int __init init_openprom_fs(void) | 425 | static int __init init_openprom_fs(void) |
diff --git a/fs/proc/inode.c b/fs/proc/inode.c index b8171907c83..d5ce65c68d7 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c | |||
@@ -109,8 +109,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) | |||
109 | { | 109 | { |
110 | struct proc_inode *ei = (struct proc_inode *) foo; | 110 | struct proc_inode *ei = (struct proc_inode *) foo; |
111 | 111 | ||
112 | if (flags & SLAB_CTOR_CONSTRUCTOR) | 112 | inode_init_once(&ei->vfs_inode); |
113 | inode_init_once(&ei->vfs_inode); | ||
114 | } | 113 | } |
115 | 114 | ||
116 | int __init proc_init_inodecache(void) | 115 | int __init proc_init_inodecache(void) |
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c index 75fc8498f2e..8d256eb1181 100644 --- a/fs/qnx4/inode.c +++ b/fs/qnx4/inode.c | |||
@@ -536,8 +536,7 @@ static void init_once(void *foo, struct kmem_cache * cachep, | |||
536 | { | 536 | { |
537 | struct qnx4_inode_info *ei = (struct qnx4_inode_info *) foo; | 537 | struct qnx4_inode_info *ei = (struct qnx4_inode_info *) foo; |
538 | 538 | ||
539 | if (flags & SLAB_CTOR_CONSTRUCTOR) | 539 | inode_init_once(&ei->vfs_inode); |
540 | inode_init_once(&ei->vfs_inode); | ||
541 | } | 540 | } |
542 | 541 | ||
543 | static int init_inodecache(void) | 542 | static int init_inodecache(void) |
diff --git a/fs/quota.c b/fs/quota.c index e9d88fd0eca..9f237d6182c 100644 --- a/fs/quota.c +++ b/fs/quota.c | |||
@@ -157,7 +157,6 @@ static int check_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t | |||
157 | static void quota_sync_sb(struct super_block *sb, int type) | 157 | static void quota_sync_sb(struct super_block *sb, int type) |
158 | { | 158 | { |
159 | int cnt; | 159 | int cnt; |
160 | struct inode *discard[MAXQUOTAS]; | ||
161 | 160 | ||
162 | sb->s_qcop->quota_sync(sb, type); | 161 | sb->s_qcop->quota_sync(sb, type); |
163 | /* This is not very clever (and fast) but currently I don't know about | 162 | /* This is not very clever (and fast) but currently I don't know about |
@@ -167,29 +166,21 @@ static void quota_sync_sb(struct super_block *sb, int type) | |||
167 | sb->s_op->sync_fs(sb, 1); | 166 | sb->s_op->sync_fs(sb, 1); |
168 | sync_blockdev(sb->s_bdev); | 167 | sync_blockdev(sb->s_bdev); |
169 | 168 | ||
170 | /* Now when everything is written we can discard the pagecache so | 169 | /* |
171 | * that userspace sees the changes. We need i_mutex and so we could | 170 | * Now when everything is written we can discard the pagecache so |
172 | * not do it inside dqonoff_mutex. Moreover we need to be carefull | 171 | * that userspace sees the changes. |
173 | * about races with quotaoff() (that is the reason why we have own | 172 | */ |
174 | * reference to inode). */ | ||
175 | mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); | 173 | mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); |
176 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 174 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
177 | discard[cnt] = NULL; | ||
178 | if (type != -1 && cnt != type) | 175 | if (type != -1 && cnt != type) |
179 | continue; | 176 | continue; |
180 | if (!sb_has_quota_enabled(sb, cnt)) | 177 | if (!sb_has_quota_enabled(sb, cnt)) |
181 | continue; | 178 | continue; |
182 | discard[cnt] = igrab(sb_dqopt(sb)->files[cnt]); | 179 | mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, I_MUTEX_QUOTA); |
180 | truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0); | ||
181 | mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex); | ||
183 | } | 182 | } |
184 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); | 183 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); |
185 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
186 | if (discard[cnt]) { | ||
187 | mutex_lock(&discard[cnt]->i_mutex); | ||
188 | truncate_inode_pages(&discard[cnt]->i_data, 0); | ||
189 | mutex_unlock(&discard[cnt]->i_mutex); | ||
190 | iput(discard[cnt]); | ||
191 | } | ||
192 | } | ||
193 | } | 184 | } |
194 | 185 | ||
195 | void sync_dquots(struct super_block *sb, int type) | 186 | void sync_dquots(struct super_block *sb, int type) |
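
The pagecache flush of the quota files no longer takes its own inode references: the whole loop now runs under dqonoff_mutex, which already keeps quotaoff() from tearing the inodes down, so the discard[] array and the igrab()/iput() dance become unnecessary. Taking i_mutex while dqonoff_mutex is held needs a lockdep subclass annotation so it is not reported as recursive locking; hence mutex_lock_nested() with I_MUTEX_QUOTA. The resulting locking shape:

    mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);      /* pins quota files  */
    mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
    truncate_inode_pages(&inode->i_data, 0);       /* drop stale pages  */
    mutex_unlock(&inode->i_mutex);
    mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
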
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index c7762140c42..b4ac9119200 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c | |||
@@ -511,14 +511,12 @@ static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags) | |||
511 | { | 511 | { |
512 | struct reiserfs_inode_info *ei = (struct reiserfs_inode_info *)foo; | 512 | struct reiserfs_inode_info *ei = (struct reiserfs_inode_info *)foo; |
513 | 513 | ||
514 | if (flags & SLAB_CTOR_CONSTRUCTOR) { | 514 | INIT_LIST_HEAD(&ei->i_prealloc_list); |
515 | INIT_LIST_HEAD(&ei->i_prealloc_list); | 515 | inode_init_once(&ei->vfs_inode); |
516 | inode_init_once(&ei->vfs_inode); | ||
517 | #ifdef CONFIG_REISERFS_FS_POSIX_ACL | 516 | #ifdef CONFIG_REISERFS_FS_POSIX_ACL |
518 | ei->i_acl_access = NULL; | 517 | ei->i_acl_access = NULL; |
519 | ei->i_acl_default = NULL; | 518 | ei->i_acl_default = NULL; |
520 | #endif | 519 | #endif |
521 | } | ||
522 | } | 520 | } |
523 | 521 | ||
524 | static int init_inodecache(void) | 522 | static int init_inodecache(void) |
diff --git a/fs/romfs/inode.c b/fs/romfs/inode.c index 80428519027..2284e03342c 100644 --- a/fs/romfs/inode.c +++ b/fs/romfs/inode.c | |||
@@ -566,12 +566,11 @@ static void romfs_destroy_inode(struct inode *inode) | |||
566 | kmem_cache_free(romfs_inode_cachep, ROMFS_I(inode)); | 566 | kmem_cache_free(romfs_inode_cachep, ROMFS_I(inode)); |
567 | } | 567 | } |
568 | 568 | ||
569 | static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) | 569 | static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags) |
570 | { | 570 | { |
571 | struct romfs_inode_info *ei = (struct romfs_inode_info *) foo; | 571 | struct romfs_inode_info *ei = foo; |
572 | 572 | ||
573 | if (flags & SLAB_CTOR_CONSTRUCTOR) | 573 | inode_init_once(&ei->vfs_inode); |
574 | inode_init_once(&ei->vfs_inode); | ||
575 | } | 574 | } |
576 | 575 | ||
577 | static int init_inodecache(void) | 576 | static int init_inodecache(void) |
diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c index 424a3ddf86d..5c9243a23b9 100644 --- a/fs/smbfs/inode.c +++ b/fs/smbfs/inode.c | |||
@@ -70,8 +70,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) | |||
70 | { | 70 | { |
71 | struct smb_inode_info *ei = (struct smb_inode_info *) foo; | 71 | struct smb_inode_info *ei = (struct smb_inode_info *) foo; |
72 | 72 | ||
73 | if (flags & SLAB_CTOR_CONSTRUCTOR) | 73 | inode_init_once(&ei->vfs_inode); |
74 | inode_init_once(&ei->vfs_inode); | ||
75 | } | 74 | } |
76 | 75 | ||
77 | static int init_inodecache(void) | 76 | static int init_inodecache(void) |
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c index 3152d741560..56441169339 100644 --- a/fs/sysv/inode.c +++ b/fs/sysv/inode.c | |||
@@ -322,8 +322,7 @@ static void init_once(void *p, struct kmem_cache *cachep, unsigned long flags) | |||
322 | { | 322 | { |
323 | struct sysv_inode_info *si = (struct sysv_inode_info *)p; | 323 | struct sysv_inode_info *si = (struct sysv_inode_info *)p; |
324 | 324 | ||
325 | if (flags & SLAB_CTOR_CONSTRUCTOR) | 325 | inode_init_once(&si->vfs_inode); |
326 | inode_init_once(&si->vfs_inode); | ||
327 | } | 326 | } |
328 | 327 | ||
329 | const struct super_operations sysv_sops = { | 328 | const struct super_operations sysv_sops = { |
diff --git a/fs/udf/super.c b/fs/udf/super.c index 9b8644a06e5..3a743d854c1 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c | |||
@@ -134,10 +134,8 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) | |||
134 | { | 134 | { |
135 | struct udf_inode_info *ei = (struct udf_inode_info *) foo; | 135 | struct udf_inode_info *ei = (struct udf_inode_info *) foo; |
136 | 136 | ||
137 | if (flags & SLAB_CTOR_CONSTRUCTOR) { | 137 | ei->i_ext.i_data = NULL; |
138 | ei->i_ext.i_data = NULL; | 138 | inode_init_once(&ei->vfs_inode); |
139 | inode_init_once(&ei->vfs_inode); | ||
140 | } | ||
141 | } | 139 | } |
142 | 140 | ||
143 | static int init_inodecache(void) | 141 | static int init_inodecache(void) |
diff --git a/fs/ufs/super.c b/fs/ufs/super.c index be7c48c5f20..22ff6ed55ce 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c | |||
@@ -1237,8 +1237,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) | |||
1237 | { | 1237 | { |
1238 | struct ufs_inode_info *ei = (struct ufs_inode_info *) foo; | 1238 | struct ufs_inode_info *ei = (struct ufs_inode_info *) foo; |
1239 | 1239 | ||
1240 | if (flags & SLAB_CTOR_CONSTRUCTOR) | 1240 | inode_init_once(&ei->vfs_inode); |
1241 | inode_init_once(&ei->vfs_inode); | ||
1242 | } | 1241 | } |
1243 | 1242 | ||
1244 | static int init_inodecache(void) | 1243 | static int init_inodecache(void) |
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 14e2cbe5a8d..bf9a9d5909b 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c | |||
@@ -360,8 +360,7 @@ xfs_fs_inode_init_once( | |||
360 | kmem_zone_t *zonep, | 360 | kmem_zone_t *zonep, |
361 | unsigned long flags) | 361 | unsigned long flags) |
362 | { | 362 | { |
363 | if (flags & SLAB_CTOR_CONSTRUCTOR) | 363 | inode_init_once(vn_to_inode((bhv_vnode_t *)vnode)); |
364 | inode_init_once(vn_to_inode((bhv_vnode_t *)vnode)); | ||
365 | } | 364 | } |
366 | 365 | ||
367 | STATIC int | 366 | STATIC int |
diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h index f9d2bde9a7b..b62cd36ff32 100644 --- a/include/acpi/acpi_numa.h +++ b/include/acpi/acpi_numa.h | |||
@@ -11,11 +11,8 @@ | |||
11 | #define MAX_PXM_DOMAINS (256) /* Old pxm spec is defined 8 bit */ | 11 | #define MAX_PXM_DOMAINS (256) /* Old pxm spec is defined 8 bit */ |
12 | #endif | 12 | #endif |
13 | 13 | ||
14 | extern int __cpuinitdata pxm_to_node_map[MAX_PXM_DOMAINS]; | 14 | extern int pxm_to_node(int); |
15 | extern int __cpuinitdata node_to_pxm_map[MAX_NUMNODES]; | 15 | extern int node_to_pxm(int); |
16 | |||
17 | extern int __cpuinit pxm_to_node(int); | ||
18 | extern int __cpuinit node_to_pxm(int); | ||
19 | extern int __cpuinit acpi_map_pxm_to_node(int); | 16 | extern int __cpuinit acpi_map_pxm_to_node(int); |
20 | extern void __cpuinit acpi_unmap_pxm_to_node(int); | 17 | extern void __cpuinit acpi_unmap_pxm_to_node(int); |
21 | 18 | ||
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 2d956cd566a..e1a708337be 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h | |||
@@ -17,6 +17,8 @@ struct pt_regs; | |||
17 | 17 | ||
18 | #ifdef __KERNEL__ | 18 | #ifdef __KERNEL__ |
19 | 19 | ||
20 | #define CORENAME_MAX_SIZE 128 | ||
21 | |||
20 | /* | 22 | /* |
21 | * This structure is used to hold the arguments that are used when loading binaries. | 23 | * This structure is used to hold the arguments that are used when loading binaries. |
22 | */ | 24 | */ |
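
CORENAME_MAX_SIZE moves into the shared header (previously a private constant in fs/exec.c, presumably) so other code can size core_pattern buffers consistently:

    /* Usage sketch, matching how the core-dump path sizes its buffer: */
    char corename[CORENAME_MAX_SIZE + 1];
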
diff --git a/include/linux/kmalloc_sizes.h b/include/linux/kmalloc_sizes.h index bda23e00ed7..e576b848ce1 100644 --- a/include/linux/kmalloc_sizes.h +++ b/include/linux/kmalloc_sizes.h | |||
@@ -19,17 +19,27 @@ | |||
19 | CACHE(32768) | 19 | CACHE(32768) |
20 | CACHE(65536) | 20 | CACHE(65536) |
21 | CACHE(131072) | 21 | CACHE(131072) |
22 | #if (NR_CPUS > 512) || (MAX_NUMNODES > 256) || !defined(CONFIG_MMU) | 22 | #if KMALLOC_MAX_SIZE >= 262144 |
23 | CACHE(262144) | 23 | CACHE(262144) |
24 | #endif | 24 | #endif |
25 | #ifndef CONFIG_MMU | 25 | #if KMALLOC_MAX_SIZE >= 524288 |
26 | CACHE(524288) | 26 | CACHE(524288) |
27 | #endif | ||
28 | #if KMALLOC_MAX_SIZE >= 1048576 | ||
27 | CACHE(1048576) | 29 | CACHE(1048576) |
28 | #ifdef CONFIG_LARGE_ALLOCS | 30 | #endif |
31 | #if KMALLOC_MAX_SIZE >= 2097152 | ||
29 | CACHE(2097152) | 32 | CACHE(2097152) |
33 | #endif | ||
34 | #if KMALLOC_MAX_SIZE >= 4194304 | ||
30 | CACHE(4194304) | 35 | CACHE(4194304) |
36 | #endif | ||
37 | #if KMALLOC_MAX_SIZE >= 8388608 | ||
31 | CACHE(8388608) | 38 | CACHE(8388608) |
39 | #endif | ||
40 | #if KMALLOC_MAX_SIZE >= 16777216 | ||
32 | CACHE(16777216) | 41 | CACHE(16777216) |
42 | #endif | ||
43 | #if KMALLOC_MAX_SIZE >= 33554432 | ||
33 | CACHE(33554432) | 44 | CACHE(33554432) |
34 | #endif /* CONFIG_LARGE_ALLOCS */ | 45 | #endif |
35 | #endif /* CONFIG_MMU */ | ||
diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h index dd12b4c9e61..12bfe09de2b 100644 --- a/include/linux/lockd/xdr4.h +++ b/include/linux/lockd/xdr4.h | |||
@@ -42,5 +42,6 @@ int nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *); | |||
42 | int nlmclt_encode_cancargs(struct rpc_rqst *, u32 *, struct nlm_args *); | 42 | int nlmclt_encode_cancargs(struct rpc_rqst *, u32 *, struct nlm_args *); |
43 | int nlmclt_encode_unlockargs(struct rpc_rqst *, u32 *, struct nlm_args *); | 43 | int nlmclt_encode_unlockargs(struct rpc_rqst *, u32 *, struct nlm_args *); |
44 | */ | 44 | */ |
45 | extern struct rpc_version nlm_version4; | ||
45 | 46 | ||
46 | #endif /* LOCKD_XDR4_H */ | 47 | #endif /* LOCKD_XDR4_H */ |
diff --git a/include/linux/mii.h b/include/linux/mii.h index beddc6d3b0f..151b7e0182c 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h | |||
@@ -56,8 +56,8 @@ | |||
56 | #define BMSR_ANEGCOMPLETE 0x0020 /* Auto-negotiation complete */ | 56 | #define BMSR_ANEGCOMPLETE 0x0020 /* Auto-negotiation complete */ |
57 | #define BMSR_RESV 0x00c0 /* Unused... */ | 57 | #define BMSR_RESV 0x00c0 /* Unused... */ |
58 | #define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */ | 58 | #define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */ |
59 | #define BMSR_100FULL2 0x0200 /* Can do 100BASE-T2 HDX */ | 59 | #define BMSR_100HALF2 0x0200 /* Can do 100BASE-T2 HDX */ |
60 | #define BMSR_100HALF2 0x0400 /* Can do 100BASE-T2 FDX */ | 60 | #define BMSR_100FULL2 0x0400 /* Can do 100BASE-T2 FDX */ |
61 | #define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */ | 61 | #define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */ |
62 | #define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */ | 62 | #define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */ |
63 | #define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */ | 63 | #define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */ |
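
The two 100BASE-T2 ability bits were transposed: in the IEEE 802.3 BMSR layout, bit 9 (0x0200) reports half-duplex ability and bit 10 (0x0400) full-duplex, which is what the new definitions say. Checking sketch (helper names are hypothetical; only the masks come from mii.h):

    int bmsr = phy_read(phydev, MII_BMSR);
    int t2_half = !!(bmsr & BMSR_100HALF2);   /* bit 9  */
    int t2_full = !!(bmsr & BMSR_100FULL2);   /* bit 10 */
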
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 1be5be88deb..7e7f33a38fc 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/types.h> | 16 | #include <linux/types.h> |
17 | 17 | ||
18 | #define NFS4_VERIFIER_SIZE 8 | 18 | #define NFS4_VERIFIER_SIZE 8 |
19 | #define NFS4_STATEID_SIZE 16 | ||
19 | #define NFS4_FHSIZE 128 | 20 | #define NFS4_FHSIZE 128 |
20 | #define NFS4_MAXPATHLEN PATH_MAX | 21 | #define NFS4_MAXPATHLEN PATH_MAX |
21 | #define NFS4_MAXNAMLEN NAME_MAX | 22 | #define NFS4_MAXNAMLEN NAME_MAX |
@@ -113,7 +114,7 @@ struct nfs4_acl { | |||
113 | }; | 114 | }; |
114 | 115 | ||
115 | typedef struct { char data[NFS4_VERIFIER_SIZE]; } nfs4_verifier; | 116 | typedef struct { char data[NFS4_VERIFIER_SIZE]; } nfs4_verifier; |
116 | typedef struct { char data[16]; } nfs4_stateid; | 117 | typedef struct { char data[NFS4_STATEID_SIZE]; } nfs4_stateid; |
117 | 118 | ||
118 | enum nfs_opnum4 { | 119 | enum nfs_opnum4 { |
119 | OP_ACCESS = 3, | 120 | OP_ACCESS = 3, |
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 3b1fbf49fa7..62b3e008e64 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -471,6 +471,7 @@ | |||
471 | #define PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2 0x0219 | 471 | #define PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2 0x0219 |
472 | #define PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX 0x021A | 472 | #define PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX 0x021A |
473 | #define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM 0x0251 | 473 | #define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM 0x0251 |
474 | #define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE 0x0361 | ||
474 | #define PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL 0x252 | 475 | #define PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL 0x252 |
475 | 476 | ||
476 | #define PCI_VENDOR_ID_COMPEX2 0x101a /* pci.ids says "AT&T GIS (NCR)" */ | 477 | #define PCI_VENDOR_ID_COMPEX2 0x101a /* pci.ids says "AT&T GIS (NCR)" */ |
diff --git a/include/linux/rmap.h b/include/linux/rmap.h index bdd277223af..97347f22fc2 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h | |||
@@ -74,17 +74,14 @@ void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned lon | |||
74 | void page_add_file_rmap(struct page *); | 74 | void page_add_file_rmap(struct page *); |
75 | void page_remove_rmap(struct page *, struct vm_area_struct *); | 75 | void page_remove_rmap(struct page *, struct vm_area_struct *); |
76 | 76 | ||
77 | /** | 77 | #ifdef CONFIG_DEBUG_VM |
78 | * page_dup_rmap - duplicate pte mapping to a page | 78 | void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address); |
79 | * @page: the page to add the mapping to | 79 | #else |
80 | * | 80 | static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) |
81 | * For copy_page_range only: minimal extract from page_add_rmap, | ||
82 | * avoiding unnecessary tests (already checked) so it's quicker. | ||
83 | */ | ||
84 | static inline void page_dup_rmap(struct page *page) | ||
85 | { | 81 | { |
86 | atomic_inc(&page->_mapcount); | 82 | atomic_inc(&page->_mapcount); |
87 | } | 83 | } |
84 | #endif | ||
88 | 85 | ||
89 | /* | 86 | /* |
90 | * Called from mm/vmscan.c to handle paging out | 87 | * Called from mm/vmscan.c to handle paging out |
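
page_dup_rmap() grows vma/address arguments so a CONFIG_DEBUG_VM build can route through an out-of-line version (in mm/rmap.c, presumably) that sanity-checks the mapping before bumping _mapcount; non-debug builds keep the old one-line fast path. A hedged sketch of what the debug variant likely does:

    void page_dup_rmap(struct page *page, struct vm_area_struct *vma,
                       unsigned long address)
    {
        BUG_ON(page_mapcount(page) == 0);  /* must already be mapped */
        if (PageAnon(page))
            __page_check_anon_rmap(page, vma, address);  /* assumed helper */
        atomic_inc(&page->_mapcount);
    }
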
diff --git a/include/linux/slab.h b/include/linux/slab.h index 71829efc40b..a015236cc57 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
@@ -32,9 +32,6 @@ typedef struct kmem_cache kmem_cache_t __deprecated; | |||
32 | #define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */ | 32 | #define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */ |
33 | #define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */ | 33 | #define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */ |
34 | 34 | ||
35 | /* Flags passed to a constructor functions */ | ||
36 | #define SLAB_CTOR_CONSTRUCTOR 0x001UL /* If not set, then deconstructor */ | ||
37 | |||
38 | /* | 35 | /* |
39 | * struct kmem_cache related prototypes | 36 | * struct kmem_cache related prototypes |
40 | */ | 37 | */ |
@@ -77,6 +74,21 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep, | |||
77 | #endif | 74 | #endif |
78 | 75 | ||
79 | /* | 76 | /* |
77 | * The largest kmalloc size supported by the slab allocators is | ||
78 | * 32 megabyte (2^25) or the maximum allocatable page order if that is | ||
79 | * less than 32 MB. | ||
80 | * | ||
81 | * WARNING: Its not easy to increase this value since the allocators have | ||
82 | * to do various tricks to work around compiler limitations in order to | ||
83 | * ensure proper constant folding. | ||
84 | */ | ||
85 | #define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT) <= 25 ? \ | ||
86 | (MAX_ORDER + PAGE_SHIFT) : 25) | ||
87 | |||
88 | #define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_HIGH) | ||
89 | #define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_HIGH - PAGE_SHIFT) | ||
90 | |||
91 | /* | ||
80 | * Common kmalloc functions provided by all allocators | 92 | * Common kmalloc functions provided by all allocators |
81 | */ | 93 | */ |
82 | void *__kmalloc(size_t, gfp_t); | 94 | void *__kmalloc(size_t, gfp_t); |
@@ -233,9 +245,6 @@ extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *); | |||
233 | 245 | ||
234 | #endif /* DEBUG_SLAB */ | 246 | #endif /* DEBUG_SLAB */ |
235 | 247 | ||
236 | extern const struct seq_operations slabinfo_op; | ||
237 | ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *); | ||
238 | |||
239 | #endif /* __KERNEL__ */ | 248 | #endif /* __KERNEL__ */ |
240 | #endif /* _LINUX_SLAB_H */ | 249 | #endif /* _LINUX_SLAB_H */ |
241 | 250 | ||
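
Worked example of the new limit, assuming 4 KiB pages (PAGE_SHIFT = 12) and the common MAX_ORDER = 11:

    /* MAX_ORDER + PAGE_SHIFT = 23, which is <= 25, so:            */
    /*   KMALLOC_SHIFT_HIGH = 23                                   */
    /*   KMALLOC_MAX_SIZE   = 1UL << 23 = 8 MiB (8388608)          */
    /*   KMALLOC_MAX_ORDER  = 23 - 12 = 11                         */
    /* kmalloc_sizes.h (above) therefore compiles in the           */
    /* CACHE(262144) ... CACHE(8388608) entries and drops the      */
    /* 16 MiB and 32 MiB caches on such a configuration.           */
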
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 5e4364644ed..8d81a60518e 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h | |||
@@ -109,4 +109,7 @@ found: | |||
109 | 109 | ||
110 | #endif /* CONFIG_NUMA */ | 110 | #endif /* CONFIG_NUMA */ |
111 | 111 | ||
112 | extern const struct seq_operations slabinfo_op; | ||
113 | ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *); | ||
114 | |||
112 | #endif /* _LINUX_SLAB_DEF_H */ | 115 | #endif /* _LINUX_SLAB_DEF_H */ |
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index c6c1f4a120e..0764c829d96 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h | |||
@@ -40,7 +40,6 @@ struct kmem_cache { | |||
40 | int objects; /* Number of objects in slab */ | 40 | int objects; /* Number of objects in slab */ |
41 | int refcount; /* Refcount for slab cache destroy */ | 41 | int refcount; /* Refcount for slab cache destroy */ |
42 | void (*ctor)(void *, struct kmem_cache *, unsigned long); | 42 | void (*ctor)(void *, struct kmem_cache *, unsigned long); |
43 | void (*dtor)(void *, struct kmem_cache *, unsigned long); | ||
44 | int inuse; /* Offset to metadata */ | 43 | int inuse; /* Offset to metadata */ |
45 | int align; /* Alignment */ | 44 | int align; /* Alignment */ |
46 | const char *name; /* Name (only for display!) */ | 45 | const char *name; /* Name (only for display!) */ |
@@ -59,17 +58,6 @@ struct kmem_cache { | |||
59 | */ | 58 | */ |
60 | #define KMALLOC_SHIFT_LOW 3 | 59 | #define KMALLOC_SHIFT_LOW 3 |
61 | 60 | ||
62 | #ifdef CONFIG_LARGE_ALLOCS | ||
63 | #define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT) =< 25 ? \ | ||
64 | (MAX_ORDER + PAGE_SHIFT - 1) : 25) | ||
65 | #else | ||
66 | #if !defined(CONFIG_MMU) || NR_CPUS > 512 || MAX_NUMNODES > 256 | ||
67 | #define KMALLOC_SHIFT_HIGH 20 | ||
68 | #else | ||
69 | #define KMALLOC_SHIFT_HIGH 18 | ||
70 | #endif | ||
71 | #endif | ||
72 | |||
73 | /* | 61 | /* |
74 | * We keep the general caches in an array of slab caches that are used for | 62 | * We keep the general caches in an array of slab caches that are used for |
75 | * 2^x bytes of allocations. | 63 | * 2^x bytes of allocations. |
@@ -80,7 +68,7 @@ extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1]; | |||
80 | * Sorry that the following has to be that ugly but some versions of GCC | 68 | * Sorry that the following has to be that ugly but some versions of GCC |
81 | * have trouble with constant propagation and loops. | 69 | * have trouble with constant propagation and loops. |
82 | */ | 70 | */ |
83 | static inline int kmalloc_index(int size) | 71 | static inline int kmalloc_index(size_t size) |
84 | { | 72 | { |
85 | /* | 73 | /* |
86 | * We should return 0 if size == 0 but we use the smallest object | 74 | * We should return 0 if size == 0 but we use the smallest object |
@@ -88,7 +76,7 @@ static inline int kmalloc_index(int size) | |||
88 | */ | 76 | */ |
89 | WARN_ON_ONCE(size == 0); | 77 | WARN_ON_ONCE(size == 0); |
90 | 78 | ||
91 | if (size > (1 << KMALLOC_SHIFT_HIGH)) | 79 | if (size > KMALLOC_MAX_SIZE) |
92 | return -1; | 80 | return -1; |
93 | 81 | ||
94 | if (size > 64 && size <= 96) | 82 | if (size > 64 && size <= 96) |
@@ -111,17 +99,13 @@ static inline int kmalloc_index(int size) | |||
111 | if (size <= 64 * 1024) return 16; | 99 | if (size <= 64 * 1024) return 16; |
112 | if (size <= 128 * 1024) return 17; | 100 | if (size <= 128 * 1024) return 17; |
113 | if (size <= 256 * 1024) return 18; | 101 | if (size <= 256 * 1024) return 18; |
114 | #if KMALLOC_SHIFT_HIGH > 18 | ||
115 | if (size <= 512 * 1024) return 19; | 102 | if (size <= 512 * 1024) return 19; |
116 | if (size <= 1024 * 1024) return 20; | 103 | if (size <= 1024 * 1024) return 20; |
117 | #endif | ||
118 | #if KMALLOC_SHIFT_HIGH > 20 | ||
119 | if (size <= 2 * 1024 * 1024) return 21; | 104 | if (size <= 2 * 1024 * 1024) return 21; |
120 | if (size <= 4 * 1024 * 1024) return 22; | 105 | if (size <= 4 * 1024 * 1024) return 22; |
121 | if (size <= 8 * 1024 * 1024) return 23; | 106 | if (size <= 8 * 1024 * 1024) return 23; |
122 | if (size <= 16 * 1024 * 1024) return 24; | 107 | if (size <= 16 * 1024 * 1024) return 24; |
123 | if (size <= 32 * 1024 * 1024) return 25; | 108 | if (size <= 32 * 1024 * 1024) return 25; |
124 | #endif | ||
125 | return -1; | 109 | return -1; |
126 | 110 | ||
127 | /* | 111 | /* |
@@ -146,7 +130,12 @@ static inline struct kmem_cache *kmalloc_slab(size_t size) | |||
146 | if (index == 0) | 130 | if (index == 0) |
147 | return NULL; | 131 | return NULL; |
148 | 132 | ||
149 | if (index < 0) { | 133 | /* |
134 | * This function only gets expanded if __builtin_constant_p(size), so | ||
135 | * testing it here shouldn't be needed. But some versions of gcc need | ||
136 | * help. | ||
137 | */ | ||
138 | if (__builtin_constant_p(size) && index < 0) { | ||
150 | /* | 139 | /* |
151 | * Generate a link failure. Would be great if we could | 140 | * Generate a link failure. Would be great if we could |
152 | * do something to stop the compile here. | 141 | * do something to stop the compile here. |
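The slub_def.h hunks above widen kmalloc_index() to take a size_t and check against the shared KMALLOC_MAX_SIZE limit instead of a config-dependent KMALLOC_SHIFT_HIGH, so the ladder of if-tests no longer needs #ifdef trimming. The underlying mapping -- request size to the smallest general cache that fits -- can be sketched in plain C roughly as follows (userspace approximation; the 96- and 192-byte special cases and the 2^25 ceiling are taken from the hunk above, the loop bound and cap value are illustrative):

    #include <assert.h>
    #include <stddef.h>

    #define KMALLOC_MAX_SIZE (32UL * 1024 * 1024)   /* illustrative cap */

    /* Return the index of the smallest general cache that fits `size`,
     * or -1 if the request exceeds the largest cache. */
    static int kmalloc_index(size_t size)
    {
        if (size == 0 || size > KMALLOC_MAX_SIZE)
            return -1;
        if (size > 64 && size <= 96)    /* odd-sized helper cache */
            return 1;
        if (size > 128 && size <= 192)  /* odd-sized helper cache */
            return 2;
        for (int i = 3; i <= 25; i++)   /* 2^3 .. 2^25 byte caches */
            if (size <= (1UL << i))
                return i;
        return -1;
    }

    int main(void)
    {
        assert(kmalloc_index(8) == 3);
        assert(kmalloc_index(100) == 7);   /* rounds up to 128 */
        assert(kmalloc_index(96) == 1);    /* special 96-byte cache */
        return 0;
    }

Because the kernel only expands this function for compile-time-constant sizes, the whole chain constant-folds down to a single index -- which is also why the kmalloc_slab() hunk now guards its link-failure branch with __builtin_constant_p(size) to help compilers that fail to propagate the constant.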
diff --git a/include/linux/smp.h b/include/linux/smp.h index 3f70149eabb..96ac21f8dd7 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h | |||
@@ -6,6 +6,7 @@ | |||
6 | * Alan Cox. <alan@redhat.com> | 6 | * Alan Cox. <alan@redhat.com> |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/errno.h> | ||
9 | 10 | ||
10 | extern void cpu_idle(void); | 11 | extern void cpu_idle(void); |
11 | 12 | ||
@@ -99,11 +100,9 @@ static inline void smp_send_reschedule(int cpu) { } | |||
99 | #define num_booting_cpus() 1 | 100 | #define num_booting_cpus() 1 |
100 | #define smp_prepare_boot_cpu() do {} while (0) | 101 | #define smp_prepare_boot_cpu() do {} while (0) |
101 | static inline int smp_call_function_single(int cpuid, void (*func) (void *info), | 102 | static inline int smp_call_function_single(int cpuid, void (*func) (void *info), |
102 | void *info, int retry, int wait) | 103 | void *info, int retry, int wait) |
103 | { | 104 | { |
104 | /* Disable interrupts here? */ | 105 | return -EBUSY; |
105 | func(info); | ||
106 | return 0; | ||
107 | } | 106 | } |
108 | 107 | ||
109 | #endif /* !SMP */ | 108 | #endif /* !SMP */ |
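The old !SMP stub simply called func(info) inline -- with whatever interrupt state the caller happened to have, as the removed "Disable interrupts here?" comment hints -- which does not match the semantics of the real IPI path. Returning -EBUSY makes the single-CPU case explicit, so callers must handle the error or run the work locally themselves. A caller-side sketch consistent with the new stub (hedged; the fallback pattern is illustrative, not taken from the patch):

    #include <errno.h>
    #include <stdio.h>

    /* Stand-in for the !SMP stub above: there is no other CPU to target. */
    static int smp_call_function_single(int cpu, void (*func)(void *), void *info)
    {
        (void)cpu; (void)func; (void)info;
        return -EBUSY;
    }

    static void report(void *info) { printf("running for %s\n", (char *)info); }

    int main(void)
    {
        /* Callers can no longer assume the stub ran func(); they must
         * either propagate the error or do the work locally. */
        if (smp_call_function_single(0, report, "cpu0") < 0)
            report("cpu0 (local fallback)");
        return 0;
    }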
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h index 4a68125b6de..ad293760f6e 100644 --- a/include/linux/sunrpc/rpc_pipe_fs.h +++ b/include/linux/sunrpc/rpc_pipe_fs.h | |||
@@ -47,6 +47,8 @@ extern struct dentry *rpc_mkpipe(struct dentry *, const char *, void *, struct r | |||
47 | extern int rpc_unlink(struct dentry *); | 47 | extern int rpc_unlink(struct dentry *); |
48 | extern struct vfsmount *rpc_get_mount(void); | 48 | extern struct vfsmount *rpc_get_mount(void); |
49 | extern void rpc_put_mount(void); | 49 | extern void rpc_put_mount(void); |
50 | extern int register_rpc_pipefs(void); | ||
51 | extern void unregister_rpc_pipefs(void); | ||
50 | 52 | ||
51 | #endif | 53 | #endif |
52 | #endif | 54 | #endif |
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index fa89ce6ce07..34f7590506f 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h | |||
@@ -244,6 +244,8 @@ void xprt_disconnect(struct rpc_xprt *xprt); | |||
244 | */ | 244 | */ |
245 | struct rpc_xprt * xs_setup_udp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to); | 245 | struct rpc_xprt * xs_setup_udp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to); |
246 | struct rpc_xprt * xs_setup_tcp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to); | 246 | struct rpc_xprt * xs_setup_tcp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to); |
247 | int init_socket_xprt(void); | ||
248 | void cleanup_socket_xprt(void); | ||
247 | 249 | ||
248 | /* | 250 | /* |
249 | * Reserved bit positions in xprt->state | 251 | * Reserved bit positions in xprt->state |
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index d555f31c074..ce0719a2cfe 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
@@ -122,7 +122,7 @@ extern struct workqueue_struct *__create_workqueue(const char *name, | |||
122 | int singlethread, | 122 | int singlethread, |
123 | int freezeable); | 123 | int freezeable); |
124 | #define create_workqueue(name) __create_workqueue((name), 0, 0) | 124 | #define create_workqueue(name) __create_workqueue((name), 0, 0) |
125 | #define create_freezeable_workqueue(name) __create_workqueue((name), 0, 1) | 125 | #define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1) |
126 | #define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0) | 126 | #define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0) |
127 | 127 | ||
128 | extern void destroy_workqueue(struct workqueue_struct *wq); | 128 | extern void destroy_workqueue(struct workqueue_struct *wq); |
@@ -160,7 +160,7 @@ static inline int cancel_delayed_work(struct delayed_work *work) | |||
160 | { | 160 | { |
161 | int ret; | 161 | int ret; |
162 | 162 | ||
163 | ret = del_timer(&work->timer); | 163 | ret = del_timer_sync(&work->timer); |
164 | if (ret) | 164 | if (ret) |
165 | work_clear_pending(&work->work); | 165 | work_clear_pending(&work->work); |
166 | return ret; | 166 | return ret; |
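The cancel_delayed_work() change matters on SMP: del_timer() only removes a timer that has not fired yet, so a handler already running on another CPU keeps running and can race with the work_clear_pending() that follows; del_timer_sync() additionally waits for a running handler to return. A userspace analogy of the distinction (sketch; a pthread stands in for the timer machinery):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int cancelled;

    static void *timer_handler(void *arg)
    {
        (void)arg;
        if (!atomic_load(&cancelled))
            puts("handler body runs");   /* may still be mid-flight */
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, timer_handler, NULL);

        /* del_timer() analogue: mark it cancelled and move on --
         * the handler may still be executing concurrently. */
        atomic_store(&cancelled, 1);

        /* del_timer_sync() analogue: also wait for the handler to
         * finish before touching state it might still be using. */
        pthread_join(t, NULL);
        puts("safe to clear pending state");
        return 0;
    }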
diff --git a/init/Kconfig b/init/Kconfig index 4e009fde4b6..a9e99f8328f 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -567,7 +567,6 @@ config SLAB | |||
567 | a slab allocator. | 567 | a slab allocator. |
568 | 568 | ||
569 | config SLUB | 569 | config SLUB |
570 | depends on EXPERIMENTAL && !ARCH_USES_SLAB_PAGE_STRUCT | ||
571 | bool "SLUB (Unqueued Allocator)" | 570 | bool "SLUB (Unqueued Allocator)" |
572 | help | 571 | help |
573 | SLUB is a slab allocator that minimizes cache line usage | 572 | SLUB is a slab allocator that minimizes cache line usage |
@@ -577,14 +576,11 @@ config SLUB | |||
577 | and has enhanced diagnostics. | 576 | and has enhanced diagnostics. |
578 | 577 | ||
579 | config SLOB | 578 | config SLOB |
580 | # | 579 | depends on EMBEDDED && !SPARSEMEM |
581 | # SLOB does not support SMP because SLAB_DESTROY_BY_RCU is unsupported | ||
582 | # | ||
583 | depends on EMBEDDED && !SMP && !SPARSEMEM | ||
584 | bool "SLOB (Simple Allocator)" | 580 | bool "SLOB (Simple Allocator)" |
585 | help | 581 | help |
586 | SLOB replaces the SLAB allocator with a drastically simpler | 582 | SLOB replaces the SLAB allocator with a drastically simpler |
587 | allocator. SLOB is more space efficient that SLAB but does not | 583 | allocator. SLOB is more space efficient than SLAB but does not |
588 | scale well (single lock for all operations) and is also highly | 584 | scale well (single lock for all operations) and is also highly |
589 | susceptible to fragmentation. SLUB can accomplish a higher object | 585 | susceptible to fragmentation. SLUB can accomplish a higher object |
590 | density. It is usually better to use SLUB instead of SLOB. | 586 | density. It is usually better to use SLUB instead of SLOB. |
diff --git a/ipc/mqueue.c b/ipc/mqueue.c index fab5707cb5f..a242c83d89d 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c | |||
@@ -215,8 +215,7 @@ static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags | |||
215 | { | 215 | { |
216 | struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo; | 216 | struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo; |
217 | 217 | ||
218 | if (flags & SLAB_CTOR_CONSTRUCTOR) | 218 | inode_init_once(&p->vfs_inode); |
219 | inode_init_once(&p->vfs_inode); | ||
220 | } | 219 | } |
221 | 220 | ||
222 | static struct inode *mqueue_alloc_inode(struct super_block *sb) | 221 | static struct inode *mqueue_alloc_inode(struct super_block *sb) |
diff --git a/kernel/fork.c b/kernel/fork.c index 49530e40ea8..87069cfc18a 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -1427,10 +1427,8 @@ static void sighand_ctor(void *data, struct kmem_cache *cachep, | |||
1427 | { | 1427 | { |
1428 | struct sighand_struct *sighand = data; | 1428 | struct sighand_struct *sighand = data; |
1429 | 1429 | ||
1430 | if (flags & SLAB_CTOR_CONSTRUCTOR) { | 1430 | spin_lock_init(&sighand->siglock); |
1431 | spin_lock_init(&sighand->siglock); | 1431 | INIT_LIST_HEAD(&sighand->signalfd_list); |
1432 | INIT_LIST_HEAD(&sighand->signalfd_list); | ||
1433 | } | ||
1434 | } | 1432 | } |
1435 | 1433 | ||
1436 | void __init proc_caches_init(void) | 1434 | void __init proc_caches_init(void) |
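The mqueue and sighand constructors above drop their `flags & SLAB_CTOR_CONSTRUCTOR` guard: with slab destructors removed elsewhere in this series, a constructor is now only ever invoked for initial object construction, so the flag test is dead weight. The resulting shape of such a ctor (sketch; the mini "slab" is illustrative):

    #include <stdio.h>

    struct obj { int ready; };

    /* New-style ctor: called exactly once per object when its slab is
     * created, so it may initialize unconditionally. The flags argument
     * survives in the signature but is always passed as 0 now. */
    static void obj_ctor(void *p, unsigned long flags)
    {
        (void)flags;
        ((struct obj *)p)->ready = 1;
    }

    int main(void)
    {
        struct obj pool[4];   /* stand-in for a freshly allocated slab */
        for (int i = 0; i < 4; i++)
            obj_ctor(&pool[i], 0);
        printf("pool[2].ready = %d\n", pool[2].ready);
        return 0;
    }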
diff --git a/kernel/power/disk.c b/kernel/power/disk.c index b5f0543ed84..f445b9cd60f 100644 --- a/kernel/power/disk.c +++ b/kernel/power/disk.c | |||
@@ -416,7 +416,8 @@ static ssize_t disk_store(struct kset *kset, const char *buf, size_t n) | |||
416 | 416 | ||
417 | mutex_lock(&pm_mutex); | 417 | mutex_lock(&pm_mutex); |
418 | for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) { | 418 | for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) { |
419 | if (!strncmp(buf, hibernation_modes[i], len)) { | 419 | if (len == strlen(hibernation_modes[i]) |
420 | && !strncmp(buf, hibernation_modes[i], len)) { | ||
420 | mode = i; | 421 | mode = i; |
421 | break; | 422 | break; |
422 | } | 423 | } |
diff --git a/kernel/power/main.c b/kernel/power/main.c index b98b80ccf43..8812985f302 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c | |||
@@ -290,13 +290,13 @@ static ssize_t state_store(struct kset *kset, const char *buf, size_t n) | |||
290 | len = p ? p - buf : n; | 290 | len = p ? p - buf : n; |
291 | 291 | ||
292 | /* First, check if we are requested to hibernate */ | 292 | /* First, check if we are requested to hibernate */ |
293 | if (!strncmp(buf, "disk", len)) { | 293 | if (len == 4 && !strncmp(buf, "disk", len)) { |
294 | error = hibernate(); | 294 | error = hibernate(); |
295 | return error ? error : n; | 295 | return error ? error : n; |
296 | } | 296 | } |
297 | 297 | ||
298 | for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) { | 298 | for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) { |
299 | if (*s && !strncmp(buf, *s, len)) | 299 | if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) |
300 | break; | 300 | break; |
301 | } | 301 | } |
302 | if (state < PM_SUSPEND_MAX && *s) | 302 | if (state < PM_SUSPEND_MAX && *s) |
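Both the disk_store() and state_store() fixes above address the same bug: matching with strncmp(buf, candidate, len) alone accepts any prefix of a valid mode, so writing "d" to the sysfs file selected "disk". Requiring len == strlen(candidate) as well makes the match exact. A minimal reproduction of the difference (sketch):

    #include <stdio.h>
    #include <string.h>

    static int matches(const char *buf, size_t len, const char *mode)
    {
        /* Old check: prefix match only. */
        int old = !strncmp(buf, mode, len);
        /* New check: lengths must match too. */
        int new = len == strlen(mode) && !strncmp(buf, mode, len);
        printf("\"%.*s\" vs \"%s\": old=%d new=%d\n",
               (int)len, buf, mode, old, new);
        return new;
    }

    int main(void)
    {
        matches("d", 1, "disk");     /* old=1 (the bug), new=0 */
        matches("disk", 4, "disk");  /* old=1, new=1 */
        return 0;
    }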
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 4073353abd4..30ee462ee79 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -227,7 +227,7 @@ static ctl_table kern_table[] = { | |||
227 | .ctl_name = KERN_CORE_PATTERN, | 227 | .ctl_name = KERN_CORE_PATTERN, |
228 | .procname = "core_pattern", | 228 | .procname = "core_pattern", |
229 | .data = core_pattern, | 229 | .data = core_pattern, |
230 | .maxlen = 128, | 230 | .maxlen = CORENAME_MAX_SIZE, |
231 | .mode = 0644, | 231 | .mode = 0644, |
232 | .proc_handler = &proc_dostring, | 232 | .proc_handler = &proc_dostring, |
233 | .strategy = &sysctl_string, | 233 | .strategy = &sysctl_string, |
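Replacing the literal 128 with CORENAME_MAX_SIZE ties the sysctl bound to the actual core_pattern buffer size; with a magic number, growing the buffer would silently leave the sysctl truncating writes. The usual defensive pattern derives the bound from the buffer itself (sketch; struct and names illustrative):

    #include <stdio.h>

    #define CORENAME_MAX_SIZE 128

    static char core_pattern[CORENAME_MAX_SIZE] = "core";

    struct ctl_entry {
        void  *data;
        size_t maxlen;
    };

    int main(void)
    {
        /* Deriving maxlen from the buffer keeps the two in lockstep. */
        struct ctl_entry e = { core_pattern, sizeof(core_pattern) };
        printf("maxlen = %zu\n", e.maxlen);
        return 0;
    }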
diff --git a/mm/memory.c b/mm/memory.c index 1d647ab0ee7..cb94488ab96 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -481,7 +481,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, | |||
481 | page = vm_normal_page(vma, addr, pte); | 481 | page = vm_normal_page(vma, addr, pte); |
482 | if (page) { | 482 | if (page) { |
483 | get_page(page); | 483 | get_page(page); |
484 | page_dup_rmap(page); | 484 | page_dup_rmap(page, vma, addr); |
485 | rss[!!PageAnon(page)]++; | 485 | rss[!!PageAnon(page)]++; |
486 | } | 486 | } |
487 | 487 | ||
diff --git a/mm/rmap.c b/mm/rmap.c --- a/mm/rmap.c +++ b/mm/rmap.c | |||
@@ -162,12 +162,10 @@ void anon_vma_unlink(struct vm_area_struct *vma) | |||
162 | static void anon_vma_ctor(void *data, struct kmem_cache *cachep, | 162 | static void anon_vma_ctor(void *data, struct kmem_cache *cachep, |
163 | unsigned long flags) | 163 | unsigned long flags) |
164 | { | 164 | { |
165 | if (flags & SLAB_CTOR_CONSTRUCTOR) { | 165 | struct anon_vma *anon_vma = data; |
166 | struct anon_vma *anon_vma = data; | ||
167 | 166 | ||
168 | spin_lock_init(&anon_vma->lock); | 167 | spin_lock_init(&anon_vma->lock); |
169 | INIT_LIST_HEAD(&anon_vma->head); | 168 | INIT_LIST_HEAD(&anon_vma->head); |
170 | } | ||
171 | } | 169 | } |
172 | 170 | ||
173 | void __init anon_vma_init(void) | 171 | void __init anon_vma_init(void) |
@@ -532,19 +530,51 @@ static void __page_set_anon_rmap(struct page *page, | |||
532 | } | 530 | } |
533 | 531 | ||
534 | /** | 532 | /** |
533 | * __page_check_anon_rmap - sanity check anonymous rmap addition | ||
534 | * @page: the page to add the mapping to | ||
535 | * @vma: the vm area in which the mapping is added | ||
536 | * @address: the user virtual address mapped | ||
537 | */ | ||
538 | static void __page_check_anon_rmap(struct page *page, | ||
539 | struct vm_area_struct *vma, unsigned long address) | ||
540 | { | ||
541 | #ifdef CONFIG_DEBUG_VM | ||
542 | /* | ||
543 | * The page's anon-rmap details (mapping and index) are guaranteed to | ||
544 | * be set up correctly at this point. | ||
545 | * | ||
546 | * We have exclusion against page_add_anon_rmap because the caller | ||
547 | * always holds the page locked, except if called from page_dup_rmap, | ||
548 | * in which case the page is already known to be setup. | ||
549 | * | ||
550 | * We have exclusion against page_add_new_anon_rmap because those pages | ||
551 | * are initially only visible via the pagetables, and the pte is locked | ||
552 | * over the call to page_add_new_anon_rmap. | ||
553 | */ | ||
554 | struct anon_vma *anon_vma = vma->anon_vma; | ||
555 | anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; | ||
556 | BUG_ON(page->mapping != (struct address_space *)anon_vma); | ||
557 | BUG_ON(page->index != linear_page_index(vma, address)); | ||
558 | #endif | ||
559 | } | ||
560 | |||
561 | /** | ||
535 | * page_add_anon_rmap - add pte mapping to an anonymous page | 562 | * page_add_anon_rmap - add pte mapping to an anonymous page |
536 | * @page: the page to add the mapping to | 563 | * @page: the page to add the mapping to |
537 | * @vma: the vm area in which the mapping is added | 564 | * @vma: the vm area in which the mapping is added |
538 | * @address: the user virtual address mapped | 565 | * @address: the user virtual address mapped |
539 | * | 566 | * |
540 | * The caller needs to hold the pte lock. | 567 | * The caller needs to hold the pte lock and the page must be locked. |
541 | */ | 568 | */ |
542 | void page_add_anon_rmap(struct page *page, | 569 | void page_add_anon_rmap(struct page *page, |
543 | struct vm_area_struct *vma, unsigned long address) | 570 | struct vm_area_struct *vma, unsigned long address) |
544 | { | 571 | { |
572 | VM_BUG_ON(!PageLocked(page)); | ||
573 | VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); | ||
545 | if (atomic_inc_and_test(&page->_mapcount)) | 574 | if (atomic_inc_and_test(&page->_mapcount)) |
546 | __page_set_anon_rmap(page, vma, address); | 575 | __page_set_anon_rmap(page, vma, address); |
547 | /* else checking page index and mapping is racy */ | 576 | else |
577 | __page_check_anon_rmap(page, vma, address); | ||
548 | } | 578 | } |
549 | 579 | ||
550 | /* | 580 | /* |
@@ -555,10 +585,12 @@ void page_add_anon_rmap(struct page *page, | |||
555 | * | 585 | * |
556 | * Same as page_add_anon_rmap but must only be called on *new* pages. | 586 | * Same as page_add_anon_rmap but must only be called on *new* pages. |
557 | * This means the inc-and-test can be bypassed. | 587 | * This means the inc-and-test can be bypassed. |
588 | * Page does not have to be locked. | ||
558 | */ | 589 | */ |
559 | void page_add_new_anon_rmap(struct page *page, | 590 | void page_add_new_anon_rmap(struct page *page, |
560 | struct vm_area_struct *vma, unsigned long address) | 591 | struct vm_area_struct *vma, unsigned long address) |
561 | { | 592 | { |
593 | BUG_ON(address < vma->vm_start || address >= vma->vm_end); | ||
562 | atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */ | 594 | atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */ |
563 | __page_set_anon_rmap(page, vma, address); | 595 | __page_set_anon_rmap(page, vma, address); |
564 | } | 596 | } |
@@ -575,6 +607,26 @@ void page_add_file_rmap(struct page *page) | |||
575 | __inc_zone_page_state(page, NR_FILE_MAPPED); | 607 | __inc_zone_page_state(page, NR_FILE_MAPPED); |
576 | } | 608 | } |
577 | 609 | ||
610 | #ifdef CONFIG_DEBUG_VM | ||
611 | /** | ||
612 | * page_dup_rmap - duplicate pte mapping to a page | ||
613 | * @page: the page to add the mapping to | ||
614 | * | ||
615 | * For copy_page_range only: minimal extract from page_add_file_rmap / | ||
616 | * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's | ||
617 | * quicker. | ||
618 | * | ||
619 | * The caller needs to hold the pte lock. | ||
620 | */ | ||
621 | void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) | ||
622 | { | ||
623 | BUG_ON(page_mapcount(page) == 0); | ||
624 | if (PageAnon(page)) | ||
625 | __page_check_anon_rmap(page, vma, address); | ||
626 | atomic_inc(&page->_mapcount); | ||
627 | } | ||
628 | #endif | ||
629 | |||
578 | /** | 630 | /** |
579 | * page_remove_rmap - take down pte mapping from a page | 631 | * page_remove_rmap - take down pte mapping from a page |
580 | * @page: page to remove mapping from | 632 | * @page: page to remove mapping from |
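The new __page_check_anon_rmap() relies on the anon_vma pointer being stored in page->mapping with the low PAGE_MAPPING_ANON bit set -- the tag that distinguishes anonymous from file-backed mappings in a single field. That is why the hunk adds PAGE_MAPPING_ANON to the pointer before the BUG_ON comparison instead of masking it off the stored value. The tagging arithmetic in isolation (userspace sketch; PAGE_MAPPING_ANON assumed to be bit 0 as in the hunk above):

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_MAPPING_ANON 1UL

    struct anon_vma { int dummy; };

    int main(void)
    {
        static struct anon_vma av;   /* aligned, so the low bit is free */
        void *mapping = (void *)((uintptr_t)&av + PAGE_MAPPING_ANON);

        /* PageAnon() analogue: test the tag bit. */
        assert((uintptr_t)mapping & PAGE_MAPPING_ANON);

        /* The debug check compares the *tagged* pointer directly, so the
         * expected value is built the same way. */
        assert(mapping == (void *)((uintptr_t)&av | PAGE_MAPPING_ANON));
        return 0;
    }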
diff --git a/mm/shmem.c b/mm/shmem.c index f01e8deed64..e537317bec4 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -2358,13 +2358,11 @@ static void init_once(void *foo, struct kmem_cache *cachep, | |||
2358 | { | 2358 | { |
2359 | struct shmem_inode_info *p = (struct shmem_inode_info *) foo; | 2359 | struct shmem_inode_info *p = (struct shmem_inode_info *) foo; |
2360 | 2360 | ||
2361 | if (flags & SLAB_CTOR_CONSTRUCTOR) { | 2361 | inode_init_once(&p->vfs_inode); |
2362 | inode_init_once(&p->vfs_inode); | ||
2363 | #ifdef CONFIG_TMPFS_POSIX_ACL | 2362 | #ifdef CONFIG_TMPFS_POSIX_ACL |
2364 | p->i_acl = NULL; | 2363 | p->i_acl = NULL; |
2365 | p->i_default_acl = NULL; | 2364 | p->i_default_acl = NULL; |
2366 | #endif | 2365 | #endif |
2367 | } | ||
2368 | } | 2366 | } |
2369 | 2367 | ||
2370 | static int init_inodecache(void) | 2368 | static int init_inodecache(void) |
diff --git a/mm/slab.c b/mm/slab.c --- a/mm/slab.c +++ b/mm/slab.c | |||
@@ -409,9 +409,6 @@ struct kmem_cache { | |||
409 | /* constructor func */ | 409 | /* constructor func */ |
410 | void (*ctor) (void *, struct kmem_cache *, unsigned long); | 410 | void (*ctor) (void *, struct kmem_cache *, unsigned long); |
411 | 411 | ||
412 | /* de-constructor func */ | ||
413 | void (*dtor) (void *, struct kmem_cache *, unsigned long); | ||
414 | |||
415 | /* 5) cache creation/removal */ | 412 | /* 5) cache creation/removal */ |
416 | const char *name; | 413 | const char *name; |
417 | struct list_head next; | 414 | struct list_head next; |
@@ -572,21 +569,6 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp) | |||
572 | #endif | 569 | #endif |
573 | 570 | ||
574 | /* | 571 | /* |
575 | * Maximum size of an obj (in 2^order pages) and absolute limit for the gfp | ||
576 | * order. | ||
577 | */ | ||
578 | #if defined(CONFIG_LARGE_ALLOCS) | ||
579 | #define MAX_OBJ_ORDER 13 /* up to 32Mb */ | ||
580 | #define MAX_GFP_ORDER 13 /* up to 32Mb */ | ||
581 | #elif defined(CONFIG_MMU) | ||
582 | #define MAX_OBJ_ORDER 5 /* 32 pages */ | ||
583 | #define MAX_GFP_ORDER 5 /* 32 pages */ | ||
584 | #else | ||
585 | #define MAX_OBJ_ORDER 8 /* up to 1Mb */ | ||
586 | #define MAX_GFP_ORDER 8 /* up to 1Mb */ | ||
587 | #endif | ||
588 | |||
589 | /* | ||
590 | * Do not go above this order unless 0 objects fit into the slab. | 572 | * Do not go above this order unless 0 objects fit into the slab. |
591 | */ | 573 | */ |
592 | #define BREAK_GFP_ORDER_HI 1 | 574 | #define BREAK_GFP_ORDER_HI 1 |
@@ -792,6 +774,7 @@ static inline struct kmem_cache *__find_general_cachep(size_t size, | |||
792 | */ | 774 | */ |
793 | BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL); | 775 | BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL); |
794 | #endif | 776 | #endif |
777 | WARN_ON_ONCE(size == 0); | ||
795 | while (size > csizep->cs_size) | 778 | while (size > csizep->cs_size) |
796 | csizep++; | 779 | csizep++; |
797 | 780 | ||
@@ -1911,20 +1894,11 @@ static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp) | |||
1911 | slab_error(cachep, "end of a freed object " | 1894 | slab_error(cachep, "end of a freed object " |
1912 | "was overwritten"); | 1895 | "was overwritten"); |
1913 | } | 1896 | } |
1914 | if (cachep->dtor && !(cachep->flags & SLAB_POISON)) | ||
1915 | (cachep->dtor) (objp + obj_offset(cachep), cachep, 0); | ||
1916 | } | 1897 | } |
1917 | } | 1898 | } |
1918 | #else | 1899 | #else |
1919 | static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp) | 1900 | static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp) |
1920 | { | 1901 | { |
1921 | if (cachep->dtor) { | ||
1922 | int i; | ||
1923 | for (i = 0; i < cachep->num; i++) { | ||
1924 | void *objp = index_to_obj(cachep, slabp, i); | ||
1925 | (cachep->dtor) (objp, cachep, 0); | ||
1926 | } | ||
1927 | } | ||
1928 | } | 1902 | } |
1929 | #endif | 1903 | #endif |
1930 | 1904 | ||
@@ -2013,7 +1987,7 @@ static size_t calculate_slab_order(struct kmem_cache *cachep, | |||
2013 | size_t left_over = 0; | 1987 | size_t left_over = 0; |
2014 | int gfporder; | 1988 | int gfporder; |
2015 | 1989 | ||
2016 | for (gfporder = 0; gfporder <= MAX_GFP_ORDER; gfporder++) { | 1990 | for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) { |
2017 | unsigned int num; | 1991 | unsigned int num; |
2018 | size_t remainder; | 1992 | size_t remainder; |
2019 | 1993 | ||
@@ -2124,7 +2098,7 @@ static int setup_cpu_cache(struct kmem_cache *cachep) | |||
2124 | * @align: The required alignment for the objects. | 2098 | * @align: The required alignment for the objects. |
2125 | * @flags: SLAB flags | 2099 | * @flags: SLAB flags |
2126 | * @ctor: A constructor for the objects. | 2100 | * @ctor: A constructor for the objects. |
2127 | * @dtor: A destructor for the objects. | 2101 | * @dtor: A destructor for the objects (not implemented anymore). |
2128 | * | 2102 | * |
2129 | * Returns a ptr to the cache on success, NULL on failure. | 2103 | * Returns a ptr to the cache on success, NULL on failure. |
2130 | * Cannot be called within an interrupt, but can be interrupted. | 2104 | * Cannot be called within an interrupt, but can be interrupted. |
@@ -2159,7 +2133,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, | |||
2159 | * Sanity checks... these are all serious usage bugs. | 2133 | * Sanity checks... these are all serious usage bugs. |
2160 | */ | 2134 | */ |
2161 | if (!name || in_interrupt() || (size < BYTES_PER_WORD) || | 2135 | if (!name || in_interrupt() || (size < BYTES_PER_WORD) || |
2162 | (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) { | 2136 | size > KMALLOC_MAX_SIZE || dtor) { |
2163 | printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__, | 2137 | printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__, |
2164 | name); | 2138 | name); |
2165 | BUG(); | 2139 | BUG(); |
@@ -2213,9 +2187,6 @@ kmem_cache_create (const char *name, size_t size, size_t align, | |||
2213 | if (flags & SLAB_DESTROY_BY_RCU) | 2187 | if (flags & SLAB_DESTROY_BY_RCU) |
2214 | BUG_ON(flags & SLAB_POISON); | 2188 | BUG_ON(flags & SLAB_POISON); |
2215 | #endif | 2189 | #endif |
2216 | if (flags & SLAB_DESTROY_BY_RCU) | ||
2217 | BUG_ON(dtor); | ||
2218 | |||
2219 | /* | 2190 | /* |
2220 | * Always checks flags, a caller might be expecting debug support which | 2191 | * Always checks flags, a caller might be expecting debug support which |
2221 | * isn't available. | 2192 | * isn't available. |
@@ -2370,7 +2341,6 @@ kmem_cache_create (const char *name, size_t size, size_t align, | |||
2370 | BUG_ON(!cachep->slabp_cache); | 2341 | BUG_ON(!cachep->slabp_cache); |
2371 | } | 2342 | } |
2372 | cachep->ctor = ctor; | 2343 | cachep->ctor = ctor; |
2373 | cachep->dtor = dtor; | ||
2374 | cachep->name = name; | 2344 | cachep->name = name; |
2375 | 2345 | ||
2376 | if (setup_cpu_cache(cachep)) { | 2346 | if (setup_cpu_cache(cachep)) { |
@@ -2625,7 +2595,7 @@ static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp) | |||
2625 | } | 2595 | } |
2626 | 2596 | ||
2627 | static void cache_init_objs(struct kmem_cache *cachep, | 2597 | static void cache_init_objs(struct kmem_cache *cachep, |
2628 | struct slab *slabp, unsigned long ctor_flags) | 2598 | struct slab *slabp) |
2629 | { | 2599 | { |
2630 | int i; | 2600 | int i; |
2631 | 2601 | ||
@@ -2649,7 +2619,7 @@ static void cache_init_objs(struct kmem_cache *cachep, | |||
2649 | */ | 2619 | */ |
2650 | if (cachep->ctor && !(cachep->flags & SLAB_POISON)) | 2620 | if (cachep->ctor && !(cachep->flags & SLAB_POISON)) |
2651 | cachep->ctor(objp + obj_offset(cachep), cachep, | 2621 | cachep->ctor(objp + obj_offset(cachep), cachep, |
2652 | ctor_flags); | 2622 | 0); |
2653 | 2623 | ||
2654 | if (cachep->flags & SLAB_RED_ZONE) { | 2624 | if (cachep->flags & SLAB_RED_ZONE) { |
2655 | if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) | 2625 | if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) |
@@ -2665,7 +2635,7 @@ static void cache_init_objs(struct kmem_cache *cachep, | |||
2665 | cachep->buffer_size / PAGE_SIZE, 0); | 2635 | cachep->buffer_size / PAGE_SIZE, 0); |
2666 | #else | 2636 | #else |
2667 | if (cachep->ctor) | 2637 | if (cachep->ctor) |
2668 | cachep->ctor(objp, cachep, ctor_flags); | 2638 | cachep->ctor(objp, cachep, 0); |
2669 | #endif | 2639 | #endif |
2670 | slab_bufctl(slabp)[i] = i + 1; | 2640 | slab_bufctl(slabp)[i] = i + 1; |
2671 | } | 2641 | } |
@@ -2754,7 +2724,6 @@ static int cache_grow(struct kmem_cache *cachep, | |||
2754 | struct slab *slabp; | 2724 | struct slab *slabp; |
2755 | size_t offset; | 2725 | size_t offset; |
2756 | gfp_t local_flags; | 2726 | gfp_t local_flags; |
2757 | unsigned long ctor_flags; | ||
2758 | struct kmem_list3 *l3; | 2727 | struct kmem_list3 *l3; |
2759 | 2728 | ||
2760 | /* | 2729 | /* |
@@ -2763,7 +2732,6 @@ static int cache_grow(struct kmem_cache *cachep, | |||
2763 | */ | 2732 | */ |
2764 | BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK)); | 2733 | BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK)); |
2765 | 2734 | ||
2766 | ctor_flags = SLAB_CTOR_CONSTRUCTOR; | ||
2767 | local_flags = (flags & GFP_LEVEL_MASK); | 2735 | local_flags = (flags & GFP_LEVEL_MASK); |
2768 | /* Take the l3 list lock to change the colour_next on this node */ | 2736 | /* Take the l3 list lock to change the colour_next on this node */ |
2769 | check_irq_off(); | 2737 | check_irq_off(); |
@@ -2808,7 +2776,7 @@ static int cache_grow(struct kmem_cache *cachep, | |||
2808 | slabp->nodeid = nodeid; | 2776 | slabp->nodeid = nodeid; |
2809 | slab_map_pages(cachep, slabp, objp); | 2777 | slab_map_pages(cachep, slabp, objp); |
2810 | 2778 | ||
2811 | cache_init_objs(cachep, slabp, ctor_flags); | 2779 | cache_init_objs(cachep, slabp); |
2812 | 2780 | ||
2813 | if (local_flags & __GFP_WAIT) | 2781 | if (local_flags & __GFP_WAIT) |
2814 | local_irq_disable(); | 2782 | local_irq_disable(); |
@@ -2835,7 +2803,6 @@ failed: | |||
2835 | * Perform extra freeing checks: | 2803 | * Perform extra freeing checks: |
2836 | * - detect bad pointers. | 2804 | * - detect bad pointers. |
2837 | * - POISON/RED_ZONE checking | 2805 | * - POISON/RED_ZONE checking |
2838 | * - destructor calls, for caches with POISON+dtor | ||
2839 | */ | 2806 | */ |
2840 | static void kfree_debugcheck(const void *objp) | 2807 | static void kfree_debugcheck(const void *objp) |
2841 | { | 2808 | { |
@@ -2894,12 +2861,6 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, | |||
2894 | BUG_ON(objnr >= cachep->num); | 2861 | BUG_ON(objnr >= cachep->num); |
2895 | BUG_ON(objp != index_to_obj(cachep, slabp, objnr)); | 2862 | BUG_ON(objp != index_to_obj(cachep, slabp, objnr)); |
2896 | 2863 | ||
2897 | if (cachep->flags & SLAB_POISON && cachep->dtor) { | ||
2898 | /* we want to cache poison the object, | ||
2899 | * call the destruction callback | ||
2900 | */ | ||
2901 | cachep->dtor(objp + obj_offset(cachep), cachep, 0); | ||
2902 | } | ||
2903 | #ifdef CONFIG_DEBUG_SLAB_LEAK | 2864 | #ifdef CONFIG_DEBUG_SLAB_LEAK |
2904 | slab_bufctl(slabp)[objnr] = BUFCTL_FREE; | 2865 | slab_bufctl(slabp)[objnr] = BUFCTL_FREE; |
2905 | #endif | 2866 | #endif |
@@ -3099,7 +3060,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, | |||
3099 | #endif | 3060 | #endif |
3100 | objp += obj_offset(cachep); | 3061 | objp += obj_offset(cachep); |
3101 | if (cachep->ctor && cachep->flags & SLAB_POISON) | 3062 | if (cachep->ctor && cachep->flags & SLAB_POISON) |
3102 | cachep->ctor(objp, cachep, SLAB_CTOR_CONSTRUCTOR); | 3063 | cachep->ctor(objp, cachep, 0); |
3103 | #if ARCH_SLAB_MINALIGN | 3064 | #if ARCH_SLAB_MINALIGN |
3104 | if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) { | 3065 | if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) { |
3105 | printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n", | 3066 | printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n", |
diff --git a/mm/slob.c b/mm/slob.c --- a/mm/slob.c +++ b/mm/slob.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/init.h> | 35 | #include <linux/init.h> |
36 | #include <linux/module.h> | 36 | #include <linux/module.h> |
37 | #include <linux/timer.h> | 37 | #include <linux/timer.h> |
38 | #include <linux/rcupdate.h> | ||
38 | 39 | ||
39 | struct slob_block { | 40 | struct slob_block { |
40 | int units; | 41 | int units; |
@@ -53,6 +54,16 @@ struct bigblock { | |||
53 | }; | 54 | }; |
54 | typedef struct bigblock bigblock_t; | 55 | typedef struct bigblock bigblock_t; |
55 | 56 | ||
57 | /* | ||
58 | * struct slob_rcu is inserted at the tail of allocated slob blocks, which | ||
59 | * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free | ||
60 | * the block using call_rcu. | ||
61 | */ | ||
62 | struct slob_rcu { | ||
63 | struct rcu_head head; | ||
64 | int size; | ||
65 | }; | ||
66 | |||
56 | static slob_t arena = { .next = &arena, .units = 1 }; | 67 | static slob_t arena = { .next = &arena, .units = 1 }; |
57 | static slob_t *slobfree = &arena; | 68 | static slob_t *slobfree = &arena; |
58 | static bigblock_t *bigblocks; | 69 | static bigblock_t *bigblocks; |
@@ -266,9 +277,9 @@ size_t ksize(const void *block) | |||
266 | 277 | ||
267 | struct kmem_cache { | 278 | struct kmem_cache { |
268 | unsigned int size, align; | 279 | unsigned int size, align; |
280 | unsigned long flags; | ||
269 | const char *name; | 281 | const char *name; |
270 | void (*ctor)(void *, struct kmem_cache *, unsigned long); | 282 | void (*ctor)(void *, struct kmem_cache *, unsigned long); |
271 | void (*dtor)(void *, struct kmem_cache *, unsigned long); | ||
272 | }; | 283 | }; |
273 | 284 | ||
274 | struct kmem_cache *kmem_cache_create(const char *name, size_t size, | 285 | struct kmem_cache *kmem_cache_create(const char *name, size_t size, |
@@ -283,8 +294,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, | |||
283 | if (c) { | 294 | if (c) { |
284 | c->name = name; | 295 | c->name = name; |
285 | c->size = size; | 296 | c->size = size; |
297 | if (flags & SLAB_DESTROY_BY_RCU) { | ||
298 | /* leave room for rcu footer at the end of object */ | ||
299 | c->size += sizeof(struct slob_rcu); | ||
300 | } | ||
301 | c->flags = flags; | ||
286 | c->ctor = ctor; | 302 | c->ctor = ctor; |
287 | c->dtor = dtor; | ||
288 | /* ignore alignment unless it's forced */ | 303 | /* ignore alignment unless it's forced */ |
289 | c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0; | 304 | c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0; |
290 | if (c->align < align) | 305 | if (c->align < align) |
@@ -312,7 +327,7 @@ void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags) | |||
312 | b = (void *)__get_free_pages(flags, get_order(c->size)); | 327 | b = (void *)__get_free_pages(flags, get_order(c->size)); |
313 | 328 | ||
314 | if (c->ctor) | 329 | if (c->ctor) |
315 | c->ctor(b, c, SLAB_CTOR_CONSTRUCTOR); | 330 | c->ctor(b, c, 0); |
316 | 331 | ||
317 | return b; | 332 | return b; |
318 | } | 333 | } |
@@ -328,15 +343,33 @@ void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags) | |||
328 | } | 343 | } |
329 | EXPORT_SYMBOL(kmem_cache_zalloc); | 344 | EXPORT_SYMBOL(kmem_cache_zalloc); |
330 | 345 | ||
331 | void kmem_cache_free(struct kmem_cache *c, void *b) | 346 | static void __kmem_cache_free(void *b, int size) |
332 | { | 347 | { |
333 | if (c->dtor) | 348 | if (size < PAGE_SIZE) |
334 | c->dtor(b, c, 0); | 349 | slob_free(b, size); |
335 | |||
336 | if (c->size < PAGE_SIZE) | ||
337 | slob_free(b, c->size); | ||
338 | else | 350 | else |
339 | free_pages((unsigned long)b, get_order(c->size)); | 351 | free_pages((unsigned long)b, get_order(size)); |
352 | } | ||
353 | |||
354 | static void kmem_rcu_free(struct rcu_head *head) | ||
355 | { | ||
356 | struct slob_rcu *slob_rcu = (struct slob_rcu *)head; | ||
357 | void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu)); | ||
358 | |||
359 | __kmem_cache_free(b, slob_rcu->size); | ||
360 | } | ||
361 | |||
362 | void kmem_cache_free(struct kmem_cache *c, void *b) | ||
363 | { | ||
364 | if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) { | ||
365 | struct slob_rcu *slob_rcu; | ||
366 | slob_rcu = b + (c->size - sizeof(struct slob_rcu)); | ||
367 | INIT_RCU_HEAD(&slob_rcu->head); | ||
368 | slob_rcu->size = c->size; | ||
369 | call_rcu(&slob_rcu->head, kmem_rcu_free); | ||
370 | } else { | ||
371 | __kmem_cache_free(b, c->size); | ||
372 | } | ||
340 | } | 373 | } |
341 | EXPORT_SYMBOL(kmem_cache_free); | 374 | EXPORT_SYMBOL(kmem_cache_free); |
342 | 375 | ||
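SLOB's SLAB_DESTROY_BY_RCU support works by growing every object by sizeof(struct slob_rcu) and using that tail as the rcu_head, so the deferred-free callback can recover both the block start and its total size from the footer alone. The pointer arithmetic, extracted (userspace sketch; a direct function call stands in for call_rcu and the grace period):

    #include <stdio.h>
    #include <stdlib.h>

    struct slob_rcu {
        void (*func)(struct slob_rcu *);   /* stand-in for struct rcu_head */
        int size;                          /* total size incl. footer */
    };

    static void kmem_rcu_free(struct slob_rcu *slob_rcu)
    {
        /* Walk back from the footer to the start of the block. */
        void *b = (char *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
        printf("freeing %d-byte block at %p\n", slob_rcu->size, b);
        free(b);
    }

    int main(void)
    {
        int obj_size = 40;
        int size = obj_size + sizeof(struct slob_rcu);  /* room for footer */
        void *b = malloc(size);

        struct slob_rcu *rcu = (struct slob_rcu *)((char *)b + obj_size);
        rcu->size = size;
        rcu->func = kmem_rcu_free;

        rcu->func(rcu);   /* call_rcu() would defer this past a grace period */
        return 0;
    }

This is also what lifts the Kconfig restriction above: SLOB no longer needs !SMP once RCU-deferred frees are honored.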
diff --git a/mm/slub.c b/mm/slub.c --- a/mm/slub.c +++ b/mm/slub.c | |||
@@ -78,10 +78,18 @@ | |||
78 | * | 78 | * |
79 | * Overloading of page flags that are otherwise used for LRU management. | 79 | * Overloading of page flags that are otherwise used for LRU management. |
80 | * | 80 | * |
81 | * PageActive The slab is used as a cpu cache. Allocations | 81 | * PageActive The slab is frozen and exempt from list processing. |
82 | * may be performed from the slab. The slab is not | 82 | * This means that the slab is dedicated to a purpose |
83 | * on any slab list and cannot be moved onto one. | 83 | * such as satisfying allocations for a specific |
84 | * The cpu slab may be equipped with an additioanl | 84 | * processor. Objects may be freed in the slab while |
85 | * it is frozen but slab_free will then skip the usual | ||
86 | * list operations. It is up to the processor holding | ||
87 | * the slab to integrate the slab into the slab lists | ||
88 | * when the slab is no longer needed. | ||
89 | * | ||
90 | * One use of this flag is to mark slabs that are | ||
91 | * used for allocations. Then such a slab becomes a cpu | ||
92 | * slab. The cpu slab may be equipped with an additional | ||
85 | * lockless_freelist that allows lockless access to | 93 | * lockless_freelist that allows lockless access to |
86 | * free objects in addition to the regular freelist | 94 | * free objects in addition to the regular freelist |
87 | * that requires the slab lock. | 95 | * that requires the slab lock. |
@@ -91,27 +99,42 @@ | |||
91 | * the fast path and disables lockless freelists. | 99 | * the fast path and disables lockless freelists. |
92 | */ | 100 | */ |
93 | 101 | ||
94 | static inline int SlabDebug(struct page *page) | 102 | #define FROZEN (1 << PG_active) |
95 | { | 103 | |
96 | #ifdef CONFIG_SLUB_DEBUG | 104 | #ifdef CONFIG_SLUB_DEBUG |
97 | return PageError(page); | 105 | #define SLABDEBUG (1 << PG_error) |
98 | #else | 106 | #else |
99 | return 0; | 107 | #define SLABDEBUG 0 |
100 | #endif | 108 | #endif |
109 | |||
110 | static inline int SlabFrozen(struct page *page) | ||
111 | { | ||
112 | return page->flags & FROZEN; | ||
113 | } | ||
114 | |||
115 | static inline void SetSlabFrozen(struct page *page) | ||
116 | { | ||
117 | page->flags |= FROZEN; | ||
118 | } | ||
119 | |||
120 | static inline void ClearSlabFrozen(struct page *page) | ||
121 | { | ||
122 | page->flags &= ~FROZEN; | ||
123 | } | ||
124 | |||
125 | static inline int SlabDebug(struct page *page) | ||
126 | { | ||
127 | return page->flags & SLABDEBUG; | ||
101 | } | 128 | } |
102 | 129 | ||
103 | static inline void SetSlabDebug(struct page *page) | 130 | static inline void SetSlabDebug(struct page *page) |
104 | { | 131 | { |
105 | #ifdef CONFIG_SLUB_DEBUG | 132 | page->flags |= SLABDEBUG; |
106 | SetPageError(page); | ||
107 | #endif | ||
108 | } | 133 | } |
109 | 134 | ||
110 | static inline void ClearSlabDebug(struct page *page) | 135 | static inline void ClearSlabDebug(struct page *page) |
111 | { | 136 | { |
112 | #ifdef CONFIG_SLUB_DEBUG | 137 | page->flags &= ~SLABDEBUG; |
113 | ClearPageError(page); | ||
114 | #endif | ||
115 | } | 138 | } |
116 | 139 | ||
117 | /* | 140 | /* |
@@ -719,6 +742,22 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search) | |||
719 | return search == NULL; | 742 | return search == NULL; |
720 | } | 743 | } |
721 | 744 | ||
745 | static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc) | ||
746 | { | ||
747 | if (s->flags & SLAB_TRACE) { | ||
748 | printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n", | ||
749 | s->name, | ||
750 | alloc ? "alloc" : "free", | ||
751 | object, page->inuse, | ||
752 | page->freelist); | ||
753 | |||
754 | if (!alloc) | ||
755 | print_section("Object", (void *)object, s->objsize); | ||
756 | |||
757 | dump_stack(); | ||
758 | } | ||
759 | } | ||
760 | |||
722 | /* | 761 | /* |
723 | * Tracking of fully allocated slabs for debugging purposes. | 762 | * Tracking of fully allocated slabs for debugging purposes. |
724 | */ | 763 | */ |
@@ -743,8 +782,18 @@ static void remove_full(struct kmem_cache *s, struct page *page) | |||
743 | spin_unlock(&n->list_lock); | 782 | spin_unlock(&n->list_lock); |
744 | } | 783 | } |
745 | 784 | ||
746 | static int alloc_object_checks(struct kmem_cache *s, struct page *page, | 785 | static void setup_object_debug(struct kmem_cache *s, struct page *page, |
747 | void *object) | 786 | void *object) |
787 | { | ||
788 | if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))) | ||
789 | return; | ||
790 | |||
791 | init_object(s, object, 0); | ||
792 | init_tracking(s, object); | ||
793 | } | ||
794 | |||
795 | static int alloc_debug_processing(struct kmem_cache *s, struct page *page, | ||
796 | void *object, void *addr) | ||
748 | { | 797 | { |
749 | if (!check_slab(s, page)) | 798 | if (!check_slab(s, page)) |
750 | goto bad; | 799 | goto bad; |
@@ -759,13 +808,16 @@ static int alloc_object_checks(struct kmem_cache *s, struct page *page, | |||
759 | goto bad; | 808 | goto bad; |
760 | } | 809 | } |
761 | 810 | ||
762 | if (!object) | 811 | if (object && !check_object(s, page, object, 0)) |
763 | return 1; | ||
764 | |||
765 | if (!check_object(s, page, object, 0)) | ||
766 | goto bad; | 812 | goto bad; |
767 | 813 | ||
814 | /* Success: perform special debug activities for allocs */ | ||
815 | if (s->flags & SLAB_STORE_USER) | ||
816 | set_track(s, object, TRACK_ALLOC, addr); | ||
817 | trace(s, page, object, 1); | ||
818 | init_object(s, object, 1); | ||
768 | return 1; | 819 | return 1; |
820 | |||
769 | bad: | 821 | bad: |
770 | if (PageSlab(page)) { | 822 | if (PageSlab(page)) { |
771 | /* | 823 | /* |
@@ -783,8 +835,8 @@ bad: | |||
783 | return 0; | 835 | return 0; |
784 | } | 836 | } |
785 | 837 | ||
786 | static int free_object_checks(struct kmem_cache *s, struct page *page, | 838 | static int free_debug_processing(struct kmem_cache *s, struct page *page, |
787 | void *object) | 839 | void *object, void *addr) |
788 | { | 840 | { |
789 | if (!check_slab(s, page)) | 841 | if (!check_slab(s, page)) |
790 | goto fail; | 842 | goto fail; |
@@ -818,29 +870,22 @@ static int free_object_checks(struct kmem_cache *s, struct page *page, | |||
818 | "to slab %s", object, page->slab->name); | 870 | "to slab %s", object, page->slab->name); |
819 | goto fail; | 871 | goto fail; |
820 | } | 872 | } |
873 | |||
874 | /* Special debug activities for freeing objects */ | ||
875 | if (!SlabFrozen(page) && !page->freelist) | ||
876 | remove_full(s, page); | ||
877 | if (s->flags & SLAB_STORE_USER) | ||
878 | set_track(s, object, TRACK_FREE, addr); | ||
879 | trace(s, page, object, 0); | ||
880 | init_object(s, object, 0); | ||
821 | return 1; | 881 | return 1; |
882 | |||
822 | fail: | 883 | fail: |
823 | printk(KERN_ERR "@@@ SLUB: %s slab 0x%p object at 0x%p not freed.\n", | 884 | printk(KERN_ERR "@@@ SLUB: %s slab 0x%p object at 0x%p not freed.\n", |
824 | s->name, page, object); | 885 | s->name, page, object); |
825 | return 0; | 886 | return 0; |
826 | } | 887 | } |
827 | 888 | ||
828 | static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc) | ||
829 | { | ||
830 | if (s->flags & SLAB_TRACE) { | ||
831 | printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n", | ||
832 | s->name, | ||
833 | alloc ? "alloc" : "free", | ||
834 | object, page->inuse, | ||
835 | page->freelist); | ||
836 | |||
837 | if (!alloc) | ||
838 | print_section("Object", (void *)object, s->objsize); | ||
839 | |||
840 | dump_stack(); | ||
841 | } | ||
842 | } | ||
843 | |||
844 | static int __init setup_slub_debug(char *str) | 889 | static int __init setup_slub_debug(char *str) |
845 | { | 890 | { |
846 | if (!str || *str != '=') | 891 | if (!str || *str != '=') |
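Folding set_track(), trace() and init_object() into alloc_debug_processing()/free_debug_processing() puts all debug-only work behind a single call that the !CONFIG_SLUB_DEBUG stubs reduce to nothing, so slab_alloc()/slab_free() fast paths stay lean. The structural idea (sketch; unlikely() expanded as in the kernel, everything else illustrative):

    #include <stdio.h>

    #define unlikely(x) __builtin_expect(!!(x), 0)

    static int debug_enabled;   /* stand-in for SlabDebug(page) */

    /* All debug-only work lives here: sanity checks, caller tracking,
     * tracing, poisoning. Returns 0 if the operation must be rejected. */
    static int alloc_debug_processing(void *object, void *addr)
    {
        printf("alloc check+track object=%p caller=%p\n", object, addr);
        return 1;
    }

    static void *slab_alloc(void *object, void *addr)
    {
        if (unlikely(debug_enabled) && !alloc_debug_processing(object, addr))
            return NULL;
        return object;   /* fast path stays branch-light */
    }

    int main(void)
    {
        int x;
        debug_enabled = 1;
        return slab_alloc(&x, __builtin_return_address(0)) ? 0 : 1;
    }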
@@ -891,13 +936,13 @@ static void kmem_cache_open_debug_check(struct kmem_cache *s) | |||
891 | * On 32 bit platforms the limit is 256k. On 64bit platforms | 936 | * On 32 bit platforms the limit is 256k. On 64bit platforms |
892 | * the limit is 512k. | 937 | * the limit is 512k. |
893 | * | 938 | * |
894 | * Debugging or ctor/dtors may create a need to move the free | 939 | * Debugging or ctor may create a need to move the free |
895 | * pointer. Fail if this happens. | 940 | * pointer. Fail if this happens. |
896 | */ | 941 | */ |
897 | if (s->size >= 65535 * sizeof(void *)) { | 942 | if (s->size >= 65535 * sizeof(void *)) { |
898 | BUG_ON(s->flags & (SLAB_RED_ZONE | SLAB_POISON | | 943 | BUG_ON(s->flags & (SLAB_RED_ZONE | SLAB_POISON | |
899 | SLAB_STORE_USER | SLAB_DESTROY_BY_RCU)); | 944 | SLAB_STORE_USER | SLAB_DESTROY_BY_RCU)); |
900 | BUG_ON(s->ctor || s->dtor); | 945 | BUG_ON(s->ctor); |
901 | } | 946 | } |
902 | else | 947 | else |
903 | /* | 948 | /* |
@@ -909,26 +954,20 @@ static void kmem_cache_open_debug_check(struct kmem_cache *s) | |||
909 | s->flags |= slub_debug; | 954 | s->flags |= slub_debug; |
910 | } | 955 | } |
911 | #else | 956 | #else |
957 | static inline void setup_object_debug(struct kmem_cache *s, | ||
958 | struct page *page, void *object) {} | ||
912 | 959 | ||
913 | static inline int alloc_object_checks(struct kmem_cache *s, | 960 | static inline int alloc_debug_processing(struct kmem_cache *s, |
914 | struct page *page, void *object) { return 0; } | 961 | struct page *page, void *object, void *addr) { return 0; } |
915 | 962 | ||
916 | static inline int free_object_checks(struct kmem_cache *s, | 963 | static inline int free_debug_processing(struct kmem_cache *s, |
917 | struct page *page, void *object) { return 0; } | 964 | struct page *page, void *object, void *addr) { return 0; } |
918 | 965 | ||
919 | static inline void add_full(struct kmem_cache_node *n, struct page *page) {} | ||
920 | static inline void remove_full(struct kmem_cache *s, struct page *page) {} | ||
921 | static inline void trace(struct kmem_cache *s, struct page *page, | ||
922 | void *object, int alloc) {} | ||
923 | static inline void init_object(struct kmem_cache *s, | ||
924 | void *object, int active) {} | ||
925 | static inline void init_tracking(struct kmem_cache *s, void *object) {} | ||
926 | static inline int slab_pad_check(struct kmem_cache *s, struct page *page) | 966 | static inline int slab_pad_check(struct kmem_cache *s, struct page *page) |
927 | { return 1; } | 967 | { return 1; } |
928 | static inline int check_object(struct kmem_cache *s, struct page *page, | 968 | static inline int check_object(struct kmem_cache *s, struct page *page, |
929 | void *object, int active) { return 1; } | 969 | void *object, int active) { return 1; } |
930 | static inline void set_track(struct kmem_cache *s, void *object, | 970 | static inline void add_full(struct kmem_cache_node *n, struct page *page) {} |
931 | enum track_item alloc, void *addr) {} | ||
932 | static inline void kmem_cache_open_debug_check(struct kmem_cache *s) {} | 971 | static inline void kmem_cache_open_debug_check(struct kmem_cache *s) {} |
933 | #define slub_debug 0 | 972 | #define slub_debug 0 |
934 | #endif | 973 | #endif |
@@ -965,13 +1004,9 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) | |||
965 | static void setup_object(struct kmem_cache *s, struct page *page, | 1004 | static void setup_object(struct kmem_cache *s, struct page *page, |
966 | void *object) | 1005 | void *object) |
967 | { | 1006 | { |
968 | if (SlabDebug(page)) { | 1007 | setup_object_debug(s, page, object); |
969 | init_object(s, object, 0); | ||
970 | init_tracking(s, object); | ||
971 | } | ||
972 | |||
973 | if (unlikely(s->ctor)) | 1008 | if (unlikely(s->ctor)) |
974 | s->ctor(object, s, SLAB_CTOR_CONSTRUCTOR); | 1009 | s->ctor(object, s, 0); |
975 | } | 1010 | } |
976 | 1011 | ||
977 | static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) | 1012 | static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) |
@@ -1030,15 +1065,12 @@ static void __free_slab(struct kmem_cache *s, struct page *page) | |||
1030 | { | 1065 | { |
1031 | int pages = 1 << s->order; | 1066 | int pages = 1 << s->order; |
1032 | 1067 | ||
1033 | if (unlikely(SlabDebug(page) || s->dtor)) { | 1068 | if (unlikely(SlabDebug(page))) { |
1034 | void *p; | 1069 | void *p; |
1035 | 1070 | ||
1036 | slab_pad_check(s, page); | 1071 | slab_pad_check(s, page); |
1037 | for_each_object(p, s, page_address(page)) { | 1072 | for_each_object(p, s, page_address(page)) |
1038 | if (s->dtor) | ||
1039 | s->dtor(p, s, 0); | ||
1040 | check_object(s, page, p, 0); | 1073 | check_object(s, page, p, 0); |
1041 | } | ||
1042 | } | 1074 | } |
1043 | 1075 | ||
1044 | mod_zone_page_state(page_zone(page), | 1076 | mod_zone_page_state(page_zone(page), |
@@ -1138,11 +1170,12 @@ static void remove_partial(struct kmem_cache *s, | |||
1138 | * | 1170 | * |
1139 | * Must hold list_lock. | 1171 | * Must hold list_lock. |
1140 | */ | 1172 | */ |
1141 | static int lock_and_del_slab(struct kmem_cache_node *n, struct page *page) | 1173 | static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page) |
1142 | { | 1174 | { |
1143 | if (slab_trylock(page)) { | 1175 | if (slab_trylock(page)) { |
1144 | list_del(&page->lru); | 1176 | list_del(&page->lru); |
1145 | n->nr_partial--; | 1177 | n->nr_partial--; |
1178 | SetSlabFrozen(page); | ||
1146 | return 1; | 1179 | return 1; |
1147 | } | 1180 | } |
1148 | return 0; | 1181 | return 0; |
@@ -1166,7 +1199,7 @@ static struct page *get_partial_node(struct kmem_cache_node *n) | |||
1166 | 1199 | ||
1167 | spin_lock(&n->list_lock); | 1200 | spin_lock(&n->list_lock); |
1168 | list_for_each_entry(page, &n->partial, lru) | 1201 | list_for_each_entry(page, &n->partial, lru) |
1169 | if (lock_and_del_slab(n, page)) | 1202 | if (lock_and_freeze_slab(n, page)) |
1170 | goto out; | 1203 | goto out; |
1171 | page = NULL; | 1204 | page = NULL; |
1172 | out: | 1205 | out: |
@@ -1245,10 +1278,11 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node) | |||
1245 | * | 1278 | * |
1246 | * On exit the slab lock will have been dropped. | 1279 | * On exit the slab lock will have been dropped. |
1247 | */ | 1280 | */ |
1248 | static void putback_slab(struct kmem_cache *s, struct page *page) | 1281 | static void unfreeze_slab(struct kmem_cache *s, struct page *page) |
1249 | { | 1282 | { |
1250 | struct kmem_cache_node *n = get_node(s, page_to_nid(page)); | 1283 | struct kmem_cache_node *n = get_node(s, page_to_nid(page)); |
1251 | 1284 | ||
1285 | ClearSlabFrozen(page); | ||
1252 | if (page->inuse) { | 1286 | if (page->inuse) { |
1253 | 1287 | ||
1254 | if (page->freelist) | 1288 | if (page->freelist) |
@@ -1299,9 +1333,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, int cpu) | |||
1299 | page->inuse--; | 1333 | page->inuse--; |
1300 | } | 1334 | } |
1301 | s->cpu_slab[cpu] = NULL; | 1335 | s->cpu_slab[cpu] = NULL; |
1302 | ClearPageActive(page); | 1336 | unfreeze_slab(s, page); |
1303 | |||
1304 | putback_slab(s, page); | ||
1305 | } | 1337 | } |
1306 | 1338 | ||
1307 | static void flush_slab(struct kmem_cache *s, struct page *page, int cpu) | 1339 | static void flush_slab(struct kmem_cache *s, struct page *page, int cpu) |
@@ -1392,9 +1424,7 @@ another_slab: | |||
1392 | new_slab: | 1424 | new_slab: |
1393 | page = get_partial(s, gfpflags, node); | 1425 | page = get_partial(s, gfpflags, node); |
1394 | if (page) { | 1426 | if (page) { |
1395 | have_slab: | ||
1396 | s->cpu_slab[cpu] = page; | 1427 | s->cpu_slab[cpu] = page; |
1397 | SetPageActive(page); | ||
1398 | goto load_freelist; | 1428 | goto load_freelist; |
1399 | } | 1429 | } |
1400 | 1430 | ||
@@ -1424,17 +1454,15 @@ have_slab: | |||
1424 | flush_slab(s, s->cpu_slab[cpu], cpu); | 1454 | flush_slab(s, s->cpu_slab[cpu], cpu); |
1425 | } | 1455 | } |
1426 | slab_lock(page); | 1456 | slab_lock(page); |
1427 | goto have_slab; | 1457 | SetSlabFrozen(page); |
1458 | s->cpu_slab[cpu] = page; | ||
1459 | goto load_freelist; | ||
1428 | } | 1460 | } |
1429 | return NULL; | 1461 | return NULL; |
1430 | debug: | 1462 | debug: |
1431 | object = page->freelist; | 1463 | object = page->freelist; |
1432 | if (!alloc_object_checks(s, page, object)) | 1464 | if (!alloc_debug_processing(s, page, object, addr)) |
1433 | goto another_slab; | 1465 | goto another_slab; |
1434 | if (s->flags & SLAB_STORE_USER) | ||
1435 | set_track(s, object, TRACK_ALLOC, addr); | ||
1436 | trace(s, page, object, 1); | ||
1437 | init_object(s, object, 1); | ||
1438 | 1466 | ||
1439 | page->inuse++; | 1467 | page->inuse++; |
1440 | page->freelist = object[page->offset]; | 1468 | page->freelist = object[page->offset]; |
@@ -1511,11 +1539,7 @@ checks_ok: | |||
1511 | page->freelist = object; | 1539 | page->freelist = object; |
1512 | page->inuse--; | 1540 | page->inuse--; |
1513 | 1541 | ||
1514 | if (unlikely(PageActive(page))) | 1542 | if (unlikely(SlabFrozen(page))) |
1515 | /* | ||
1516 | * Cpu slabs are never on partial lists and are | ||
1517 | * never freed. | ||
1518 | */ | ||
1519 | goto out_unlock; | 1543 | goto out_unlock; |
1520 | 1544 | ||
1521 | if (unlikely(!page->inuse)) | 1545 | if (unlikely(!page->inuse)) |
@@ -1545,14 +1569,8 @@ slab_empty: | |||
1545 | return; | 1569 | return; |
1546 | 1570 | ||
1547 | debug: | 1571 | debug: |
1548 | if (!free_object_checks(s, page, x)) | 1572 | if (!free_debug_processing(s, page, x, addr)) |
1549 | goto out_unlock; | 1573 | goto out_unlock; |
1550 | if (!PageActive(page) && !page->freelist) | ||
1551 | remove_full(s, page); | ||
1552 | if (s->flags & SLAB_STORE_USER) | ||
1553 | set_track(s, x, TRACK_FREE, addr); | ||
1554 | trace(s, page, object, 0); | ||
1555 | init_object(s, object, 0); | ||
1556 | goto checks_ok; | 1574 | goto checks_ok; |
1557 | } | 1575 | } |
1558 | 1576 | ||
@@ -1789,7 +1807,7 @@ static struct kmem_cache_node * __init early_kmem_cache_node_alloc(gfp_t gfpflag | |||
1789 | page->freelist = get_freepointer(kmalloc_caches, n); | 1807 | page->freelist = get_freepointer(kmalloc_caches, n); |
1790 | page->inuse++; | 1808 | page->inuse++; |
1791 | kmalloc_caches->node[node] = n; | 1809 | kmalloc_caches->node[node] = n; |
1792 | init_object(kmalloc_caches, n, 1); | 1810 | setup_object_debug(kmalloc_caches, page, n); |
1793 | init_kmem_cache_node(n); | 1811 | init_kmem_cache_node(n); |
1794 | atomic_long_inc(&n->nr_slabs); | 1812 | atomic_long_inc(&n->nr_slabs); |
1795 | add_partial(n, page); | 1813 | add_partial(n, page); |
@@ -1871,7 +1889,7 @@ static int calculate_sizes(struct kmem_cache *s) | |||
1871 | * then we should never poison the object itself. | 1889 | * then we should never poison the object itself. |
1872 | */ | 1890 | */ |
1873 | if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) && | 1891 | if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) && |
1874 | !s->ctor && !s->dtor) | 1892 | !s->ctor) |
1875 | s->flags |= __OBJECT_POISON; | 1893 | s->flags |= __OBJECT_POISON; |
1876 | else | 1894 | else |
1877 | s->flags &= ~__OBJECT_POISON; | 1895 | s->flags &= ~__OBJECT_POISON; |
@@ -1901,7 +1919,7 @@ static int calculate_sizes(struct kmem_cache *s) | |||
1901 | 1919 | ||
1902 | #ifdef CONFIG_SLUB_DEBUG | 1920 | #ifdef CONFIG_SLUB_DEBUG |
1903 | if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) || | 1921 | if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) || |
1904 | s->ctor || s->dtor)) { | 1922 | s->ctor)) { |
1905 | /* | 1923 | /* |
1906 | * Relocate free pointer after the object if it is not | 1924 | * Relocate free pointer after the object if it is not |
1907 | * permitted to overwrite the first word of the object on | 1925 | * permitted to overwrite the first word of the object on |
@@ -1970,13 +1988,11 @@ static int calculate_sizes(struct kmem_cache *s) | |||
1970 | static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags, | 1988 | static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags, |
1971 | const char *name, size_t size, | 1989 | const char *name, size_t size, |
1972 | size_t align, unsigned long flags, | 1990 | size_t align, unsigned long flags, |
1973 | void (*ctor)(void *, struct kmem_cache *, unsigned long), | 1991 | void (*ctor)(void *, struct kmem_cache *, unsigned long)) |
1974 | void (*dtor)(void *, struct kmem_cache *, unsigned long)) | ||
1975 | { | 1992 | { |
1976 | memset(s, 0, kmem_size); | 1993 | memset(s, 0, kmem_size); |
1977 | s->name = name; | 1994 | s->name = name; |
1978 | s->ctor = ctor; | 1995 | s->ctor = ctor; |
1979 | s->dtor = dtor; | ||
1980 | s->objsize = size; | 1996 | s->objsize = size; |
1981 | s->flags = flags; | 1997 | s->flags = flags; |
1982 | s->align = align; | 1998 | s->align = align; |
@@ -2161,7 +2177,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s, | |||
2161 | 2177 | ||
2162 | down_write(&slub_lock); | 2178 | down_write(&slub_lock); |
2163 | if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN, | 2179 | if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN, |
2164 | flags, NULL, NULL)) | 2180 | flags, NULL)) |
2165 | goto panic; | 2181 | goto panic; |
2166 | 2182 | ||
2167 | list_add(&s->list, &slab_caches); | 2183 | list_add(&s->list, &slab_caches); |
@@ -2463,7 +2479,7 @@ static int slab_unmergeable(struct kmem_cache *s) | |||
2463 | if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE)) | 2479 | if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE)) |
2464 | return 1; | 2480 | return 1; |
2465 | 2481 | ||
2466 | if (s->ctor || s->dtor) | 2482 | if (s->ctor) |
2467 | return 1; | 2483 | return 1; |
2468 | 2484 | ||
2469 | return 0; | 2485 | return 0; |
@@ -2471,15 +2487,14 @@ static int slab_unmergeable(struct kmem_cache *s) | |||
2471 | 2487 | ||
2472 | static struct kmem_cache *find_mergeable(size_t size, | 2488 | static struct kmem_cache *find_mergeable(size_t size, |
2473 | size_t align, unsigned long flags, | 2489 | size_t align, unsigned long flags, |
2474 | void (*ctor)(void *, struct kmem_cache *, unsigned long), | 2490 | void (*ctor)(void *, struct kmem_cache *, unsigned long)) |
2475 | void (*dtor)(void *, struct kmem_cache *, unsigned long)) | ||
2476 | { | 2491 | { |
2477 | struct list_head *h; | 2492 | struct list_head *h; |
2478 | 2493 | ||
2479 | if (slub_nomerge || (flags & SLUB_NEVER_MERGE)) | 2494 | if (slub_nomerge || (flags & SLUB_NEVER_MERGE)) |
2480 | return NULL; | 2495 | return NULL; |
2481 | 2496 | ||
2482 | if (ctor || dtor) | 2497 | if (ctor) |
2483 | return NULL; | 2498 | return NULL; |
2484 | 2499 | ||
2485 | size = ALIGN(size, sizeof(void *)); | 2500 | size = ALIGN(size, sizeof(void *)); |
@@ -2521,8 +2536,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, | |||
2521 | { | 2536 | { |
2522 | struct kmem_cache *s; | 2537 | struct kmem_cache *s; |
2523 | 2538 | ||
2539 | BUG_ON(dtor); | ||
2524 | down_write(&slub_lock); | 2540 | down_write(&slub_lock); |
2525 | s = find_mergeable(size, align, flags, ctor, dtor); | 2541 | s = find_mergeable(size, align, flags, ctor); |
2526 | if (s) { | 2542 | if (s) { |
2527 | s->refcount++; | 2543 | s->refcount++; |
2528 | /* | 2544 | /* |
@@ -2536,7 +2552,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, | |||
2536 | } else { | 2552 | } else { |
2537 | s = kmalloc(kmem_size, GFP_KERNEL); | 2553 | s = kmalloc(kmem_size, GFP_KERNEL); |
2538 | if (s && kmem_cache_open(s, GFP_KERNEL, name, | 2554 | if (s && kmem_cache_open(s, GFP_KERNEL, name, |
2539 | size, align, flags, ctor, dtor)) { | 2555 | size, align, flags, ctor)) { |
2540 | if (sysfs_slab_add(s)) { | 2556 | if (sysfs_slab_add(s)) { |
2541 | kfree(s); | 2557 | kfree(s); |
2542 | goto err; | 2558 | goto err; |
@@ -3177,17 +3193,6 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf) | |||
3177 | } | 3193 | } |
3178 | SLAB_ATTR_RO(ctor); | 3194 | SLAB_ATTR_RO(ctor); |
3179 | 3195 | ||
3180 | static ssize_t dtor_show(struct kmem_cache *s, char *buf) | ||
3181 | { | ||
3182 | if (s->dtor) { | ||
3183 | int n = sprint_symbol(buf, (unsigned long)s->dtor); | ||
3184 | |||
3185 | return n + sprintf(buf + n, "\n"); | ||
3186 | } | ||
3187 | return 0; | ||
3188 | } | ||
3189 | SLAB_ATTR_RO(dtor); | ||
3190 | |||
3191 | static ssize_t aliases_show(struct kmem_cache *s, char *buf) | 3196 | static ssize_t aliases_show(struct kmem_cache *s, char *buf) |
3192 | { | 3197 | { |
3193 | return sprintf(buf, "%d\n", s->refcount - 1); | 3198 | return sprintf(buf, "%d\n", s->refcount - 1); |
@@ -3419,7 +3424,6 @@ static struct attribute * slab_attrs[] = { | |||
3419 | &partial_attr.attr, | 3424 | &partial_attr.attr, |
3420 | &cpu_slabs_attr.attr, | 3425 | &cpu_slabs_attr.attr, |
3421 | &ctor_attr.attr, | 3426 | &ctor_attr.attr, |
3422 | &dtor_attr.attr, | ||
3423 | &aliases_attr.attr, | 3427 | &aliases_attr.attr, |
3424 | &align_attr.attr, | 3428 | &align_attr.attr, |
3425 | &sanity_checks_attr.attr, | 3429 | &sanity_checks_attr.attr, |
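
Taken together, the mm/slub.c hunks retire slab destructors: kmem_cache_open() and find_mergeable() lose their dtor parameter, the mergeability tests stop consulting s->dtor, the dtor sysfs attribute disappears, and kmem_cache_create() now BUG()s on any caller that still passes one. A minimal sketch of cache setup under the new contract -- the struct, names, and flags here are hypothetical, not part of this patch:

    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/list.h>
    #include <linux/init.h>
    #include <linux/errno.h>

    struct foo {
            spinlock_t lock;
            struct list_head list;
    };

    /* Runs once per object, when its backing slab page is populated. */
    static void foo_ctor(void *obj, struct kmem_cache *cachep,
                         unsigned long flags)
    {
            struct foo *f = obj;

            spin_lock_init(&f->lock);
            INIT_LIST_HEAD(&f->list);
    }

    static struct kmem_cache *foo_cache;

    static int __init foo_init(void)
    {
            /* The public kmem_cache_create() still carries a dtor
             * argument at this point; anything non-NULL now trips the
             * BUG_ON() added above. */
            foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
                                          SLAB_HWCACHE_ALIGN, foo_ctor,
                                          NULL);
            return foo_cache ? 0 : -ENOMEM;
    }

Dtor-free caches are also what makes aliasing possible at all: find_mergeable() above refuses any cache with a ctor, so only plain caches get merged.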
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index faa2a521dea..d3a9c536825 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
@@ -311,7 +311,7 @@ struct vm_struct *remove_vm_area(void *addr) | |||
311 | return v; | 311 | return v; |
312 | } | 312 | } |
313 | 313 | ||
314 | void __vunmap(void *addr, int deallocate_pages) | 314 | static void __vunmap(void *addr, int deallocate_pages) |
315 | { | 315 | { |
316 | struct vm_struct *area; | 316 | struct vm_struct *area; |
317 | 317 | ||
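
Making __vunmap() static is namespace hygiene: nothing outside mm/vmalloc.c called it, and the exported entry points remain vfree(), which passes deallocate_pages = 1, and vunmap(), which passes 0. Usage is unchanged; a minimal sketch for reference:

    #include <linux/vmalloc.h>
    #include <linux/errno.h>

    static void *big_buf;

    static int example_alloc(void)
    {
            big_buf = vmalloc(64 * 1024);  /* page-backed, virtually contiguous */
            if (!big_buf)
                    return -ENOMEM;
            return 0;
    }

    static void example_free(void)
    {
            vfree(big_buf);                /* ends up in __vunmap(addr, 1) */
            big_buf = NULL;
    }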
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index bfc9a35bad3..1dae3dfc66a 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c | |||
@@ -665,7 +665,8 @@ static int hci_sock_dev_event(struct notifier_block *this, unsigned long event, | |||
665 | /* Detach sockets from device */ | 665 | /* Detach sockets from device */ |
666 | read_lock(&hci_sk_list.lock); | 666 | read_lock(&hci_sk_list.lock); |
667 | sk_for_each(sk, node, &hci_sk_list.head) { | 667 | sk_for_each(sk, node, &hci_sk_list.head) { |
668 | lock_sock(sk); | 668 | local_bh_disable(); |
669 | bh_lock_sock_nested(sk); | ||
669 | if (hci_pi(sk)->hdev == hdev) { | 670 | if (hci_pi(sk)->hdev == hdev) { |
670 | hci_pi(sk)->hdev = NULL; | 671 | hci_pi(sk)->hdev = NULL; |
671 | sk->sk_err = EPIPE; | 672 | sk->sk_err = EPIPE; |
@@ -674,7 +675,8 @@ static int hci_sock_dev_event(struct notifier_block *this, unsigned long event, | |||
674 | 675 | ||
675 | hci_dev_put(hdev); | 676 | hci_dev_put(hdev); |
676 | } | 677 | } |
677 | release_sock(sk); | 678 | bh_unlock_sock(sk); |
679 | local_bh_enable(); | ||
678 | } | 680 | } |
679 | read_unlock(&hci_sk_list.lock); | 681 | read_unlock(&hci_sk_list.lock); |
680 | } | 682 | } |
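
The hci_sock change is a sleep-under-spinlock fix: lock_sock() may sleep, and this loop runs under read_lock(&hci_sk_list.lock). The replacement takes the socket's bottom-half spinlock instead, with BHs disabled so softirq users of the same lock cannot deadlock against us; bh_lock_sock_nested() is the spin_lock_nested(..., SINGLE_DEPTH_NESTING) form, which tells lockdep that one level of sk-lock nesting within the same lock class is intentional. The pattern in isolation (a sketch, not a drop-in):

    #include <net/sock.h>

    static void poke_sk_atomically(struct sock *sk)
    {
            local_bh_disable();             /* fence off softirq lockers */
            bh_lock_sock_nested(sk);        /* sk spinlock, lockdep-annotated */

            sk->sk_err = EPIPE;
            sk->sk_error_report(sk);

            bh_unlock_sock(sk);
            local_bh_enable();
    }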
diff --git a/net/core/dev.c b/net/core/dev.c index 8301e2ac747..f2b61111e26 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -116,6 +116,7 @@ | |||
116 | #include <linux/dmaengine.h> | 116 | #include <linux/dmaengine.h> |
117 | #include <linux/err.h> | 117 | #include <linux/err.h> |
118 | #include <linux/ctype.h> | 118 | #include <linux/ctype.h> |
119 | #include <linux/if_arp.h> | ||
119 | 120 | ||
120 | /* | 121 | /* |
121 | * The list of packet types we will receive (as opposed to discard) | 122 | * The list of packet types we will receive (as opposed to discard) |
@@ -217,6 +218,73 @@ extern void netdev_unregister_sysfs(struct net_device *); | |||
217 | #define netdev_unregister_sysfs(dev) do { } while(0) | 218 | #define netdev_unregister_sysfs(dev) do { } while(0) |
218 | #endif | 219 | #endif |
219 | 220 | ||
221 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
222 | /* | ||
223 | * register_netdevice() inits dev->_xmit_lock and sets lockdep class | ||
224 | * according to dev->type | ||
225 | */ | ||
226 | static const unsigned short netdev_lock_type[] = | ||
227 | {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25, | ||
228 | ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET, | ||
229 | ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM, | ||
230 | ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP, | ||
231 | ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD, | ||
232 | ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25, | ||
233 | ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP, | ||
234 | ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD, | ||
235 | ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI, | ||
236 | ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE, | ||
237 | ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET, | ||
238 | ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL, | ||
239 | ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211, | ||
240 | ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_VOID, | ||
241 | ARPHRD_NONE}; | ||
242 | |||
243 | static const char *netdev_lock_name[] = | ||
244 | {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25", | ||
245 | "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET", | ||
246 | "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM", | ||
247 | "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP", | ||
248 | "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD", | ||
249 | "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25", | ||
250 | "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP", | ||
251 | "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD", | ||
252 | "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI", | ||
253 | "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE", | ||
254 | "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET", | ||
255 | "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", | ||
256 | "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211", | ||
257 | "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_VOID", | ||
258 | "_xmit_NONE"}; | ||
259 | |||
260 | static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; | ||
261 | |||
262 | static inline unsigned short netdev_lock_pos(unsigned short dev_type) | ||
263 | { | ||
264 | int i; | ||
265 | |||
266 | for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++) | ||
267 | if (netdev_lock_type[i] == dev_type) | ||
268 | return i; | ||
269 | /* the last key is used by default */ | ||
270 | return ARRAY_SIZE(netdev_lock_type) - 1; | ||
271 | } | ||
272 | |||
273 | static inline void netdev_set_lockdep_class(spinlock_t *lock, | ||
274 | unsigned short dev_type) | ||
275 | { | ||
276 | int i; | ||
277 | |||
278 | i = netdev_lock_pos(dev_type); | ||
279 | lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i], | ||
280 | netdev_lock_name[i]); | ||
281 | } | ||
282 | #else | ||
283 | static inline void netdev_set_lockdep_class(spinlock_t *lock, | ||
284 | unsigned short dev_type) | ||
285 | { | ||
286 | } | ||
287 | #endif | ||
220 | 288 | ||
221 | /******************************************************************************* | 289 | /******************************************************************************* |
222 | 290 | ||
@@ -3001,6 +3069,7 @@ int register_netdevice(struct net_device *dev) | |||
3001 | 3069 | ||
3002 | spin_lock_init(&dev->queue_lock); | 3070 | spin_lock_init(&dev->queue_lock); |
3003 | spin_lock_init(&dev->_xmit_lock); | 3071 | spin_lock_init(&dev->_xmit_lock); |
3072 | netdev_set_lockdep_class(&dev->_xmit_lock, dev->type); | ||
3004 | dev->xmit_lock_owner = -1; | 3073 | dev->xmit_lock_owner = -1; |
3005 | spin_lock_init(&dev->ingress_lock); | 3074 | spin_lock_init(&dev->ingress_lock); |
3006 | 3075 | ||
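
The new dev.c block gives every device type its own lockdep key for _xmit_lock, named after the ARPHRD type ("_xmit_ETHER", "_xmit_IPGRE", ...), and register_netdevice() applies it right after initializing the lock. The motivation is stacked transmit paths: a virtual device that transmits through a lower device of a different type ends up holding two _xmit_locks at once, which a single shared class would make look like recursive locking. A sketch of the nesting this legalizes (hypothetical function; real code takes these locks via netif_tx_lock() rather than directly):

    /* e.g. an ARPHRD_IPGRE tunnel sending via an ARPHRD_ETHER NIC */
    static void stacked_xmit(struct net_device *tunnel,
                             struct net_device *lower)
    {
            spin_lock(&tunnel->_xmit_lock); /* class _xmit_IPGRE */
            spin_lock(&lower->_xmit_lock);  /* class _xmit_ETHER: a
                                             * different class, so no
                                             * recursion report */
            /* hand the encapsulated skb to the lower device here */
            spin_unlock(&lower->_xmit_lock);
            spin_unlock(&tunnel->_xmit_lock);
    }

Unknown types fall through netdev_lock_pos() to the last slot, ARPHRD_NONE/"_xmit_NONE", so the arrays only need extending when a new ARPHRD value wants a class of its own.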
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index c68196cc56a..010fbb2d45e 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig | |||
@@ -43,11 +43,11 @@ config IP_ADVANCED_ROUTER | |||
43 | asymmetric routing (packets from you to a host take a different path | 43 | asymmetric routing (packets from you to a host take a different path |
44 | than packets from that host to you) or if you operate a non-routing | 44 | than packets from that host to you) or if you operate a non-routing |
45 | host which has several IP addresses on different interfaces. To turn | 45 | host which has several IP addresses on different interfaces. To turn |
46 | rp_filter off use: | 46 | rp_filter on use: |
47 | 47 | ||
48 | echo 0 > /proc/sys/net/ipv4/conf/<device>/rp_filter | 48 | echo 1 > /proc/sys/net/ipv4/conf/<device>/rp_filter |
49 | or | 49 | or |
50 | echo 0 > /proc/sys/net/ipv4/conf/all/rp_filter | 50 | echo 1 > /proc/sys/net/ipv4/conf/all/rp_filter |
51 | 51 | ||
52 | If unsure, say N here. | 52 | If unsure, say N here. |
53 | 53 | ||
@@ -577,6 +577,7 @@ config TCP_CONG_VENO | |||
577 | config TCP_CONG_YEAH | 577 | config TCP_CONG_YEAH |
578 | tristate "YeAH TCP" | 578 | tristate "YeAH TCP" |
579 | depends on EXPERIMENTAL | 579 | depends on EXPERIMENTAL |
580 | select TCP_CONG_VEGAS | ||
580 | default n | 581 | default n |
581 | ---help--- | 582 | ---help--- |
582 | YeAH-TCP is a sender-side high-speed enabled TCP congestion control | 583 | YeAH-TCP is a sender-side high-speed enabled TCP congestion control |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index cb76e3c725a..df9fe4f2e8c 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -2396,7 +2396,7 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp) | |||
2396 | 2396 | ||
2397 | /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */ | 2397 | /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */ |
2398 | dev_out = ip_dev_find(oldflp->fl4_src); | 2398 | dev_out = ip_dev_find(oldflp->fl4_src); |
2399 | if ((dev_out == NULL) && !(sysctl_ip_nonlocal_bind)) | 2399 | if (dev_out == NULL) |
2400 | goto out; | 2400 | goto out; |
2401 | 2401 | ||
2402 | /* I removed check for oif == dev_out->oif here. | 2402 | /* I removed check for oif == dev_out->oif here. |
@@ -2407,7 +2407,7 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp) | |||
2407 | of another iface. --ANK | 2407 | of another iface. --ANK |
2408 | */ | 2408 | */ |
2409 | 2409 | ||
2410 | if (dev_out && oldflp->oif == 0 | 2410 | if (oldflp->oif == 0 |
2411 | && (MULTICAST(oldflp->fl4_dst) || oldflp->fl4_dst == htonl(0xFFFFFFFF))) { | 2411 | && (MULTICAST(oldflp->fl4_dst) || oldflp->fl4_dst == htonl(0xFFFFFFFF))) { |
2412 | /* Special hack: user can direct multicasts | 2412 | /* Special hack: user can direct multicasts |
2413 | and limited broadcast via necessary interface | 2413 | and limited broadcast via necessary interface |
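
The two route.c hunks are one logical change. Previously a NULL from ip_dev_find() was let through when sysctl_ip_nonlocal_bind was set; now it always fails the lookup, so by the time control reaches the multicast special case dev_out is provably non-NULL and its "dev_out &&" guard is dead code. Reduced to the resulting control flow (illustrative fragment, not the full function):

    dev_out = ip_dev_find(oldflp->fl4_src);
    if (dev_out == NULL)
            goto out;       /* source address not local: always fail */

    /* invariant below: dev_out != NULL */
    if (oldflp->oif == 0
        && (MULTICAST(oldflp->fl4_dst) ||
            oldflp->fl4_dst == htonl(0xFFFFFFFF))) {
            /* multicast / limited-broadcast interface hack */
    }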
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 86b26539e54..1260e52ad77 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c | |||
@@ -276,30 +276,34 @@ int tcp_set_congestion_control(struct sock *sk, const char *name) | |||
276 | 276 | ||
277 | 277 | ||
278 | /* | 278 | /* |
279 | * Slow start (exponential increase) with | 279 | * Slow start is used when congestion window is less than slow start |
280 | * RFC3742 Limited Slow Start (fast linear increase) support. | 280 | * threshold. This version implements the basic RFC2581 version |
281 | * and optionally supports: | ||
282 | * RFC3742 Limited Slow Start - growth limited to max_ssthresh | ||
283 | * RFC3465 Appropriate Byte Counting - growth limited by bytes acknowledged | ||
281 | */ | 284 | */ |
282 | void tcp_slow_start(struct tcp_sock *tp) | 285 | void tcp_slow_start(struct tcp_sock *tp) |
283 | { | 286 | { |
284 | int cnt = 0; | 287 | int cnt; /* increase in packets */ |
285 | 288 | ||
286 | if (sysctl_tcp_abc) { | 289 | /* RFC3465: ABC Slow start |
287 | /* RFC3465: Slow Start | 290 | * Increase only after a full MSS of bytes is acked |
288 | * TCP sender SHOULD increase cwnd by the number of | 291 | * |
289 | * previously unacknowledged bytes ACKed by each incoming | 292 | * TCP sender SHOULD increase cwnd by the number of |
290 | * acknowledgment, provided the increase is not more than L | 293 | * previously unacknowledged bytes ACKed by each incoming |
291 | */ | 294 | * acknowledgment, provided the increase is not more than L |
292 | if (tp->bytes_acked < tp->mss_cache) | 295 | */ |
293 | return; | 296 | if (sysctl_tcp_abc && tp->bytes_acked < tp->mss_cache) |
294 | } | 297 | return; |
295 | 298 | ||
296 | if (sysctl_tcp_max_ssthresh > 0 && | 299 | if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh) |
297 | tp->snd_cwnd > sysctl_tcp_max_ssthresh) | 300 | cnt = sysctl_tcp_max_ssthresh >> 1; /* limited slow start */ |
298 | cnt += sysctl_tcp_max_ssthresh>>1; | ||
299 | else | 301 | else |
300 | cnt += tp->snd_cwnd; | 302 | cnt = tp->snd_cwnd; /* exponential increase */ |
301 | 303 | ||
302 | /* RFC3465: We MAY increase by 2 if discovered delayed ack */ | 304 | /* RFC3465: ABC |
305 | * We MAY increase by 2 if discovered delayed ack | ||
306 | */ | ||
303 | if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache) | 307 | if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache) |
304 | cnt <<= 1; | 308 | cnt <<= 1; |
305 | tp->bytes_acked = 0; | 309 | tp->bytes_acked = 0; |
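
The rewritten tcp_slow_start() now reads as three stacked behaviours: the RFC 3465 ABC gate, the RFC 3742 growth cap, and the classic RFC 2581 exponential step. A user-space rendering of the same decision, convenient for working through values (names mirror the kernel's, but this is an illustration, not the kernel function):

    static unsigned int slow_start_cnt(unsigned int snd_cwnd,
                                       unsigned int max_ssthresh, /* RFC 3742 cap, 0 = off */
                                       unsigned int abc,          /* sysctl_tcp_abc */
                                       unsigned int bytes_acked,
                                       unsigned int mss)
    {
            unsigned int cnt;

            /* RFC 3465 ABC: grow only after a full MSS has been acked */
            if (abc && bytes_acked < mss)
                    return 0;

            if (max_ssthresh > 0 && snd_cwnd > max_ssthresh)
                    cnt = max_ssthresh >> 1;        /* limited slow start */
            else
                    cnt = snd_cwnd;                 /* exponential increase */

            /* ABC mode 2: a delayed ack may count double */
            if (abc > 1 && bytes_acked >= 2 * mss)
                    cnt <<= 1;

            return cnt;     /* packets to add to snd_cwnd this round */
    }

For example, with max_ssthresh = 100 a window of 200 grows by 50 per invocation rather than doubling -- the bounded, near-linear regime RFC 3742 calls limited slow start.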
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index 15419dd682f..8400525177a 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c | |||
@@ -87,7 +87,7 @@ extern int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc, | |||
87 | unsigned char *node); | 87 | unsigned char *node); |
88 | extern void ipxrtr_del_routes(struct ipx_interface *intrfc); | 88 | extern void ipxrtr_del_routes(struct ipx_interface *intrfc); |
89 | extern int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx, | 89 | extern int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx, |
90 | struct iovec *iov, int len, int noblock); | 90 | struct iovec *iov, size_t len, int noblock); |
91 | extern int ipxrtr_route_skb(struct sk_buff *skb); | 91 | extern int ipxrtr_route_skb(struct sk_buff *skb); |
92 | extern struct ipx_route *ipxrtr_lookup(__be32 net); | 92 | extern struct ipx_route *ipxrtr_lookup(__be32 net); |
93 | extern int ipxrtr_ioctl(unsigned int cmd, void __user *arg); | 93 | extern int ipxrtr_ioctl(unsigned int cmd, void __user *arg); |
diff --git a/net/socket.c b/net/socket.c index 98a8f67abbf..f4530196a70 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -261,8 +261,7 @@ static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags) | |||
261 | { | 261 | { |
262 | struct socket_alloc *ei = (struct socket_alloc *)foo; | 262 | struct socket_alloc *ei = (struct socket_alloc *)foo; |
263 | 263 | ||
264 | if (flags & SLAB_CTOR_CONSTRUCTOR) | 264 | inode_init_once(&ei->vfs_inode); |
265 | inode_init_once(&ei->vfs_inode); | ||
266 | } | 265 | } |
267 | 266 | ||
268 | static int init_inodecache(void) | 267 | static int init_inodecache(void) |
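
This init_once() cleanup (and the matching one in rpc_pipe.c just below) falls out of the same slab change as the SLUB hunks earlier: a constructor now runs exactly once per object, when its slab page is populated, so the SLAB_CTOR_CONSTRUCTOR test had become always-true and the guard is dead. The contract worth keeping in mind when writing such a ctor, sketched with a hypothetical cache:

    struct thing {
            struct mutex lock;
            void *cookie;
    };

    static void thing_ctor(void *obj, struct kmem_cache *cachep,
                           unsigned long flags)
    {
            struct thing *t = obj;

            /* Runs once per object lifetime, NOT on every allocation.
             * Anything a user may dirty before kmem_cache_free() must
             * be restored by the freer or reset in the alloc path. */
            mutex_init(&t->lock);
            t->cookie = NULL;
    }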
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index a2f1893bde5..5887457dc93 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
@@ -828,19 +828,17 @@ init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) | |||
828 | { | 828 | { |
829 | struct rpc_inode *rpci = (struct rpc_inode *) foo; | 829 | struct rpc_inode *rpci = (struct rpc_inode *) foo; |
830 | 830 | ||
831 | if (flags & SLAB_CTOR_CONSTRUCTOR) { | 831 | inode_init_once(&rpci->vfs_inode); |
832 | inode_init_once(&rpci->vfs_inode); | 832 | rpci->private = NULL; |
833 | rpci->private = NULL; | 833 | rpci->nreaders = 0; |
834 | rpci->nreaders = 0; | 834 | rpci->nwriters = 0; |
835 | rpci->nwriters = 0; | 835 | INIT_LIST_HEAD(&rpci->in_upcall); |
836 | INIT_LIST_HEAD(&rpci->in_upcall); | 836 | INIT_LIST_HEAD(&rpci->pipe); |
837 | INIT_LIST_HEAD(&rpci->pipe); | 837 | rpci->pipelen = 0; |
838 | rpci->pipelen = 0; | 838 | init_waitqueue_head(&rpci->waitq); |
839 | init_waitqueue_head(&rpci->waitq); | 839 | INIT_DELAYED_WORK(&rpci->queue_timeout, |
840 | INIT_DELAYED_WORK(&rpci->queue_timeout, | 840 | rpc_timeout_upcall_queue); |
841 | rpc_timeout_upcall_queue); | 841 | rpci->ops = NULL; |
842 | rpci->ops = NULL; | ||
843 | } | ||
844 | } | 842 | } |
845 | 843 | ||
846 | int register_rpc_pipefs(void) | 844 | int register_rpc_pipefs(void) |
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index b011eb625e4..944d75396fb 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -989,8 +989,6 @@ void rpc_killall_tasks(struct rpc_clnt *clnt) | |||
989 | spin_unlock(&rpc_sched_lock); | 989 | spin_unlock(&rpc_sched_lock); |
990 | } | 990 | } |
991 | 991 | ||
992 | static DECLARE_MUTEX_LOCKED(rpciod_running); | ||
993 | |||
994 | static void rpciod_killall(void) | 992 | static void rpciod_killall(void) |
995 | { | 993 | { |
996 | unsigned long flags; | 994 | unsigned long flags; |
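
DECLARE_MUTEX_LOCKED() declares a semaphore that starts locked -- the old idiom for blocking a starter until a daemon thread up()s it -- and the instance removed here had no remaining users in sched.c. For reference, the modern spelling of that start-up handshake is a completion (sketch with hypothetical names):

    #include <linux/completion.h>
    #include <linux/kthread.h>
    #include <linux/err.h>

    static DECLARE_COMPLETION(daemon_ready);

    static int daemon_fn(void *unused)
    {
            complete(&daemon_ready);        /* was: up(&locked_sem) */
            /* ... service loop ... */
            return 0;
    }

    static int start_daemon(void)
    {
            struct task_struct *t;

            t = kthread_run(daemon_fn, NULL, "exampled");
            if (IS_ERR(t))
                    return PTR_ERR(t);
            wait_for_completion(&daemon_ready);     /* was: down(&locked_sem) */
            return 0;
    }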
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c index 0d35bc796d0..73075dec83c 100644 --- a/net/sunrpc/sunrpc_syms.c +++ b/net/sunrpc/sunrpc_syms.c | |||
@@ -134,11 +134,7 @@ EXPORT_SYMBOL(nfsd_debug); | |||
134 | EXPORT_SYMBOL(nlm_debug); | 134 | EXPORT_SYMBOL(nlm_debug); |
135 | #endif | 135 | #endif |
136 | 136 | ||
137 | extern int register_rpc_pipefs(void); | ||
138 | extern void unregister_rpc_pipefs(void); | ||
139 | extern struct cache_detail ip_map_cache, unix_gid_cache; | 137 | extern struct cache_detail ip_map_cache, unix_gid_cache; |
140 | extern int init_socket_xprt(void); | ||
141 | extern void cleanup_socket_xprt(void); | ||
142 | 138 | ||
143 | static int __init | 139 | static int __init |
144 | init_sunrpc(void) | 140 | init_sunrpc(void) |
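
Dropping these file-local externs is the usual prototype cleanup: declarations like register_rpc_pipefs() belong in a header included by both the defining and the calling file, so the compiler checks them against the definitions instead of trusting a hand-copied extern that can drift out of sync -- exactly the hazard the ipx size_t fix above corrects. The idiom, with an illustrative header name:

    /* sunrpc_internal.h -- illustrative; included from both sides */
    int register_rpc_pipefs(void);
    void unregister_rpc_pipefs(void);
    int init_socket_xprt(void);
    void cleanup_socket_xprt(void);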