author	Ingo Molnar <mingo@elte.hu>	2011-04-18 08:53:18 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-04-18 08:53:33 -0400
commit	6ddafdaab3f809b110ada253d2f2d4910ebd3ac5 (patch)
tree	366bb7513511a05b6e11ab89bfe3b2dbd1d62a03
parent	3905c54f2bd2c6f937f87307987ca072eabc3e7b (diff)
parent	bd8e7dded88a3e1c085c333f19ff31387616f71a (diff)
Merge branch 'sched/locking' into sched/core
Merge reason: the rq locking changes are stable, propagate them into the .40 queue.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	Documentation/feature-removal-schedule.txt	20
-rw-r--r--	MAINTAINERS	7
-rw-r--r--	Makefile	2
-rw-r--r--	arch/alpha/kernel/smp.c	3
-rw-r--r--	arch/arm/kernel/smp.c	5
-rw-r--r--	arch/avr32/include/asm/setup.h	9
-rw-r--r--	arch/avr32/kernel/setup.c	15
-rw-r--r--	arch/avr32/kernel/traps.c	22
-rw-r--r--	arch/avr32/mach-at32ap/clock.c	24
-rw-r--r--	arch/avr32/mach-at32ap/extint.c	22
-rw-r--r--	arch/avr32/mach-at32ap/pio.c	2
-rw-r--r--	arch/avr32/mach-at32ap/pm-at32ap700x.S	2
-rw-r--r--	arch/blackfin/mach-common/smp.c	3
-rw-r--r--	arch/cris/arch-v32/kernel/smp.c	13
-rw-r--r--	arch/ia64/kernel/irq_ia64.c	2
-rw-r--r--	arch/ia64/xen/irq_xen.c	10
-rw-r--r--	arch/m32r/kernel/smp.c	4
-rw-r--r--	arch/m68k/include/asm/unistd.h	6
-rw-r--r--	arch/m68k/kernel/entry_mm.S	4
-rw-r--r--	arch/m68k/kernel/syscalltable.S	4
-rw-r--r--	arch/mips/cavium-octeon/smp.c	2
-rw-r--r--	arch/mips/kernel/smtc.c	2
-rw-r--r--	arch/mips/mti-malta/malta-int.c	2
-rw-r--r--	arch/mips/pmc-sierra/yosemite/smp.c	4
-rw-r--r--	arch/mips/sgi-ip27/ip27-irq.c	2
-rw-r--r--	arch/mips/sibyte/bcm1480/smp.c	7
-rw-r--r--	arch/mips/sibyte/sb1250/smp.c	7
-rw-r--r--	arch/mn10300/kernel/smp.c	5
-rw-r--r--	arch/parisc/kernel/smp.c	5
-rw-r--r--	arch/powerpc/kernel/ibmebus.c	6
-rw-r--r--	arch/powerpc/kernel/smp.c	4
-rw-r--r--	arch/s390/kernel/smp.c	6
-rw-r--r--	arch/sh/kernel/smp.c	2
-rw-r--r--	arch/sparc/kernel/smp_32.c	4
-rw-r--r--	arch/sparc/kernel/smp_64.c	1
-rw-r--r--	arch/tile/kernel/smp.c	6
-rw-r--r--	arch/um/kernel/smp.c	2
-rw-r--r--	arch/x86/kernel/smp.c	5
-rw-r--r--	arch/x86/xen/Kconfig	1
-rw-r--r--	arch/x86/xen/enlighten.c	21
-rw-r--r--	arch/x86/xen/mmu.c	4
-rw-r--r--	arch/x86/xen/smp.c	5
-rw-r--r--	drivers/amba/bus.c	6
-rw-r--r--	drivers/base/platform.c	7
-rw-r--r--	drivers/base/power/main.c	8
-rw-r--r--	drivers/dma/fsldma.c	2
-rw-r--r--	drivers/gpio/ml_ioh_gpio.c	1
-rw-r--r--	drivers/gpio/pca953x.c	5
-rw-r--r--	drivers/gpio/pch_gpio.c	1
-rw-r--r--	drivers/gpu/drm/Kconfig	1
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_bios.c	53
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_drv.h	2
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_mem.c	76
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_perf.c	2
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_state.c	12
-rw-r--r--	drivers/gpu/drm/nouveau/nv04_dfp.c	13
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_crtc.c	3
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_evo.c	1
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_graph.c	2
-rw-r--r--	drivers/gpu/drm/nouveau/nvc0_vm.c	24
-rw-r--r--	drivers/gpu/drm/radeon/atom.c	6
-rw-r--r--	drivers/gpu/drm/radeon/atombios_crtc.c	6
-rw-r--r--	drivers/gpu/drm/radeon/evergreen.c	17
-rw-r--r--	drivers/gpu/drm/radeon/r600.c	6
-rw-r--r--	drivers/gpu/drm/radeon/radeon.h	12
-rw-r--r--	drivers/gpu/drm/radeon/radeon_asic.c	2
-rw-r--r--	drivers/gpu/drm/radeon/radeon_atombios.c	30
-rw-r--r--	drivers/gpu/drm/radeon/radeon_fence.c	2
-rw-r--r--	drivers/gpu/drm/radeon/radeon_gart.c	2
-rw-r--r--	drivers/gpu/drm/radeon/radeon_i2c.c	4
-rw-r--r--	drivers/gpu/drm/radeon/radeon_legacy_encoders.c	2
-rw-r--r--	drivers/gpu/drm/radeon/radeon_pm.c	11
-rw-r--r--	drivers/gpu/drm/radeon/radeon_ring.c	2
-rw-r--r--	drivers/gpu/drm/radeon/rs600.c	2
-rw-r--r--	drivers/gpu/drm/radeon/rv770.c	6
-rw-r--r--	drivers/gpu/drm/ttm/ttm_page_alloc.c	26
-rw-r--r--	drivers/gpu/stub/Kconfig	1
-rw-r--r--	drivers/mfd/mfd-core.c	16
-rw-r--r--	drivers/net/benet/be.h	4
-rw-r--r--	drivers/net/benet/be_main.c	19
-rw-r--r--	drivers/net/bna/bfa_ioc.c	10
-rw-r--r--	drivers/net/can/mcp251x.c	3
-rw-r--r--	drivers/net/mlx4/en_rx.c	4
-rw-r--r--	drivers/net/mlx4/main.c	5
-rw-r--r--	drivers/net/mlx4/mlx4.h	2
-rw-r--r--	drivers/net/mlx4/sense.c	4
-rw-r--r--	drivers/net/pppoe.c	2
-rw-r--r--	drivers/net/smsc911x.c	8
-rw-r--r--	drivers/net/usb/smsc95xx.c	15
-rw-r--r--	drivers/net/wireless/ath/ath9k/hw.c	1
-rw-r--r--	drivers/net/wireless/b43/dma.c	2
-rw-r--r--	drivers/net/wireless/b43/dma.h	2
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-eeprom.h	2
-rw-r--r--	drivers/net/wireless/p54/p54usb.c	2
-rw-r--r--	drivers/net/wireless/rt2x00/rt2x00dev.c	6
-rw-r--r--	drivers/net/wireless/rtlwifi/efuse.c	2
-rw-r--r--	drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c	2
-rw-r--r--	drivers/net/wireless/rtlwifi/usb.c	2
-rw-r--r--	drivers/net/wireless/wl12xx/sdio.c	2
-rw-r--r--	drivers/net/wireless/wl12xx/spi.c	2
-rw-r--r--	drivers/net/wireless/wl12xx/testmode.c	5
-rw-r--r--	drivers/net/wireless/zd1211rw/zd_usb.c	20
-rw-r--r--	drivers/net/wireless/zd1211rw/zd_usb.h	1
-rw-r--r--	drivers/pci/pci-driver.c	6
-rw-r--r--	drivers/pci/setup-bus.c	4
-rw-r--r--	drivers/platform/x86/Kconfig	3
-rw-r--r--	drivers/platform/x86/acer-wmi.c	2
-rw-r--r--	drivers/platform/x86/asus-wmi.c	4
-rw-r--r--	drivers/platform/x86/eeepc-wmi.c	2
-rw-r--r--	drivers/platform/x86/intel_pmic_gpio.c	43
-rw-r--r--	drivers/platform/x86/samsung-laptop.c	17
-rw-r--r--	drivers/platform/x86/sony-laptop.c	65
-rw-r--r--	drivers/platform/x86/thinkpad_acpi.c	3
-rw-r--r--	drivers/spi/amba-pl022.c	2
-rw-r--r--	drivers/spi/dw_spi.c	2
-rw-r--r--	drivers/spi/pxa2xx_spi.c	2
-rw-r--r--	drivers/spi/spi_bfin5xx.c	2
-rw-r--r--	drivers/staging/Kconfig	2
-rw-r--r--	drivers/staging/Makefile	1
-rw-r--r--	drivers/staging/samsung-laptop/Kconfig	10
-rw-r--r--	drivers/staging/samsung-laptop/Makefile	1
-rw-r--r--	drivers/staging/samsung-laptop/TODO	5
-rw-r--r--	drivers/staging/samsung-laptop/samsung-laptop.c	843
-rw-r--r--	drivers/xen/events.c	6
-rw-r--r--	drivers/xen/manage.c	6
-rw-r--r--	fs/cifs/README	16
-rw-r--r--	fs/cifs/cache.c	2
-rw-r--r--	fs/cifs/cifs_debug.c	43
-rw-r--r--	fs/cifs/cifs_spnego.c	4
-rw-r--r--	fs/cifs/cifs_unicode.c	35
-rw-r--r--	fs/cifs/cifs_unicode.h	2
-rw-r--r--	fs/cifs/cifsencrypt.c	21
-rw-r--r--	fs/cifs/cifsfs.c	6
-rw-r--r--	fs/cifs/cifsglob.h	13
-rw-r--r--	fs/cifs/cifssmb.c	14
-rw-r--r--	fs/cifs/connect.c	68
-rw-r--r--	fs/cifs/file.c	70
-rw-r--r--	fs/cifs/link.c	4
-rw-r--r--	fs/cifs/misc.c	3
-rw-r--r--	fs/cifs/sess.c	23
-rw-r--r--	fs/ext4/ext4_jbd2.h	4
-rw-r--r--	fs/ext4/fsync.c	17
-rw-r--r--	fs/ext4/inode.c	35
-rw-r--r--	fs/ext4/super.c	74
-rw-r--r--	fs/jbd2/commit.c	4
-rw-r--r--	fs/jbd2/journal.c	3
-rw-r--r--	fs/namespace.c	16
-rw-r--r--	fs/nfs/write.c	6
-rw-r--r--	fs/nfsd/lockd.c	1
-rw-r--r--	fs/nfsd/nfs4state.c	9
-rw-r--r--	fs/xfs/linux-2.6/xfs_buf.c	24
-rw-r--r--	fs/xfs/linux-2.6/xfs_message.c	27
-rw-r--r--	fs/xfs/linux-2.6/xfs_message.h	24
-rw-r--r--	fs/xfs/linux-2.6/xfs_super.c	129
-rw-r--r--	fs/xfs/linux-2.6/xfs_sync.c	228
-rw-r--r--	fs/xfs/linux-2.6/xfs_sync.h	2
-rw-r--r--	fs/xfs/quota/xfs_qm.c	7
-rw-r--r--	fs/xfs/quota/xfs_qm.h	5
-rw-r--r--	fs/xfs/quota/xfs_qm_syscalls.c	2
-rw-r--r--	fs/xfs/xfs_alloc.c	30
-rw-r--r--	fs/xfs/xfs_inode_item.c	67
-rw-r--r--	fs/xfs/xfs_itable.c	2
-rw-r--r--	fs/xfs/xfs_log.c	38
-rw-r--r--	fs/xfs/xfs_log_priv.h	1
-rw-r--r--	fs/xfs/xfs_mount.h	9
-rw-r--r--	fs/xfs/xfs_trans_ail.c	421
-rw-r--r--	fs/xfs/xfs_trans_priv.h	22
-rw-r--r--	include/linux/can/platform/mcp251x.h	2
-rw-r--r--	include/linux/mfd/core.h	13
-rw-r--r--	include/linux/mutex.h	2
-rw-r--r--	include/linux/netfilter.h	3
-rw-r--r--	include/linux/netfilter/ipset/ip_set.h	2
-rw-r--r--	include/linux/netfilter/ipset/ip_set_ahash.h	3
-rw-r--r--	include/linux/platform_device.h	5
-rw-r--r--	include/linux/sched.h	24
-rw-r--r--	include/linux/suspend.h	11
-rw-r--r--	include/net/ip_vs.h	2
-rw-r--r--	include/net/mac80211.h	15
-rw-r--r--	include/net/route.h	5
-rw-r--r--	init/Kconfig	5
-rw-r--r--	kernel/mutex-debug.c	2
-rw-r--r--	kernel/mutex-debug.h	2
-rw-r--r--	kernel/mutex.c	2
-rw-r--r--	kernel/mutex.h	2
-rw-r--r--	kernel/power/Kconfig	6
-rw-r--r--	kernel/sched.c	650
-rw-r--r--	kernel/sched_debug.c	2
-rw-r--r--	kernel/sched_fair.c	23
-rw-r--r--	kernel/sched_features.h	6
-rw-r--r--	kernel/sched_idletask.c	2
-rw-r--r--	kernel/sched_rt.c	54
-rw-r--r--	kernel/sched_stoptask.c	5
-rw-r--r--	mm/memory.c	26
-rw-r--r--	mm/mlock.c	13
-rw-r--r--	mm/mmap.c	13
-rw-r--r--	net/dsa/mv88e6131.c	23
-rw-r--r--	net/dsa/mv88e6xxx.h	2
-rw-r--r--	net/ipv4/netfilter.c	5
-rw-r--r--	net/ipv4/route.c	8
-rw-r--r--	net/ipv4/xfrm4_policy.c	1
-rw-r--r--	net/ipv6/netfilter.c	13
-rw-r--r--	net/ipv6/tcp_ipv6.c	4
-rw-r--r--	net/ipv6/udp.c	3
-rw-r--r--	net/mac80211/rx.c	2
-rw-r--r--	net/netfilter/Kconfig	1
-rw-r--r--	net/netfilter/ipset/ip_set_bitmap_ip.c	3
-rw-r--r--	net/netfilter/ipset/ip_set_bitmap_ipmac.c	3
-rw-r--r--	net/netfilter/ipset/ip_set_bitmap_port.c	3
-rw-r--r--	net/netfilter/ipset/ip_set_core.c	109
-rw-r--r--	net/netfilter/ipset/ip_set_list_set.c	53
-rw-r--r--	net/netfilter/ipvs/ip_vs_ctl.c	2
-rw-r--r--	net/netfilter/nf_conntrack_h323_asn1.c	2
-rw-r--r--	net/netfilter/nf_conntrack_h323_main.c	16
-rw-r--r--	net/netfilter/xt_TCPMSS.c	2
-rw-r--r--	net/netfilter/xt_addrtype.c	42
-rw-r--r--	net/netfilter/xt_conntrack.c	2
216 files changed, 2080 insertions, 2462 deletions
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 274b32d12532..492e81df2968 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -387,26 +387,6 @@ Who: Tejun Heo <tj@kernel.org>
 
 ----------------------------
 
-What:	Support for lcd_switch and display_get in asus-laptop driver
-When:	March 2010
-Why:	These two features use non-standard interfaces. There are the
-	only features that really need multiple path to guess what's
-	the right method name on a specific laptop.
-
-	Removing them will allow to remove a lot of code an significantly
-	clean the drivers.
-
-	This will affect the backlight code which won't be able to know
-	if the backlight is on or off. The platform display file will also be
-	write only (like the one in eeepc-laptop).
-
-	This should'nt affect a lot of user because they usually know
-	when their display is on or off.
-
-Who:	Corentin Chary <corentin.chary@gmail.com>
-
-----------------------------
-
 What:	sysfs-class-rfkill state file
 When:	Feb 2014
 Files:	net/rfkill/core.c
diff --git a/MAINTAINERS b/MAINTAINERS
index 6b4b9cdec370..649600cb8ec9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6916,6 +6916,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.
 S:	Maintained
 F:	drivers/platform/x86
 
+XEN NETWORK BACKEND DRIVER
+M:	Ian Campbell <ian.campbell@citrix.com>
+L:	xen-devel@lists.xensource.com (moderated for non-subscribers)
+L:	netdev@vger.kernel.org
+S:	Supported
+F:	drivers/net/xen-netback/*
+
 XEN PCI SUBSYSTEM
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 L:	xen-devel@lists.xensource.com (moderated for non-subscribers)
diff --git a/Makefile b/Makefile
index 8392b64079df..322e7334ccb9 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 39
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Flesh-Eating Bats with Fangs
 
 # *DOCUMENTATION*
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 42aa078a5e4d..5a621c6d22ab 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -585,8 +585,7 @@ handle_ipi(struct pt_regs *regs)
 
 	switch (which) {
 	case IPI_RESCHEDULE:
-		/* Reschedule callback.  Everything to be done
-		   is done by the interrupt return path.  */
+		scheduler_ipi();
 		break;
 
 	case IPI_CALL_FUNC:
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 8fe05ad932e4..7a561eb731ea 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -560,10 +560,7 @@ asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
 		break;
 
 	case IPI_RESCHEDULE:
-		/*
-		 * nothing more to do - eveything is
-		 * done on the interrupt return path
-		 */
+		scheduler_ipi();
 		break;
 
 	case IPI_CALL_FUNC:
diff --git a/arch/avr32/include/asm/setup.h b/arch/avr32/include/asm/setup.h
index ff5b7cf6be4d..160543dbec7e 100644
--- a/arch/avr32/include/asm/setup.h
+++ b/arch/avr32/include/asm/setup.h
@@ -94,6 +94,13 @@ struct tag_ethernet {
 
 #define ETH_INVALID_PHY	0xff
 
+/* board information */
+#define ATAG_BOARDINFO	0x54410008
+
+struct tag_boardinfo {
+	u32	board_number;
+};
+
 struct tag {
 	struct tag_header hdr;
 	union {
@@ -102,6 +109,7 @@ struct tag {
 		struct tag_cmdline cmdline;
 		struct tag_clock clock;
 		struct tag_ethernet ethernet;
+		struct tag_boardinfo boardinfo;
 	} u;
 };
 
@@ -128,6 +136,7 @@ extern struct tag *bootloader_tags;
 
 extern resource_size_t fbmem_start;
 extern resource_size_t fbmem_size;
+extern u32 board_number;
 
 void setup_processor(void);
 
diff --git a/arch/avr32/kernel/setup.c b/arch/avr32/kernel/setup.c
index 5c7083916c33..bb0974cce4ac 100644
--- a/arch/avr32/kernel/setup.c
+++ b/arch/avr32/kernel/setup.c
@@ -391,6 +391,21 @@ static int __init parse_tag_clock(struct tag *tag)
 __tagtable(ATAG_CLOCK, parse_tag_clock);
 
 /*
+ * The board_number correspond to the bd->bi_board_number in U-Boot. This
+ * parameter is only available during initialisation and can be used in some
+ * kind of board identification.
+ */
+u32 __initdata board_number;
+
+static int __init parse_tag_boardinfo(struct tag *tag)
+{
+	board_number = tag->u.boardinfo.board_number;
+
+	return 0;
+}
+__tagtable(ATAG_BOARDINFO, parse_tag_boardinfo);
+
+/*
  * Scan the tag table for this tag, and call its parse function. The
  * tag table is built by the linker from all the __tagtable
  * declarations.
diff --git a/arch/avr32/kernel/traps.c b/arch/avr32/kernel/traps.c
index b91b2044af9c..7aa25756412f 100644
--- a/arch/avr32/kernel/traps.c
+++ b/arch/avr32/kernel/traps.c
@@ -95,28 +95,6 @@ void _exception(long signr, struct pt_regs *regs, int code,
 	info.si_code = code;
 	info.si_addr = (void __user *)addr;
 	force_sig_info(signr, &info, current);
-
-	/*
-	 * Init gets no signals that it doesn't have a handler for.
-	 * That's all very well, but if it has caused a synchronous
-	 * exception and we ignore the resulting signal, it will just
-	 * generate the same exception over and over again and we get
-	 * nowhere.  Better to kill it and let the kernel panic.
-	 */
-	if (is_global_init(current)) {
-		__sighandler_t handler;
-
-		spin_lock_irq(&current->sighand->siglock);
-		handler = current->sighand->action[signr-1].sa.sa_handler;
-		spin_unlock_irq(&current->sighand->siglock);
-		if (handler == SIG_DFL) {
-			/* init has generated a synchronous exception
-			   and it doesn't have a handler for the signal */
-			printk(KERN_CRIT "init has generated signal %ld "
-			       "but has no handler for it\n", signr);
-			do_exit(signr);
-		}
-	}
 }
 
 asmlinkage void do_nmi(unsigned long ecr, struct pt_regs *regs)
diff --git a/arch/avr32/mach-at32ap/clock.c b/arch/avr32/mach-at32ap/clock.c
index 442f08c5e641..86925fd6ea5b 100644
--- a/arch/avr32/mach-at32ap/clock.c
+++ b/arch/avr32/mach-at32ap/clock.c
@@ -35,22 +35,30 @@ void at32_clk_register(struct clk *clk)
 	spin_unlock(&clk_list_lock);
 }
 
-struct clk *clk_get(struct device *dev, const char *id)
+static struct clk *__clk_get(struct device *dev, const char *id)
 {
 	struct clk *clk;
 
-	spin_lock(&clk_list_lock);
-
 	list_for_each_entry(clk, &at32_clock_list, list) {
 		if (clk->dev == dev && strcmp(id, clk->name) == 0) {
-			spin_unlock(&clk_list_lock);
 			return clk;
 		}
 	}
 
-	spin_unlock(&clk_list_lock);
 	return ERR_PTR(-ENOENT);
 }
+
+struct clk *clk_get(struct device *dev, const char *id)
+{
+	struct clk *clk;
+
+	spin_lock(&clk_list_lock);
+	clk = __clk_get(dev, id);
+	spin_unlock(&clk_list_lock);
+
+	return clk;
+}
+
 EXPORT_SYMBOL(clk_get);
 
 void clk_put(struct clk *clk)
@@ -257,15 +265,15 @@ static int clk_show(struct seq_file *s, void *unused)
 	spin_lock(&clk_list_lock);
 
 	/* show clock tree as derived from the three oscillators */
-	clk = clk_get(NULL, "osc32k");
+	clk = __clk_get(NULL, "osc32k");
 	dump_clock(clk, &r);
 	clk_put(clk);
 
-	clk = clk_get(NULL, "osc0");
+	clk = __clk_get(NULL, "osc0");
 	dump_clock(clk, &r);
 	clk_put(clk);
 
-	clk = clk_get(NULL, "osc1");
+	clk = __clk_get(NULL, "osc1");
 	dump_clock(clk, &r);
 	clk_put(clk);
 
diff --git a/arch/avr32/mach-at32ap/extint.c b/arch/avr32/mach-at32ap/extint.c
index 47ba4b9b6db1..fbc2aeaebddb 100644
--- a/arch/avr32/mach-at32ap/extint.c
+++ b/arch/avr32/mach-at32ap/extint.c
@@ -61,34 +61,34 @@ struct eic {
 static struct eic *nmi_eic;
 static bool nmi_enabled;
 
-static void eic_ack_irq(struct irq_chip *d)
+static void eic_ack_irq(struct irq_data *d)
 {
-	struct eic *eic = irq_data_get_irq_chip_data(data);
+	struct eic *eic = irq_data_get_irq_chip_data(d);
 	eic_writel(eic, ICR, 1 << (d->irq - eic->first_irq));
 }
 
-static void eic_mask_irq(struct irq_chip *d)
+static void eic_mask_irq(struct irq_data *d)
 {
-	struct eic *eic = irq_data_get_irq_chip_data(data);
+	struct eic *eic = irq_data_get_irq_chip_data(d);
 	eic_writel(eic, IDR, 1 << (d->irq - eic->first_irq));
 }
 
-static void eic_mask_ack_irq(struct irq_chip *d)
+static void eic_mask_ack_irq(struct irq_data *d)
 {
-	struct eic *eic = irq_data_get_irq_chip_data(data);
+	struct eic *eic = irq_data_get_irq_chip_data(d);
 	eic_writel(eic, ICR, 1 << (d->irq - eic->first_irq));
 	eic_writel(eic, IDR, 1 << (d->irq - eic->first_irq));
 }
 
-static void eic_unmask_irq(struct irq_chip *d)
+static void eic_unmask_irq(struct irq_data *d)
 {
-	struct eic *eic = irq_data_get_irq_chip_data(data);
+	struct eic *eic = irq_data_get_irq_chip_data(d);
 	eic_writel(eic, IER, 1 << (d->irq - eic->first_irq));
 }
 
-static int eic_set_irq_type(struct irq_chip *d, unsigned int flow_type)
+static int eic_set_irq_type(struct irq_data *d, unsigned int flow_type)
 {
-	struct eic *eic = irq_data_get_irq_chip_data(data);
+	struct eic *eic = irq_data_get_irq_chip_data(d);
 	unsigned int irq = d->irq;
 	unsigned int i = irq - eic->first_irq;
 	u32 mode, edge, level;
@@ -191,7 +191,7 @@ static int __init eic_probe(struct platform_device *pdev)
 
 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	int_irq = platform_get_irq(pdev, 0);
-	if (!regs || !int_irq) {
+	if (!regs || (int)int_irq <= 0) {
 		dev_dbg(&pdev->dev, "missing regs and/or irq resource\n");
 		return -ENXIO;
 	}
diff --git a/arch/avr32/mach-at32ap/pio.c b/arch/avr32/mach-at32ap/pio.c
index f308e1ddc629..2e0aa853a4bc 100644
--- a/arch/avr32/mach-at32ap/pio.c
+++ b/arch/avr32/mach-at32ap/pio.c
@@ -257,7 +257,7 @@ static void gpio_irq_mask(struct irq_data *d)
 	pio_writel(pio, IDR, 1 << (gpio & 0x1f));
 }
 
-static void gpio_irq_unmask(struct irq_data *d))
+static void gpio_irq_unmask(struct irq_data *d)
 {
 	unsigned		gpio = irq_to_gpio(d->irq);
 	struct pio_device	*pio = &pio_dev[gpio >> 5];
diff --git a/arch/avr32/mach-at32ap/pm-at32ap700x.S b/arch/avr32/mach-at32ap/pm-at32ap700x.S
index 17503b0ed6c9..f868f4ce761b 100644
--- a/arch/avr32/mach-at32ap/pm-at32ap700x.S
+++ b/arch/avr32/mach-at32ap/pm-at32ap700x.S
@@ -53,7 +53,7 @@ cpu_enter_idle:
 	st.w	r8[TI_flags], r9
 	unmask_interrupts
 	sleep	CPU_SLEEP_IDLE
-	.size	cpu_idle_sleep, . - cpu_idle_sleep
+	.size	cpu_enter_idle, . - cpu_enter_idle
 
 	/*
 	 * Common return path for PM functions that don't run from
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 6e17a265c4d3..326bb86f4d29 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -164,6 +164,9 @@ static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
 	while (msg_queue->count) {
 		msg = &msg_queue->ipi_message[msg_queue->head];
 		switch (msg->type) {
+		case BFIN_IPI_RESCHEDULE:
+			scheduler_ipi();
+			break;
 		case BFIN_IPI_CALL_FUNC:
 			spin_unlock_irqrestore(&msg_queue->lock, flags);
 			ipi_call_function(cpu, msg);
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
index 4c9e3e1ba5d1..66cc75657e2f 100644
--- a/arch/cris/arch-v32/kernel/smp.c
+++ b/arch/cris/arch-v32/kernel/smp.c
@@ -342,15 +342,18 @@ irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id)
 
 	ipi = REG_RD(intr_vect, irq_regs[smp_processor_id()], rw_ipi);
 
+	if (ipi.vector & IPI_SCHEDULE) {
+		scheduler_ipi();
+	}
 	if (ipi.vector & IPI_CALL) {
 		func(info);
 	}
 	if (ipi.vector & IPI_FLUSH_TLB) {
 		if (flush_mm == FLUSH_ALL)
 			__flush_tlb_all();
 		else if (flush_vma == FLUSH_ALL)
 			__flush_tlb_mm(flush_mm);
 		else
 			__flush_tlb_page(flush_vma, flush_addr);
 	}
 
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 5b704740f160..782c3a357f24 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -31,6 +31,7 @@
 #include <linux/irq.h>
 #include <linux/ratelimit.h>
 #include <linux/acpi.h>
+#include <linux/sched.h>
 
 #include <asm/delay.h>
 #include <asm/intrinsics.h>
@@ -496,6 +497,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 		smp_local_flush_tlb();
 		kstat_incr_irqs_this_cpu(irq, desc);
 	} else if (unlikely(IS_RESCHEDULE(vector))) {
+		scheduler_ipi();
 		kstat_incr_irqs_this_cpu(irq, desc);
 	} else {
 		ia64_setreg(_IA64_REG_CR_TPR, vector);
diff --git a/arch/ia64/xen/irq_xen.c b/arch/ia64/xen/irq_xen.c
index 108bb858acf2..b279e142c633 100644
--- a/arch/ia64/xen/irq_xen.c
+++ b/arch/ia64/xen/irq_xen.c
@@ -92,6 +92,8 @@ static unsigned short saved_irq_cnt;
 static int xen_slab_ready;
 
 #ifdef CONFIG_SMP
+#include <linux/sched.h>
+
 /* Dummy stub. Though we may check XEN_RESCHEDULE_VECTOR before __do_IRQ,
  * it ends up to issue several memory accesses upon percpu data and
  * thus adds unnecessary traffic to other paths.
@@ -99,7 +101,13 @@ static int xen_slab_ready;
 static irqreturn_t
 xen_dummy_handler(int irq, void *dev_id)
 {
+	return IRQ_HANDLED;
+}
 
+static irqreturn_t
+xen_resched_handler(int irq, void *dev_id)
+{
+	scheduler_ipi();
 	return IRQ_HANDLED;
 }
 
@@ -110,7 +118,7 @@ static struct irqaction xen_ipi_irqaction = {
 };
 
 static struct irqaction xen_resched_irqaction = {
-	.handler = xen_dummy_handler,
+	.handler = xen_resched_handler,
 	.flags = IRQF_DISABLED,
 	.name = "resched"
 };
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index 31cef20b2996..fc10b39893d4 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -122,8 +122,6 @@ void smp_send_reschedule(int cpu_id)
  *
  * Description:  This routine executes on CPU which received
  *               'RESCHEDULE_IPI'.
- *               Rescheduling is processed at the exit of interrupt
- *               operation.
  *
  * Born on Date: 2002.02.05
  *
@@ -138,7 +136,7 @@ void smp_send_reschedule(int cpu_id)
  *==========================================================================*/
 void smp_reschedule_interrupt(void)
 {
-	/* nothing to do */
+	scheduler_ipi();
 }
 
 /*==========================================================================*
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 26d851d385bb..29e17907d9f2 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -343,10 +343,14 @@
 #define __NR_fanotify_init	337
 #define __NR_fanotify_mark	338
 #define __NR_prlimit64		339
+#define __NR_name_to_handle_at	340
+#define __NR_open_by_handle_at	341
+#define __NR_clock_adjtime	342
+#define __NR_syncfs		343
 
 #ifdef __KERNEL__
 
-#define NR_syscalls		340
+#define NR_syscalls		344
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
diff --git a/arch/m68k/kernel/entry_mm.S b/arch/m68k/kernel/entry_mm.S
index 1559dea36e55..1359ee659574 100644
--- a/arch/m68k/kernel/entry_mm.S
+++ b/arch/m68k/kernel/entry_mm.S
@@ -750,4 +750,8 @@ sys_call_table:
 	.long sys_fanotify_init
 	.long sys_fanotify_mark
 	.long sys_prlimit64
+	.long sys_name_to_handle_at	/* 340 */
+	.long sys_open_by_handle_at
+	.long sys_clock_adjtime
+	.long sys_syncfs
 
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 79b1ed198c07..9b8393d8adb8 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -358,6 +358,10 @@ ENTRY(sys_call_table)
 	.long sys_fanotify_init
 	.long sys_fanotify_mark
 	.long sys_prlimit64
+	.long sys_name_to_handle_at	/* 340 */
+	.long sys_open_by_handle_at
+	.long sys_clock_adjtime
+	.long sys_syncfs
 
 	.rept NR_syscalls-(.-sys_call_table)/4
 	.long sys_ni_syscall
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index ba78b21cc8d0..76923eeb58b9 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -44,6 +44,8 @@ static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
 
 	if (action & SMP_CALL_FUNCTION)
 		smp_call_function_interrupt();
+	if (action & SMP_RESCHEDULE_YOURSELF)
+		scheduler_ipi();
 
 	/* Check if we've been told to flush the icache */
 	if (action & SMP_ICACHE_FLUSH)
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 5a88cc4ccd5a..cedac4633741 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -929,7 +929,7 @@ static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
 
 static void ipi_resched_interrupt(void)
 {
-	/* Return from interrupt should be enough to cause scheduler check */
+	scheduler_ipi();
 }
 
 static void ipi_call_interrupt(void)
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index 9027061f0ead..7d93e6fbfa5a 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -309,6 +309,8 @@ static void ipi_call_dispatch(void)
 
 static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
 {
+	scheduler_ipi();
+
 	return IRQ_HANDLED;
 }
 
diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c
index efc9e889b349..2608752898c0 100644
--- a/arch/mips/pmc-sierra/yosemite/smp.c
+++ b/arch/mips/pmc-sierra/yosemite/smp.c
@@ -55,6 +55,8 @@ void titan_mailbox_irq(void)
 
 		if (status & 0x2)
 			smp_call_function_interrupt();
+		if (status & 0x4)
+			scheduler_ipi();
 		break;
 
 	case 1:
@@ -63,6 +65,8 @@ void titan_mailbox_irq(void)
 
 		if (status & 0x2)
 			smp_call_function_interrupt();
+		if (status & 0x4)
+			scheduler_ipi();
 		break;
 	}
 }
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 0a04603d577c..b18b04e48577 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -147,8 +147,10 @@ static void ip27_do_irq_mask0(void)
 #ifdef CONFIG_SMP
 	if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) {
 		LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
+		scheduler_ipi();
 	} else if (pend0 & (1UL << CPU_RESCHED_B_IRQ)) {
 		LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
+		scheduler_ipi();
 	} else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
 		LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
 		smp_call_function_interrupt();
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index 47b347c992ea..d667875be564 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -20,6 +20,7 @@
 #include <linux/delay.h>
 #include <linux/smp.h>
 #include <linux/kernel_stat.h>
+#include <linux/sched.h>
 
 #include <asm/mmu_context.h>
 #include <asm/io.h>
@@ -189,10 +190,8 @@ void bcm1480_mailbox_interrupt(void)
 	/* Clear the mailbox to clear the interrupt */
 	__raw_writeq(((u64)action)<<48, mailbox_0_clear_regs[cpu]);
 
-	/*
-	 * Nothing to do for SMP_RESCHEDULE_YOURSELF; returning from the
-	 * interrupt will do the reschedule for us
-	 */
+	if (action & SMP_RESCHEDULE_YOURSELF)
+		scheduler_ipi();
 
 	if (action & SMP_CALL_FUNCTION)
 		smp_call_function_interrupt();
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index c00a5cb1128d..38e7f6bd7922 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -21,6 +21,7 @@
 #include <linux/interrupt.h>
 #include <linux/smp.h>
 #include <linux/kernel_stat.h>
+#include <linux/sched.h>
 
 #include <asm/mmu_context.h>
 #include <asm/io.h>
@@ -177,10 +178,8 @@ void sb1250_mailbox_interrupt(void)
 	/* Clear the mailbox to clear the interrupt */
 	____raw_writeq(((u64)action) << 48, mailbox_clear_regs[cpu]);
 
-	/*
-	 * Nothing to do for SMP_RESCHEDULE_YOURSELF; returning from the
-	 * interrupt will do the reschedule for us
-	 */
+	if (action & SMP_RESCHEDULE_YOURSELF)
+		scheduler_ipi();
 
 	if (action & SMP_CALL_FUNCTION)
 		smp_call_function_interrupt();
diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c
index 226c826a2194..83fb27912231 100644
--- a/arch/mn10300/kernel/smp.c
+++ b/arch/mn10300/kernel/smp.c
@@ -494,14 +494,11 @@ void smp_send_stop(void)
  * @irq: The interrupt number.
  * @dev_id: The device ID.
  *
- * We need do nothing here, since the scheduling will be effected on our way
- * back through entry.S.
- *
  * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
  */
 static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id)
 {
-	/* do nothing */
+	scheduler_ipi();
 	return IRQ_HANDLED;
 }
 
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 69d63d354ef0..828305f19cff 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -155,10 +155,7 @@ ipi_interrupt(int irq, void *dev_id)
 
 		case IPI_RESCHEDULE:
 			smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
-			/*
-			 * Reschedule callback.  Everything to be
-			 * done is done by the interrupt return path.
-			 */
+			scheduler_ipi();
 			break;
 
 		case IPI_CALL_FUNC:
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index c00d4ca1ee15..28581f1ad2c0 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -527,7 +527,7 @@ static int ibmebus_bus_pm_resume_noirq(struct device *dev)
 
 #endif /* !CONFIG_SUSPEND */
 
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
 
 static int ibmebus_bus_pm_freeze(struct device *dev)
 {
@@ -665,7 +665,7 @@ static int ibmebus_bus_pm_restore_noirq(struct device *dev)
 	return ret;
 }
 
-#else /* !CONFIG_HIBERNATION */
+#else /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #define ibmebus_bus_pm_freeze		NULL
 #define ibmebus_bus_pm_thaw		NULL
@@ -676,7 +676,7 @@ static int ibmebus_bus_pm_restore_noirq(struct device *dev)
 #define ibmebus_bus_pm_poweroff_noirq	NULL
 #define ibmebus_bus_pm_restore_noirq	NULL
 
-#endif /* !CONFIG_HIBERNATION */
+#endif /* !CONFIG_HIBERNATE_CALLBACKS */
 
 static struct dev_pm_ops ibmebus_bus_dev_pm_ops = {
 	.prepare = ibmebus_bus_pm_prepare,
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index cbdbb14be4b0..9f9c204bef69 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -116,7 +116,7 @@ void smp_message_recv(int msg)
 		generic_smp_call_function_interrupt();
 		break;
 	case PPC_MSG_RESCHEDULE:
-		/* we notice need_resched on exit */
+		scheduler_ipi();
 		break;
 	case PPC_MSG_CALL_FUNC_SINGLE:
 		generic_smp_call_function_single_interrupt();
@@ -146,7 +146,7 @@ static irqreturn_t call_function_action(int irq, void *data)
 
 static irqreturn_t reschedule_action(int irq, void *data)
 {
-	/* we just need the return path side effect of checking need_resched */
+	scheduler_ipi();
 	return IRQ_HANDLED;
 }
 
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 63a97db83f96..63c7d9ff220d 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -165,12 +165,12 @@ static void do_ext_call_interrupt(unsigned int ext_int_code,
 	kstat_cpu(smp_processor_id()).irqs[EXTINT_IPI]++;
 	/*
 	 * handle bit signal external calls
-	 *
-	 * For the ec_schedule signal we have to do nothing. All the work
-	 * is done automatically when we return from the interrupt.
 	 */
 	bits = xchg(&S390_lowcore.ext_call_fast, 0);
 
+	if (test_bit(ec_schedule, &bits))
+		scheduler_ipi();
+
 	if (test_bit(ec_call_function, &bits))
 		generic_smp_call_function_interrupt();
 
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 509b36b45115..6207561ea34a 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -20,6 +20,7 @@
 #include <linux/module.h>
 #include <linux/cpu.h>
 #include <linux/interrupt.h>
+#include <linux/sched.h>
 #include <asm/atomic.h>
 #include <asm/processor.h>
 #include <asm/system.h>
@@ -323,6 +324,7 @@ void smp_message_recv(unsigned int msg)
 		generic_smp_call_function_interrupt();
 		break;
 	case SMP_MSG_RESCHEDULE:
+		scheduler_ipi();
 		break;
 	case SMP_MSG_FUNCTION_SINGLE:
 		generic_smp_call_function_single_interrupt();
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index 91c10fb70858..f95690c167b6 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -125,7 +125,9 @@ struct linux_prom_registers smp_penguin_ctable __cpuinitdata = { 0 };
 
 void smp_send_reschedule(int cpu)
 {
-	/* See sparc64 */
+	/*
+	 * XXX missing reschedule IPI, see scheduler_ipi()
+	 */
 }
 
 void smp_send_stop(void)
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 3e94a8c23238..9478da7fdb3e 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1368,6 +1368,7 @@ void smp_send_reschedule(int cpu)
 void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
 {
 	clear_softint(1 << irq);
+	scheduler_ipi();
 }
 
 /* This is a nop because we capture all other cpus
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index a4293102ef81..c52224d5ed45 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -189,12 +189,8 @@ void flush_icache_range(unsigned long start, unsigned long end)
 /* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */
 static irqreturn_t handle_reschedule_ipi(int irq, void *token)
 {
-	/*
-	 * Nothing to do here; when we return from interrupt, the
-	 * rescheduling will occur there. But do bump the interrupt
-	 * profiler count in the meantime.
-	 */
 	__get_cpu_var(irq_stat).irq_resched_count++;
+	scheduler_ipi();
 
 	return IRQ_HANDLED;
 }
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c
index 106bf27e2a9a..eefb107d2d73 100644
--- a/arch/um/kernel/smp.c
+++ b/arch/um/kernel/smp.c
@@ -173,7 +173,7 @@ void IPI_handler(int cpu)
 		break;
 
 	case 'R':
-		set_tsk_need_resched(current);
+		scheduler_ipi();
 		break;
 
 	case 'S':
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 513deac7228d..013e7eba83bb 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -194,14 +194,13 @@ static void native_stop_other_cpus(int wait)
 }
 
 /*
- * Reschedule call back. Nothing to do,
- * all the work is done automatically when
- * we return from the interrupt.
+ * Reschedule call back.
  */
 void smp_reschedule_interrupt(struct pt_regs *regs)
 {
 	ack_APIC_irq();
 	inc_irq_stat(irq_resched_count);
+	scheduler_ipi();
 	/*
 	 * KVM uses this interrupt to force a cpu out of guest mode
 	 */
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 1c7121ba18ff..5cc821cb2e09 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -39,6 +39,7 @@ config XEN_MAX_DOMAIN_MEMORY
 config XEN_SAVE_RESTORE
 	bool
 	depends on XEN
+	select HIBERNATE_CALLBACKS
 	default y
 
 config XEN_DEBUG_FS
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 49dbd78ec3cb..e3c6a06cf725 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -238,6 +238,7 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
 static __init void xen_init_cpuid_mask(void)
 {
 	unsigned int ax, bx, cx, dx;
+	unsigned int xsave_mask;
 
 	cpuid_leaf1_edx_mask =
 		~((1 << X86_FEATURE_MCE)  |  /* disable MCE */
@@ -249,24 +250,16 @@ static __init void xen_init_cpuid_mask(void)
 	cpuid_leaf1_edx_mask &=
 		~((1 << X86_FEATURE_APIC) |  /* disable local APIC */
 		  (1 << X86_FEATURE_ACPI));  /* disable ACPI */
-
 	ax = 1;
-	cx = 0;
 	xen_cpuid(&ax, &bx, &cx, &dx);
 
-	/* cpuid claims we support xsave; try enabling it to see what happens */
-	if (cx & (1 << (X86_FEATURE_XSAVE % 32))) {
-		unsigned long cr4;
-
-		set_in_cr4(X86_CR4_OSXSAVE);
-
-		cr4 = read_cr4();
-
-		if ((cr4 & X86_CR4_OSXSAVE) == 0)
-			cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_XSAVE % 32));
-
-		clear_in_cr4(X86_CR4_OSXSAVE);
-	}
+	xsave_mask =
+		(1 << (X86_FEATURE_XSAVE % 32)) |
+		(1 << (X86_FEATURE_OSXSAVE % 32));
+
+	/* Xen will set CR4.OSXSAVE if supported and not disabled by force */
+	if ((cx & xsave_mask) != xsave_mask)
+		cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */
 }
 
 static void xen_set_debugreg(int reg, unsigned long val)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index c82df6c9c0f0..a991b57f91fe 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -565,13 +565,13 @@ pte_t xen_make_pte_debug(pteval_t pte)
 	if (io_page &&
 	    (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
 		other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT;
-		WARN(addr != other_addr,
+		WARN_ONCE(addr != other_addr,
 			"0x%lx is using VM_IO, but it is 0x%lx!\n",
 			(unsigned long)addr, (unsigned long)other_addr);
 	} else {
 		pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP;
 		other_addr = (_pte.pte & PTE_PFN_MASK);
-		WARN((addr == other_addr) && (!io_page) && (!iomap_set),
+		WARN_ONCE((addr == other_addr) && (!io_page) && (!iomap_set),
 			"0x%lx is missing VM_IO (and wasn't fixed)!\n",
 			(unsigned long)addr);
 	}
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 30612441ed99..762b46ab14d5 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -46,13 +46,12 @@ static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
 static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
 
 /*
- * Reschedule call back. Nothing to do,
- * all the work is done automatically when
- * we return from the interrupt.
+ * Reschedule call back.
  */
 static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 {
 	inc_irq_stat(irq_resched_count);
+	scheduler_ipi();
 
 	return IRQ_HANDLED;
 }
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 821040503154..7025593a58c8 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -214,7 +214,7 @@ static int amba_pm_resume_noirq(struct device *dev)
 
 #endif /* !CONFIG_SUSPEND */
 
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
 
 static int amba_pm_freeze(struct device *dev)
 {
@@ -352,7 +352,7 @@ static int amba_pm_restore_noirq(struct device *dev)
 	return ret;
 }
 
-#else /* !CONFIG_HIBERNATION */
+#else /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #define amba_pm_freeze		NULL
 #define amba_pm_thaw		NULL
@@ -363,7 +363,7 @@ static int amba_pm_restore_noirq(struct device *dev)
 #define amba_pm_poweroff_noirq	NULL
 #define amba_pm_restore_noirq	NULL
 
-#endif /* !CONFIG_HIBERNATION */
+#endif /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #ifdef CONFIG_PM
 
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index f051cfff18af..9e0e4fc24c46 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -149,6 +149,7 @@ static void platform_device_release(struct device *dev)
 
 	of_device_node_put(&pa->pdev.dev);
 	kfree(pa->pdev.dev.platform_data);
+	kfree(pa->pdev.mfd_cell);
 	kfree(pa->pdev.resource);
 	kfree(pa);
 }
@@ -771,7 +772,7 @@ int __weak platform_pm_resume_noirq(struct device *dev)
 
 #endif /* !CONFIG_SUSPEND */
 
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
 
 static int platform_pm_freeze(struct device *dev)
 {
@@ -909,7 +910,7 @@ static int platform_pm_restore_noirq(struct device *dev)
 	return ret;
 }
 
-#else /* !CONFIG_HIBERNATION */
+#else /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #define platform_pm_freeze		NULL
 #define platform_pm_thaw		NULL
@@ -920,7 +921,7 @@ static int platform_pm_restore_noirq(struct device *dev)
 #define platform_pm_poweroff_noirq	NULL
 #define platform_pm_restore_noirq	NULL
 
-#endif /* !CONFIG_HIBERNATION */
+#endif /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #ifdef CONFIG_PM_RUNTIME
 
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 052dc53eef38..fbc5b6e7c591 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -233,7 +233,7 @@ static int pm_op(struct device *dev,
 		}
 		break;
 #endif /* CONFIG_SUSPEND */
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
 	case PM_EVENT_FREEZE:
 	case PM_EVENT_QUIESCE:
 		if (ops->freeze) {
@@ -260,7 +260,7 @@ static int pm_op(struct device *dev,
 			suspend_report_result(ops->restore, error);
 		}
 		break;
-#endif /* CONFIG_HIBERNATION */
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
 	default:
 		error = -EINVAL;
 	}
@@ -308,7 +308,7 @@ static int pm_noirq_op(struct device *dev,
 		}
 		break;
 #endif /* CONFIG_SUSPEND */
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
 	case PM_EVENT_FREEZE:
 	case PM_EVENT_QUIESCE:
 		if (ops->freeze_noirq) {
@@ -335,7 +335,7 @@ static int pm_noirq_op(struct device *dev,
 			suspend_report_result(ops->restore_noirq, error);
 		}
 		break;
-#endif /* CONFIG_HIBERNATION */
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
 	default:
 		error = -EINVAL;
 	}
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 6b396759e7f5..8a781540590c 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1448,7 +1448,7 @@ static const struct of_device_id fsldma_of_ids[] = {
 	{}
 };
 
-static struct of_platform_driver fsldma_of_driver = {
+static struct platform_driver fsldma_of_driver = {
 	.driver = {
 		.name = "fsl-elo-dma",
 		.owner = THIS_MODULE,
diff --git a/drivers/gpio/ml_ioh_gpio.c b/drivers/gpio/ml_ioh_gpio.c
index 7f6f01a4b145..0a775f7987c2 100644
--- a/drivers/gpio/ml_ioh_gpio.c
+++ b/drivers/gpio/ml_ioh_gpio.c
@@ -116,6 +116,7 @@ static int ioh_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
 		reg_val |= (1 << nr);
 	else
 		reg_val &= ~(1 << nr);
+	iowrite32(reg_val, &chip->reg->regs[chip->ch].po);
 
 	mutex_unlock(&chip->lock);
 
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index 583e92592073..7630ab7b9bec 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -558,7 +558,7 @@ static int __devinit pca953x_probe(struct i2c_client *client,
 
 	ret = gpiochip_add(&chip->gpio_chip);
 	if (ret)
-		goto out_failed;
+		goto out_failed_irq;
 
 	if (pdata->setup) {
 		ret = pdata->setup(client, chip->gpio_chip.base,
@@ -570,8 +570,9 @@ static int __devinit pca953x_probe(struct i2c_client *client,
 	i2c_set_clientdata(client, chip);
 	return 0;
 
-out_failed:
+out_failed_irq:
 	pca953x_irq_teardown(chip);
+out_failed:
 	kfree(chip->dyn_pdata);
 	kfree(chip);
 	return ret;
diff --git a/drivers/gpio/pch_gpio.c b/drivers/gpio/pch_gpio.c
index 2c6af8705103..f970a5f3585e 100644
--- a/drivers/gpio/pch_gpio.c
+++ b/drivers/gpio/pch_gpio.c
@@ -105,6 +105,7 @@ static int pch_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
 		reg_val |= (1 << nr);
 	else
 		reg_val &= ~(1 << nr);
+	iowrite32(reg_val, &chip->reg->po);
 
 	mutex_unlock(&chip->lock);
 
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index a6feb78c404c..c58f691ec3ce 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -96,6 +96,7 @@ config DRM_I915
 	# i915 depends on ACPI_VIDEO when ACPI is enabled
 	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
 	select BACKLIGHT_CLASS_DEVICE if ACPI
+	select VIDEO_OUTPUT_CONTROL if ACPI
 	select INPUT if ACPI
 	select ACPI_VIDEO if ACPI
 	select ACPI_BUTTON if ACPI
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 8314a49b6b9a..90aef64b76f2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -269,7 +269,7 @@ struct init_tbl_entry {
 	int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
 };
 
-static int parse_init_table(struct nvbios *, unsigned int, struct init_exec *);
+static int parse_init_table(struct nvbios *, uint16_t, struct init_exec *);
 
 #define MACRO_INDEX_SIZE	2
 #define MACRO_SIZE	8
@@ -2011,6 +2011,27 @@ init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 }
 
 static int
+init_jump(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+	/*
+	 * INIT_JUMP   opcode: 0x5C ('\')
+	 *
+	 * offset      (8 bit): opcode
+	 * offset + 1  (16 bit): offset (in bios)
+	 *
+	 * Continue execution of init table from 'offset'
+	 */
+
+	uint16_t jmp_offset = ROM16(bios->data[offset + 1]);
+
+	if (!iexec->execute)
+		return 3;
+
+	BIOSLOG(bios, "0x%04X: Jump to 0x%04X\n", offset, jmp_offset);
+	return jmp_offset - offset;
+}
+
+static int
 init_i2c_if(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 {
 	/*
@@ -3659,6 +3680,7 @@ static struct init_tbl_entry itbl_entry[] = {
 	{ "INIT_ZM_REG_SEQUENCE"              , 0x58, init_zm_reg_sequence            },
 	/* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */
 	{ "INIT_SUB_DIRECT"                   , 0x5B, init_sub_direct                 },
+	{ "INIT_JUMP"                         , 0x5C, init_jump                       },
 	{ "INIT_I2C_IF"                       , 0x5E, init_i2c_if                     },
 	{ "INIT_COPY_NV_REG"                  , 0x5F, init_copy_nv_reg                },
 	{ "INIT_ZM_INDEX_IO"                  , 0x62, init_zm_index_io                },
@@ -3700,8 +3722,7 @@ static struct init_tbl_entry itbl_entry[] = {
 #define MAX_TABLE_OPS 1000
 
 static int
-parse_init_table(struct nvbios *bios, unsigned int offset,
-		 struct init_exec *iexec)
+parse_init_table(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 {
 	/*
 	 * Parses all commands in an init table.
@@ -6333,6 +6354,32 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
 		}
 	}
 
+	/* XFX GT-240X-YA
+	 *
+	 * So many things wrong here, replace the entire encoder table..
+	 */
+	if (nv_match_device(dev, 0x0ca3, 0x1682, 0x3003)) {
+		if (idx == 0) {
+			*conn = 0x02001300; /* VGA, connector 1 */
+			*conf = 0x00000028;
+		} else
+		if (idx == 1) {
+			*conn = 0x01010312; /* DVI, connector 0 */
+			*conf = 0x00020030;
+		} else
+		if (idx == 2) {
+			*conn = 0x01010310; /* VGA, connector 0 */
+			*conf = 0x00000028;
+		} else
+		if (idx == 3) {
+			*conn = 0x02022362; /* HDMI, connector 2 */
+			*conf = 0x00020010;
+		} else {
+			*conn = 0x0000000e; /* EOL */
+			*conf = 0x00000000;
+		}
+	}
+
 	return true;
 }
 
6338 6385
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 57e5302503db..856d56a98d1e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -1190,7 +1190,7 @@ extern int nv50_graph_load_context(struct nouveau_channel *);
 extern int  nv50_graph_unload_context(struct drm_device *);
 extern int  nv50_grctx_init(struct nouveau_grctx *);
 extern void nv50_graph_tlb_flush(struct drm_device *dev);
-extern void nv86_graph_tlb_flush(struct drm_device *dev);
+extern void nv84_graph_tlb_flush(struct drm_device *dev);
 extern struct nouveau_enum nv50_data_error_names[];
 
 /* nvc0_graph.c */
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 2683377f4131..78f467fe30be 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -552,6 +552,7 @@ nouveau_mem_timing_init(struct drm_device *dev)
 	u8 tRC;		/* Byte 9 */
 	u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14;
 	u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21;
+	u8 magic_number = 0; /* Yeah... sorry*/
 	u8 *mem = NULL, *entry;
 	int i, recordlen, entries;
 
@@ -596,6 +597,12 @@ nouveau_mem_timing_init(struct drm_device *dev)
 	if (!memtimings->timing)
 		return;
 
+	/* Get "some number" from the timing reg for NV_40
+	 * Used in calculations later */
+	if(dev_priv->card_type == NV_40) {
+		magic_number = (nv_rd32(dev,0x100228) & 0x0f000000) >> 24;
+	}
+
 	entry = mem + mem[1];
 	for (i = 0; i < entries; i++, entry += recordlen) {
 		struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i];
@@ -635,36 +642,51 @@ nouveau_mem_timing_init(struct drm_device *dev)
 
 	/* XXX: I don't trust the -1's and +1's... they must come
 	 * from somewhere! */
-	timing->reg_100224 = ((tUNK_0 + tUNK_19 + 1) << 24 |
+	timing->reg_100224 = (tUNK_0 + tUNK_19 + 1 + magic_number) << 24 |
 			      tUNK_18 << 16 |
-			      (tUNK_1 + tUNK_19 + 1) << 8 |
-			      (tUNK_2 - 1));
+			      (tUNK_1 + tUNK_19 + 1 + magic_number) << 8;
+	if(dev_priv->chipset == 0xa8) {
+		timing->reg_100224 |= (tUNK_2 - 1);
+	} else {
+		timing->reg_100224 |= (tUNK_2 + 2 - magic_number);
+	}
 
 	timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10);
-	if(recordlen > 19) {
-		timing->reg_100228 += (tUNK_19 - 1) << 24;
-	}/* I cannot back-up this else-statement right now
-	else {
-		timing->reg_100228 += tUNK_12 << 24;
-	}*/
-
-	/* XXX: reg_10022c */
-	timing->reg_10022c = tUNK_2 - 1;
-
-	timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
-			      tUNK_13 << 8  | tUNK_13);
-
-	/* XXX: +6? */
-	timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC);
-	timing->reg_100234 += max(tUNK_10,tUNK_11) << 16;
-
-	/* XXX; reg_100238, reg_10023c
-	 * reg: 0x00??????
-	 * reg_10023c:
-	 *      0 for pre-NV50 cards
-	 *      0x????0202 for NV50+ cards (empirical evidence) */
-	if(dev_priv->card_type >= NV_50) {
+	if(dev_priv->chipset >= 0xa3 && dev_priv->chipset < 0xaa) {
+		timing->reg_100228 |= (tUNK_19 - 1) << 24;
+	}
+
+	if(dev_priv->card_type == NV_40) {
+		/* NV40: don't know what the rest of the regs are..
+		 * And don't need to know either */
+		timing->reg_100228 |= 0x20200000 | magic_number << 24;
+	} else if(dev_priv->card_type >= NV_50) {
+		/* XXX: reg_10022c */
+		timing->reg_10022c = tUNK_2 - 1;
+
+		timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
+				      tUNK_13 << 8  | tUNK_13);
+
+		timing->reg_100234 = (tRAS << 24 | tRC);
+		timing->reg_100234 += max(tUNK_10,tUNK_11) << 16;
+
+		if(dev_priv->chipset < 0xa3) {
+			timing->reg_100234 |= (tUNK_2 + 2) << 8;
+		} else {
+			/* XXX: +6? */
+			timing->reg_100234 |= (tUNK_19 + 6) << 8;
+		}
+
+		/* XXX; reg_100238, reg_10023c
+		 * reg_100238: 0x00??????
+		 * reg_10023c: 0x!!??0202 for NV50+ cards (empirical evidence) */
 		timing->reg_10023c = 0x202;
+		if(dev_priv->chipset < 0xa3) {
+			timing->reg_10023c |= 0x4000000 | (tUNK_2 - 1) << 16;
+		} else {
+			/* currently unknown
+			 * 10023c seen as 06xxxxxx, 0bxxxxxx or 0fxxxxxx */
+		}
 	}
 
 	NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i,
@@ -675,7 +697,7 @@ nouveau_mem_timing_init(struct drm_device *dev)
 		 timing->reg_100238, timing->reg_10023c);
 	}
 
 	memtimings->nr_timing = entries;
 	memtimings->supported = true;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index ac62a1b8c4fc..670e3cb697ec 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -134,7 +134,7 @@ nouveau_perf_init(struct drm_device *dev)
 	case 0x13:
 	case 0x15:
 		perflvl->fanspeed = entry[55];
-		perflvl->voltage = entry[56];
+		perflvl->voltage = (recordlen > 56) ? entry[56] : 0;
 		perflvl->core = ROM32(entry[1]) * 10;
 		perflvl->memory = ROM32(entry[5]) * 20;
 		break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 5bb2859001e2..6e2b1a6caa2d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -376,15 +376,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->graph.destroy_context	= nv50_graph_destroy_context;
 		engine->graph.load_context	= nv50_graph_load_context;
 		engine->graph.unload_context	= nv50_graph_unload_context;
-		if (dev_priv->chipset != 0x86)
+		if (dev_priv->chipset == 0x50 ||
+		    dev_priv->chipset == 0xac)
 			engine->graph.tlb_flush	= nv50_graph_tlb_flush;
-		else {
-			/* from what i can see nvidia do this on every
-			 * pre-NVA3 board except NVAC, but, we've only
-			 * ever seen problems on NV86
-			 */
-			engine->graph.tlb_flush	= nv86_graph_tlb_flush;
-		}
+		else
+			engine->graph.tlb_flush	= nv84_graph_tlb_flush;
 		engine->fifo.channels	= 128;
 		engine->fifo.init	= nv50_fifo_init;
 		engine->fifo.takedown	= nv50_fifo_takedown;
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index c82db37d9f41..12098bf839c4 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -581,12 +581,13 @@ static void nv04_dfp_restore(struct drm_encoder *encoder)
 	int head = nv_encoder->restore.head;
 
 	if (nv_encoder->dcb->type == OUTPUT_LVDS) {
-		struct drm_display_mode *native_mode = nouveau_encoder_connector_get(nv_encoder)->native_mode;
-		if (native_mode)
-			call_lvds_script(dev, nv_encoder->dcb, head, LVDS_PANEL_ON,
-					 native_mode->clock);
-		else
-			NV_ERROR(dev, "Not restoring LVDS without native mode\n");
+		struct nouveau_connector *connector =
+			nouveau_encoder_connector_get(nv_encoder);
+
+		if (connector && connector->native_mode)
+			call_lvds_script(dev, nv_encoder->dcb, head,
+					 LVDS_PANEL_ON,
+					 connector->native_mode->clock);
 
 	} else if (nv_encoder->dcb->type == OUTPUT_TMDS) {
 		int clock = nouveau_hw_pllvals_to_clk
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 2b9984027f41..a19ccaa025b3 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -469,9 +469,6 @@ nv50_crtc_wait_complete(struct drm_crtc *crtc)
 
 	start = ptimer->read(dev);
 	do {
-		nv_wr32(dev, 0x61002c, 0x370);
-		nv_wr32(dev, 0x000140, 1);
-
 		if (nv_ro32(disp->ntfy, 0x000))
 			return 0;
 	} while (ptimer->read(dev) - start < 2000000000ULL);
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index a2cfaa691e9b..c8e83c1a4de8 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -186,6 +186,7 @@ nv50_evo_channel_init(struct nouveau_channel *evo)
 	nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id);
 
 	evo->dma.max = (4096/4) - 2;
+	evo->dma.max &= ~7;
 	evo->dma.put = 0;
 	evo->dma.cur = evo->dma.put;
 	evo->dma.free = evo->dma.max - evo->dma.cur;
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 8675b00caf18..b02a5b1e7d37 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -503,7 +503,7 @@ nv50_graph_tlb_flush(struct drm_device *dev)
 }
 
 void
-nv86_graph_tlb_flush(struct drm_device *dev)
+nv84_graph_tlb_flush(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
index 69af0ba7edd3..a0a2a0277f73 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -104,20 +104,26 @@ nvc0_vm_flush(struct nouveau_vm *vm)
 	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
 	struct drm_device *dev = vm->dev;
 	struct nouveau_vm_pgd *vpgd;
-	u32 r100c80, engine;
+	u32 engine = (dev_priv->chan_vm == vm) ? 1 : 5;
 
 	pinstmem->flush(vm->dev);
 
-	if (vm == dev_priv->chan_vm)
-		engine = 1;
-	else
-		engine = 5;
-
+	spin_lock(&dev_priv->ramin_lock);
 	list_for_each_entry(vpgd, &vm->pgd_list, head) {
-		r100c80 = nv_rd32(dev, 0x100c80);
+		/* looks like maybe a "free flush slots" counter, the
+		 * faster you write to 0x100cbc to more it decreases
+		 */
+		if (!nv_wait_ne(dev, 0x100c80, 0x00ff0000, 0x00000000)) {
+			NV_ERROR(dev, "vm timeout 0: 0x%08x %d\n",
+				 nv_rd32(dev, 0x100c80), engine);
+		}
 		nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8);
 		nv_wr32(dev, 0x100cbc, 0x80000000 | engine);
-		if (!nv_wait(dev, 0x100c80, 0xffffffff, r100c80))
-			NV_ERROR(dev, "vm flush timeout eng %d\n", engine);
+		/* wait for flush to be queued? */
+		if (!nv_wait(dev, 0x100c80, 0x00008000, 0x00008000)) {
+			NV_ERROR(dev, "vm timeout 1: 0x%08x %d\n",
+				 nv_rd32(dev, 0x100c80), engine);
+		}
 	}
+	spin_unlock(&dev_priv->ramin_lock);
 }
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 258fa5e7a2d9..d71d375149f8 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -32,6 +32,7 @@
32#include "atom.h" 32#include "atom.h"
33#include "atom-names.h" 33#include "atom-names.h"
34#include "atom-bits.h" 34#include "atom-bits.h"
35#include "radeon.h"
35 36
36#define ATOM_COND_ABOVE 0 37#define ATOM_COND_ABOVE 0
37#define ATOM_COND_ABOVEOREQUAL 1 38#define ATOM_COND_ABOVEOREQUAL 1
@@ -101,7 +102,9 @@ static void debug_print_spaces(int n)
 static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
 				 uint32_t index, uint32_t data)
 {
+	struct radeon_device *rdev = ctx->card->dev->dev_private;
 	uint32_t temp = 0xCDCDCDCD;
+
 	while (1)
 		switch (CU8(base)) {
 		case ATOM_IIO_NOP:
@@ -112,7 +115,8 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
 			base += 3;
 			break;
 		case ATOM_IIO_WRITE:
-			(void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
+			if (rdev->family == CHIP_RV515)
+				(void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
 			ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
 			base += 3;
 			break;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index b41ec59c7100..9d516a8c4dfa 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -531,6 +531,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 			pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
 		else
 			pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+
+		if ((rdev->family == CHIP_R600) ||
+		    (rdev->family == CHIP_RV610) ||
+		    (rdev->family == CHIP_RV630) ||
+		    (rdev->family == CHIP_RV670))
+			pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
 	} else {
 		pll->flags |= RADEON_PLL_LEGACY;
 
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 0b0cc74c08c0..3453910ee0f3 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -120,11 +120,16 @@ void evergreen_pm_misc(struct radeon_device *rdev)
 	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
 	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
 
-	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
-		if (voltage->voltage != rdev->pm.current_vddc) {
-			radeon_atom_set_voltage(rdev, voltage->voltage);
+	if (voltage->type == VOLTAGE_SW) {
+		if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
+			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
 			rdev->pm.current_vddc = voltage->voltage;
-			DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
+			DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
+		}
+		if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
+			radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
+			rdev->pm.current_vddci = voltage->vddci;
+			DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
 		}
 	}
 }
@@ -3036,9 +3041,6 @@ int evergreen_init(struct radeon_device *rdev)
 {
 	int r;
 
-	r = radeon_dummy_page_init(rdev);
-	if (r)
-		return r;
 	/* This don't do much */
 	r = radeon_gem_init(rdev);
 	if (r)
@@ -3150,7 +3152,6 @@ void evergreen_fini(struct radeon_device *rdev)
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;
-	radeon_dummy_page_fini(rdev);
 }
 
 static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index be271c42de4d..15d58292677a 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -587,7 +587,7 @@ void r600_pm_misc(struct radeon_device *rdev)
 
 	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
 		if (voltage->voltage != rdev->pm.current_vddc) {
-			radeon_atom_set_voltage(rdev, voltage->voltage);
+			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
 			rdev->pm.current_vddc = voltage->voltage;
 			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
 		}
@@ -2509,9 +2509,6 @@ int r600_init(struct radeon_device *rdev)
 {
 	int r;
 
-	r = radeon_dummy_page_init(rdev);
-	if (r)
-		return r;
 	if (r600_debugfs_mc_info_init(rdev)) {
 		DRM_ERROR("Failed to register debugfs file for mc !\n");
 	}
@@ -2625,7 +2622,6 @@ void r600_fini(struct radeon_device *rdev)
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;
-	radeon_dummy_page_fini(rdev);
 }
 
 
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 93f536594c73..ba643b576054 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -177,7 +177,7 @@ void radeon_pm_suspend(struct radeon_device *rdev);
 void radeon_pm_resume(struct radeon_device *rdev);
 void radeon_combios_get_power_modes(struct radeon_device *rdev);
 void radeon_atombios_get_power_modes(struct radeon_device *rdev);
-void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level);
+void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
 void rs690_pm_info(struct radeon_device *rdev);
 extern int rv6xx_get_temp(struct radeon_device *rdev);
 extern int rv770_get_temp(struct radeon_device *rdev);
@@ -767,7 +767,9 @@ struct radeon_voltage {
 	u8 vddci_id; /* index into vddci voltage table */
 	bool vddci_enabled;
 	/* r6xx+ sw */
-	u32 voltage;
+	u16 voltage;
+	/* evergreen+ vddci */
+	u16 vddci;
 };
 
 /* clock mode flags */
@@ -835,10 +837,12 @@ struct radeon_pm {
 	int default_power_state_index;
 	u32 current_sclk;
 	u32 current_mclk;
-	u32 current_vddc;
+	u16 current_vddc;
+	u16 current_vddci;
 	u32 default_sclk;
 	u32 default_mclk;
-	u32 default_vddc;
+	u16 default_vddc;
+	u16 default_vddci;
 	struct radeon_i2c_chan *i2c_bus;
 	/* selected pm method */
 	enum radeon_pm_method pm_method;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index eb888ee5f674..ca576191d058 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -94,7 +94,7 @@ static void radeon_register_accessor_init(struct radeon_device *rdev)
 		rdev->mc_rreg = &rs600_mc_rreg;
 		rdev->mc_wreg = &rs600_mc_wreg;
 	}
-	if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_HEMLOCK)) {
+	if (rdev->family >= CHIP_R600) {
 		rdev->pciep_rreg = &r600_pciep_rreg;
 		rdev->pciep_wreg = &r600_pciep_wreg;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 99768d9d91da..f5d12fb103fa 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2176,24 +2176,27 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
 	}
 }
 
-static u16 radeon_atombios_get_default_vddc(struct radeon_device *rdev)
+static void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
+						 u16 *vddc, u16 *vddci)
 {
 	struct radeon_mode_info *mode_info = &rdev->mode_info;
 	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
 	u8 frev, crev;
 	u16 data_offset;
 	union firmware_info *firmware_info;
-	u16 vddc = 0;
+
+	*vddc = 0;
+	*vddci = 0;
 
 	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
 				   &frev, &crev, &data_offset)) {
 		firmware_info =
 			(union firmware_info *)(mode_info->atom_context->bios +
 						data_offset);
-		vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
+		*vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
+		if ((frev == 2) && (crev >= 2))
+			*vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage);
 	}
-
-	return vddc;
 }
 
 static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev,
@@ -2203,7 +2206,9 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
 	int j;
 	u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
 	u32 misc2 = le16_to_cpu(non_clock_info->usClassification);
-	u16 vddc = radeon_atombios_get_default_vddc(rdev);
+	u16 vddc, vddci;
+
+	radeon_atombios_get_default_voltages(rdev, &vddc, &vddci);
 
 	rdev->pm.power_state[state_index].misc = misc;
 	rdev->pm.power_state[state_index].misc2 = misc2;
@@ -2244,6 +2249,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
 		rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
 		rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
 		rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage;
+		rdev->pm.default_vddci = rdev->pm.power_state[state_index].clock_info[0].voltage.vddci;
 	} else {
 		/* patch the table values with the default slck/mclk from firmware info */
 		for (j = 0; j < mode_index; j++) {
@@ -2286,6 +2292,8 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
 			VOLTAGE_SW;
 		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
 			le16_to_cpu(clock_info->evergreen.usVDDC);
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
+			le16_to_cpu(clock_info->evergreen.usVDDCI);
 	} else {
 		sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
 		sclk |= clock_info->r600.ucEngineClockHigh << 16;
@@ -2577,25 +2585,25 @@ union set_voltage {
 	struct _SET_VOLTAGE_PARAMETERS_V2 v2;
 };
 
-void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level)
+void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type)
 {
 	union set_voltage args;
 	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
-	u8 frev, crev, volt_index = level;
+	u8 frev, crev, volt_index = voltage_level;
 
 	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
 		return;
 
 	switch (crev) {
 	case 1:
-		args.v1.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC;
+		args.v1.ucVoltageType = voltage_type;
 		args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
 		args.v1.ucVoltageIndex = volt_index;
 		break;
 	case 2:
-		args.v2.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC;
+		args.v2.ucVoltageType = voltage_type;
 		args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
-		args.v2.usVoltageLevel = cpu_to_le16(level);
+		args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
 		break;
 	default:
 		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 9e59868d354e..bbcd1dd7bac0 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -79,7 +79,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
 			scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
 		else
 			scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
-		seq = rdev->wb.wb[scratch_index/4];
+		seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
 	} else
 		seq = RREG32(rdev->fence_drv.scratch_reg);
 	if (seq != rdev->fence_drv.last_seq) {
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index f0534ef2f331..8a955bbdb608 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -285,4 +285,6 @@ void radeon_gart_fini(struct radeon_device *rdev)
 	rdev->gart.pages = NULL;
 	rdev->gart.pages_addr = NULL;
 	rdev->gart.ttm_alloced = NULL;
+
+	radeon_dummy_page_fini(rdev);
 }
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index ded2a45bc95c..ccbabf734a61 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -1062,7 +1062,7 @@ void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
 		*val = in_buf[0];
 		DRM_DEBUG("val = 0x%02x\n", *val);
 	} else {
-		DRM_ERROR("i2c 0x%02x 0x%02x read failed\n",
+		DRM_DEBUG("i2c 0x%02x 0x%02x read failed\n",
 			  addr, *val);
 	}
 }
@@ -1084,7 +1084,7 @@ void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus,
 	out_buf[1] = val;
 
 	if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1)
-		DRM_ERROR("i2c 0x%02x 0x%02x write failed\n",
+		DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n",
 			  addr, val);
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 5b54268ed6b2..2f46e0c8df53 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -269,7 +269,7 @@ static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
 	.disable = radeon_legacy_encoder_disable,
 };
 
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
 
 #define MAX_RADEON_LEVEL 0xFF
 
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 08de669e025a..86eda1ea94df 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -23,6 +23,7 @@
23#include "drmP.h" 23#include "drmP.h"
24#include "radeon.h" 24#include "radeon.h"
25#include "avivod.h" 25#include "avivod.h"
26#include "atom.h"
26#ifdef CONFIG_ACPI 27#ifdef CONFIG_ACPI
27#include <linux/acpi.h> 28#include <linux/acpi.h>
28#endif 29#endif
@@ -535,7 +536,11 @@ void radeon_pm_resume(struct radeon_device *rdev)
 	/* set up the default clocks if the MC ucode is loaded */
 	if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
 		if (rdev->pm.default_vddc)
-			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc);
+			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+						SET_VOLTAGE_TYPE_ASIC_VDDC);
+		if (rdev->pm.default_vddci)
+			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
+						SET_VOLTAGE_TYPE_ASIC_VDDCI);
 		if (rdev->pm.default_sclk)
 			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
 		if (rdev->pm.default_mclk)
@@ -548,6 +553,7 @@ void radeon_pm_resume(struct radeon_device *rdev)
 	rdev->pm.current_sclk = rdev->pm.default_sclk;
 	rdev->pm.current_mclk = rdev->pm.default_mclk;
 	rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+	rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
 	if (rdev->pm.pm_method == PM_METHOD_DYNPM
 	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
 		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
@@ -585,7 +591,8 @@ int radeon_pm_init(struct radeon_device *rdev)
 	/* set up the default clocks if the MC ucode is loaded */
 	if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
 		if (rdev->pm.default_vddc)
-			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc);
+			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+						SET_VOLTAGE_TYPE_ASIC_VDDC);
 		if (rdev->pm.default_sclk)
 			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
 		if (rdev->pm.default_mclk)
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index bbc9cd823334..c6776e48fdde 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -248,7 +248,7 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
 void radeon_ring_free_size(struct radeon_device *rdev)
 {
 	if (rdev->wb.enabled)
-		rdev->cp.rptr = rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4];
+		rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]);
 	else {
 		if (rdev->family >= CHIP_R600)
 			rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 876cebc4b8ba..6e3b11e5abbe 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -114,7 +114,7 @@ void rs600_pm_misc(struct radeon_device *rdev)
 			udelay(voltage->delay);
 		}
 	} else if (voltage->type == VOLTAGE_VDDC)
-		radeon_atom_set_voltage(rdev, voltage->vddc_id);
+		radeon_atom_set_voltage(rdev, voltage->vddc_id, SET_VOLTAGE_TYPE_ASIC_VDDC);
 
 	dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
 	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index b974ac7df8df..ef8a5babe9f7 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -106,7 +106,7 @@ void rv770_pm_misc(struct radeon_device *rdev)
 
 	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
 		if (voltage->voltage != rdev->pm.current_vddc) {
-			radeon_atom_set_voltage(rdev, voltage->voltage);
+			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
 			rdev->pm.current_vddc = voltage->voltage;
 			DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
 		}
@@ -1255,9 +1255,6 @@ int rv770_init(struct radeon_device *rdev)
 {
 	int r;
 
-	r = radeon_dummy_page_init(rdev);
-	if (r)
-		return r;
 	/* This don't do much */
 	r = radeon_gem_init(rdev);
 	if (r)
@@ -1372,7 +1369,6 @@ void rv770_fini(struct radeon_device *rdev)
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;
-	radeon_dummy_page_fini(rdev);
 }
 
 static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 737a2a2e46a5..9d9d92945f8c 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -683,22 +683,14 @@ int ttm_get_pages(struct list_head *pages, int flags,
 		gfp_flags |= GFP_HIGHUSER;
 
 	for (r = 0; r < count; ++r) {
-		if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
-			void *addr;
-			addr = dma_alloc_coherent(NULL, PAGE_SIZE,
-						  &dma_address[r],
-						  gfp_flags);
-			if (addr == NULL)
-				return -ENOMEM;
-			p = virt_to_page(addr);
-		} else
-			p = alloc_page(gfp_flags);
+		p = alloc_page(gfp_flags);
 		if (!p) {
 
 			printk(KERN_ERR TTM_PFX
 			       "Unable to allocate page.");
 			return -ENOMEM;
 		}
+
 		list_add(&p->lru, pages);
 	}
 	return 0;
@@ -746,24 +738,12 @@ void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
 	unsigned long irq_flags;
 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
 	struct page *p, *tmp;
-	unsigned r;
 
 	if (pool == NULL) {
 		/* No pool for this memory type so free the pages */
 
-		r = page_count-1;
 		list_for_each_entry_safe(p, tmp, pages, lru) {
-			if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
-				void *addr = page_address(p);
-				WARN_ON(!addr || !dma_address[r]);
-				if (addr)
-					dma_free_coherent(NULL, PAGE_SIZE,
-							  addr,
-							  dma_address[r]);
-				dma_address[r] = 0;
-			} else
-				__free_page(p);
-			r--;
+			__free_page(p);
 		}
 		/* Make the pages list empty */
 		INIT_LIST_HEAD(pages);
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig
index 70e60a4bb678..419917955bf6 100644
--- a/drivers/gpu/stub/Kconfig
+++ b/drivers/gpu/stub/Kconfig
@@ -5,6 +5,7 @@ config STUB_POULSBO
 	# Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
 	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
 	select BACKLIGHT_CLASS_DEVICE if ACPI
+	select VIDEO_OUTPUT_CONTROL if ACPI
 	select INPUT if ACPI
 	select ACPI_VIDEO if ACPI
 	select THERMAL if ACPI
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index d01574d98870..f4c8c844b913 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -55,6 +55,19 @@ int mfd_cell_disable(struct platform_device *pdev)
 }
 EXPORT_SYMBOL(mfd_cell_disable);
 
+static int mfd_platform_add_cell(struct platform_device *pdev,
+				 const struct mfd_cell *cell)
+{
+	if (!cell)
+		return 0;
+
+	pdev->mfd_cell = kmemdup(cell, sizeof(*cell), GFP_KERNEL);
+	if (!pdev->mfd_cell)
+		return -ENOMEM;
+
+	return 0;
+}
+
 static int mfd_add_device(struct device *parent, int id,
 			  const struct mfd_cell *cell,
 			  struct resource *mem_base,
@@ -75,7 +88,7 @@ static int mfd_add_device(struct device *parent, int id,
 
 	pdev->dev.parent = parent;
 
-	ret = platform_device_add_data(pdev, cell, sizeof(*cell));
+	ret = mfd_platform_add_cell(pdev, cell);
 	if (ret)
 		goto fail_res;
 
@@ -123,7 +136,6 @@ static int mfd_add_device(struct device *parent, int id,
 
 	return 0;
 
-/*	platform_device_del(pdev); */
 fail_res:
 	kfree(res);
 fail_device:
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index f803c58b941d..66823eded7a3 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -154,7 +154,7 @@ struct be_eq_obj {
 	u16 min_eqd;		/* in usecs */
 	u16 max_eqd;		/* in usecs */
 	u16 cur_eqd;		/* in usecs */
-	u8 msix_vec_idx;
+	u8 eq_idx;
 
 	struct napi_struct napi;
 };
@@ -291,7 +291,7 @@ struct be_adapter {
 	u32 num_rx_qs;
 	u32 big_page_size;	/* Compounded page size shared by rx wrbs */
 
-	u8 msix_vec_next_idx;
+	u8 eq_next_idx;
 	struct be_drv_stats drv_stats;
 
 	struct vlan_group *vlan_grp;
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 9a54c8b24ff9..7cb5a114c733 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -1497,7 +1497,7 @@ static int be_tx_queues_create(struct be_adapter *adapter)
 	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
 		goto tx_eq_free;
 
-	adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
+	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
 
 
 	/* Alloc TX eth compl queue */
@@ -1590,7 +1590,7 @@ static int be_rx_queues_create(struct be_adapter *adapter)
 		if (rc)
 			goto err;
 
-		rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
+		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
 
 		/* CQ */
 		cq = &rxo->cq;
@@ -1666,11 +1666,11 @@ static irqreturn_t be_intx(int irq, void *dev)
 		if (!isr)
 			return IRQ_NONE;
 
-		if ((1 << adapter->tx_eq.msix_vec_idx & isr))
+		if ((1 << adapter->tx_eq.eq_idx & isr))
 			event_handle(adapter, &adapter->tx_eq);
 
 		for_all_rx_queues(adapter, rxo, i) {
-			if ((1 << rxo->rx_eq.msix_vec_idx & isr))
+			if ((1 << rxo->rx_eq.eq_idx & isr))
 				event_handle(adapter, &rxo->rx_eq);
 		}
 	}
@@ -1951,7 +1951,7 @@ static void be_sriov_disable(struct be_adapter *adapter)
 static inline int be_msix_vec_get(struct be_adapter *adapter,
 				  struct be_eq_obj *eq_obj)
 {
-	return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
+	return adapter->msix_entries[eq_obj->eq_idx].vector;
 }
 
 static int be_request_irq(struct be_adapter *adapter,
@@ -2345,6 +2345,7 @@ static int be_clear(struct be_adapter *adapter)
 	be_mcc_queues_destroy(adapter);
 	be_rx_queues_destroy(adapter);
 	be_tx_queues_destroy(adapter);
+	adapter->eq_next_idx = 0;
 
 	if (be_physfn(adapter) && adapter->sriov_enabled)
 		for (vf = 0; vf < num_vfs; vf++)
@@ -3141,12 +3142,14 @@ static int be_resume(struct pci_dev *pdev)
 static void be_shutdown(struct pci_dev *pdev)
 {
 	struct be_adapter *adapter = pci_get_drvdata(pdev);
-	struct net_device *netdev = adapter->netdev;
 
-	if (netif_running(netdev))
+	if (!adapter)
+		return;
+
+	if (netif_running(adapter->netdev))
 		cancel_delayed_work_sync(&adapter->work);
 
-	netif_device_detach(netdev);
+	netif_device_detach(adapter->netdev);
 
 	be_cmd_reset_function(adapter);
 
diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/bna/bfa_ioc.c
index 34933cb9569f..e3de0b8625cd 100644
--- a/drivers/net/bna/bfa_ioc.c
+++ b/drivers/net/bna/bfa_ioc.c
@@ -2219,13 +2219,9 @@ bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
 static void
 bfa_ioc_recover(struct bfa_ioc *ioc)
 {
-	u16 bdf;
-
-	bdf = (ioc->pcidev.pci_slot << 8 | ioc->pcidev.pci_func << 3 |
-					ioc->pcidev.device_id);
-
-	pr_crit("Firmware heartbeat failure at %d", bdf);
-	BUG_ON(1);
+	pr_crit("Heart Beat of IOC has failed\n");
+	bfa_ioc_stats(ioc, ioc_hbfails);
+	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
 }
 
 static void
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 7513c4523ac4..330140ee266d 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -931,7 +931,8 @@ static int mcp251x_open(struct net_device *net)
 	priv->tx_len = 0;
 
 	ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
-				   IRQF_TRIGGER_FALLING, DEVICE_NAME, priv);
+				   pdata->irq_flags ? pdata->irq_flags : IRQF_TRIGGER_FALLING,
+				   DEVICE_NAME, priv);
 	if (ret) {
 		dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
 		if (pdata->transceiver_enable)
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index cfd50bc49169..62dd21b06df4 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -345,6 +345,8 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 		err = mlx4_en_init_allocator(priv, ring);
 		if (err) {
 			en_err(priv, "Failed initializing ring allocator\n");
+			if (ring->stride <= TXBB_SIZE)
+				ring->buf -= TXBB_SIZE;
 			ring_ind--;
 			goto err_allocator;
 		}
@@ -369,6 +371,8 @@ err_buffers:
 	ring_ind = priv->rx_ring_num - 1;
 err_allocator:
 	while (ring_ind >= 0) {
+		if (priv->rx_ring[ring_ind].stride <= TXBB_SIZE)
+			priv->rx_ring[ring_ind].buf -= TXBB_SIZE;
 		mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
 		ring_ind--;
 	}
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 62fa7eec5f0c..3814fc9b1145 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -944,6 +944,10 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
 	}
 
 	for (port = 1; port <= dev->caps.num_ports; port++) {
+		enum mlx4_port_type port_type = 0;
+		mlx4_SENSE_PORT(dev, port, &port_type);
+		if (port_type)
+			dev->caps.port_type[port] = port_type;
 		ib_port_default_caps = 0;
 		err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
 		if (err)
@@ -958,6 +962,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
 			goto err_mcg_table_free;
 		}
 	}
+	mlx4_set_port_mask(dev);
 
 	return 0;
 
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index c1e0e5f1bcdb..dd7d745fbab4 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -431,6 +431,8 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
 
 void mlx4_handle_catas_err(struct mlx4_dev *dev);
 
+int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
+		    enum mlx4_port_type *type);
 void mlx4_do_sense_ports(struct mlx4_dev *dev,
 			 enum mlx4_port_type *stype,
 			 enum mlx4_port_type *defaults);
diff --git a/drivers/net/mlx4/sense.c b/drivers/net/mlx4/sense.c
index 015fbe785c13..e2337a7411d9 100644
--- a/drivers/net/mlx4/sense.c
+++ b/drivers/net/mlx4/sense.c
@@ -38,8 +38,8 @@
 
 #include "mlx4.h"
 
-static int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
-			   enum mlx4_port_type *type)
+int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
+		    enum mlx4_port_type *type)
 {
 	u64 out_param;
 	int err = 0;
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 693aaef4e3ce..718879b35b7d 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -317,7 +317,7 @@ static void pppoe_flush_dev(struct net_device *dev)
 	lock_sock(sk);
 
 	if (po->pppoe_dev == dev &&
-	    sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
+	    sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
 		pppox_unbind_sock(sk);
 		sk->sk_state = PPPOX_ZOMBIE;
 		sk->sk_state_change(sk);
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index c498b720b532..4b42ecc63dcf 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -1818,6 +1818,7 @@ static int __devinit smsc911x_init(struct net_device *dev)
 	SMSC_TRACE(PROBE, "PHY will be autodetected.");
 
 	spin_lock_init(&pdata->dev_lock);
+	spin_lock_init(&pdata->mac_lock);
 
 	if (pdata->ioaddr == 0) {
 		SMSC_WARNING(PROBE, "pdata->ioaddr: 0x00000000");
@@ -1895,8 +1896,11 @@ static int __devinit smsc911x_init(struct net_device *dev)
 	/* workaround for platforms without an eeprom, where the mac address
 	 * is stored elsewhere and set by the bootloader. This saves the
 	 * mac address before resetting the device */
-	if (pdata->config.flags & SMSC911X_SAVE_MAC_ADDRESS)
+	if (pdata->config.flags & SMSC911X_SAVE_MAC_ADDRESS) {
+		spin_lock_irq(&pdata->mac_lock);
 		smsc911x_read_mac_address(dev);
+		spin_unlock_irq(&pdata->mac_lock);
+	}
 
 	/* Reset the LAN911x */
 	if (smsc911x_soft_reset(pdata))
@@ -2059,8 +2063,6 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
 		SMSC_TRACE(PROBE, "Network interface: \"%s\"", dev->name);
 	}
 
-	spin_lock_init(&pdata->mac_lock);
-
 	retval = smsc911x_mii_init(pdev, dev);
 	if (retval) {
 		SMSC_WARNING(PROBE,
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 727874d9deb6..47a6c870b51f 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1313,6 +1313,21 @@ static const struct usb_device_id products[] = {
 		USB_DEVICE(0x0424, 0x9909),
 		.driver_info = (unsigned long) &smsc95xx_info,
 	},
+	{
+		/* SMSC LAN9530 USB Ethernet Device */
+		USB_DEVICE(0x0424, 0x9530),
+		.driver_info = (unsigned long) &smsc95xx_info,
+	},
+	{
+		/* SMSC LAN9730 USB Ethernet Device */
+		USB_DEVICE(0x0424, 0x9730),
+		.driver_info = (unsigned long) &smsc95xx_info,
+	},
+	{
+		/* SMSC LAN89530 USB Ethernet Device */
+		USB_DEVICE(0x0424, 0x9E08),
+		.driver_info = (unsigned long) &smsc95xx_info,
+	},
 	{ },		/* END */
 };
 MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 338b07502f1a..1ec9bcd6b281 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -2546,6 +2546,7 @@ static struct {
 	{ AR_SREV_VERSION_9287,		"9287" },
 	{ AR_SREV_VERSION_9271,		"9271" },
 	{ AR_SREV_VERSION_9300,		"9300" },
+	{ AR_SREV_VERSION_9485,		"9485" },
 };
 
 /* For devices with external radios */
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 3d5566e7af0a..ff0f5ba14b2c 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1536,7 +1536,7 @@ static void dma_rx(struct b43_dmaring *ring, int *slot)
 		dmaaddr = meta->dmaaddr;
 		goto drop_recycle_buffer;
 	}
-	if (unlikely(len > ring->rx_buffersize)) {
+	if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
 		/* The data did not fit into one descriptor buffer
 		 * and is split over multiple buffers.
 		 * This should never happen, as we try to allocate buffers
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index a01c2100f166..e8a80a1251bf 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -163,7 +163,7 @@ struct b43_dmadesc_generic {
 /* DMA engine tuning knobs */
 #define B43_TXRING_SLOTS		256
 #define B43_RXRING_SLOTS		64
-#define B43_DMA0_RX_BUFFERSIZE		IEEE80211_MAX_FRAME_LEN
+#define B43_DMA0_RX_BUFFERSIZE		(B43_DMA0_RX_FRAMEOFFSET + IEEE80211_MAX_FRAME_LEN)
 
 /* Pointer poison */
 #define B43_DMA_PTR_POISON		((void *)ERR_PTR(-ENOMEM))
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 98aa8af01192..20b66469d68f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -241,7 +241,7 @@ struct iwl_eeprom_enhanced_txpwr {
 
 /* 6x00 Specific */
 #define EEPROM_6000_TX_POWER_VERSION    (4)
-#define EEPROM_6000_EEPROM_VERSION	(0x434)
+#define EEPROM_6000_EEPROM_VERSION	(0x423)
 
 /* 6x50 Specific */
 #define EEPROM_6050_TX_POWER_VERSION    (4)
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 9b344a921e74..e18358725b69 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -56,6 +56,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
 	{USB_DEVICE(0x0846, 0x4210)},	/* Netgear WG121 the second ? */
 	{USB_DEVICE(0x0846, 0x4220)},	/* Netgear WG111 */
 	{USB_DEVICE(0x09aa, 0x1000)},	/* Spinnaker Proto board */
+	{USB_DEVICE(0x0bf8, 0x1007)},	/* Fujitsu E-5400 USB */
 	{USB_DEVICE(0x0cde, 0x0006)},	/* Medion 40900, Roper Europe */
 	{USB_DEVICE(0x0db0, 0x6826)},	/* MSI UB54G (MS-6826) */
 	{USB_DEVICE(0x107b, 0x55f2)},	/* Gateway WGU-210 (Gemtek) */
@@ -68,6 +69,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
 	{USB_DEVICE(0x1915, 0x2235)},	/* Linksys WUSB54G Portable OEM */
 	{USB_DEVICE(0x2001, 0x3701)},	/* DLink DWL-G120 Spinnaker */
 	{USB_DEVICE(0x2001, 0x3703)},	/* DLink DWL-G122 */
+	{USB_DEVICE(0x2001, 0x3762)},	/* Conceptronic C54U */
 	{USB_DEVICE(0x5041, 0x2234)},	/* Linksys WUSB54G */
 	{USB_DEVICE(0x5041, 0x2235)},	/* Linksys WUSB54G Portable */
 
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 9de9dbe94399..84eb6ad36377 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -1062,8 +1062,10 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
 	 * Stop all work.
 	 */
 	cancel_work_sync(&rt2x00dev->intf_work);
-	cancel_work_sync(&rt2x00dev->rxdone_work);
-	cancel_work_sync(&rt2x00dev->txdone_work);
+	if (rt2x00_is_usb(rt2x00dev)) {
+		cancel_work_sync(&rt2x00dev->rxdone_work);
+		cancel_work_sync(&rt2x00dev->txdone_work);
+	}
 	destroy_workqueue(rt2x00dev->workqueue);
 
 	/*
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index f74a8701c67d..590f14f45a89 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -685,7 +685,7 @@ static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data)
 
 	u8 efuse_data, word_cnts = 0;
 	u16 efuse_addr = 0;
-	u8 hworden;
+	u8 hworden = 0;
 	u8 tmpdata[8];
 
 	if (data == NULL)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index 5ef91374b230..28a6ce3bc239 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -303,7 +303,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
 	u16 box_reg, box_extreg;
 	u8 u1b_tmp;
 	bool isfw_read = false;
-	u8 buf_index;
+	u8 buf_index = 0;
 	bool bwrite_sucess = false;
 	u8 wait_h2c_limmit = 100;
 	u8 wait_writeh2c_limmit = 100;
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index a4b2613d6a8c..f5d85735d642 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -246,7 +246,7 @@ static void _rtl_usb_io_handler_init(struct device *dev,
 
 static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw)
 {
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_priv __maybe_unused *rtlpriv = rtl_priv(hw);
 
 	mutex_destroy(&rtlpriv->io.bb_mutex);
 }
diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/wl12xx/sdio.c
index 5b9dbeafec06..b1c7d031c391 100644
--- a/drivers/net/wireless/wl12xx/sdio.c
+++ b/drivers/net/wireless/wl12xx/sdio.c
@@ -340,7 +340,7 @@ module_init(wl1271_init);
 module_exit(wl1271_exit);
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
 MODULE_FIRMWARE(WL1271_FW_NAME);
 MODULE_FIRMWARE(WL1271_AP_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
index 18cf01719ae0..ffc745b17f4d 100644
--- a/drivers/net/wireless/wl12xx/spi.c
+++ b/drivers/net/wireless/wl12xx/spi.c
@@ -487,7 +487,7 @@ module_init(wl1271_init);
 module_exit(wl1271_exit);
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
 MODULE_FIRMWARE(WL1271_FW_NAME);
 MODULE_FIRMWARE(WL1271_AP_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/wl12xx/testmode.c
index e64403b6896d..6ec06a4a4c6d 100644
--- a/drivers/net/wireless/wl12xx/testmode.c
+++ b/drivers/net/wireless/wl12xx/testmode.c
@@ -204,7 +204,10 @@ static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[])
 
 	kfree(wl->nvs);
 
-	wl->nvs = kzalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL);
+	if (len != sizeof(struct wl1271_nvs_file))
+		return -EINVAL;
+
+	wl->nvs = kzalloc(len, GFP_KERNEL);
 	if (!wl->nvs) {
 		wl1271_error("could not allocate memory for the nvs file");
 		ret = -ENOMEM;
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 58236e6d0921..ab607bbd6291 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -643,7 +643,7 @@ static void rx_urb_complete(struct urb *urb)
 	usb = urb->context;
 	rx = &usb->rx;
 
-	zd_usb_reset_rx_idle_timer(usb);
+	tasklet_schedule(&rx->reset_timer_tasklet);
 
 	if (length%rx->usb_packet_size > rx->usb_packet_size-4) {
 		/* If there is an old first fragment, we don't care. */
@@ -812,6 +812,7 @@ void zd_usb_disable_rx(struct zd_usb *usb)
 	__zd_usb_disable_rx(usb);
 	mutex_unlock(&rx->setup_mutex);
 
+	tasklet_kill(&rx->reset_timer_tasklet);
 	cancel_delayed_work_sync(&rx->idle_work);
 }
 
@@ -1106,6 +1107,13 @@ static void zd_rx_idle_timer_handler(struct work_struct *work)
 	zd_usb_reset_rx(usb);
 }
 
+static void zd_usb_reset_rx_idle_timer_tasklet(unsigned long param)
+{
+	struct zd_usb *usb = (struct zd_usb *)param;
+
+	zd_usb_reset_rx_idle_timer(usb);
+}
+
 void zd_usb_reset_rx_idle_timer(struct zd_usb *usb)
 {
 	struct zd_usb_rx *rx = &usb->rx;
@@ -1127,6 +1135,7 @@ static inline void init_usb_interrupt(struct zd_usb *usb)
 static inline void init_usb_rx(struct zd_usb *usb)
 {
 	struct zd_usb_rx *rx = &usb->rx;
+
 	spin_lock_init(&rx->lock);
 	mutex_init(&rx->setup_mutex);
 	if (interface_to_usbdev(usb->intf)->speed == USB_SPEED_HIGH) {
@@ -1136,11 +1145,14 @@ static inline void init_usb_rx(struct zd_usb *usb)
 	}
 	ZD_ASSERT(rx->fragment_length == 0);
 	INIT_DELAYED_WORK(&rx->idle_work, zd_rx_idle_timer_handler);
+	rx->reset_timer_tasklet.func = zd_usb_reset_rx_idle_timer_tasklet;
+	rx->reset_timer_tasklet.data = (unsigned long)usb;
 }
 
 static inline void init_usb_tx(struct zd_usb *usb)
 {
 	struct zd_usb_tx *tx = &usb->tx;
+
 	spin_lock_init(&tx->lock);
 	atomic_set(&tx->enabled, 0);
 	tx->stopped = 0;
@@ -1671,6 +1683,10 @@ static void iowrite16v_urb_complete(struct urb *urb)
 
 	if (urb->status && !usb->cmd_error)
 		usb->cmd_error = urb->status;
+
+	if (!usb->cmd_error &&
+	    urb->actual_length != urb->transfer_buffer_length)
+		usb->cmd_error = -EIO;
 }
 
 static int zd_submit_waiting_urb(struct zd_usb *usb, bool last)
@@ -1805,7 +1821,7 @@ int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
 	usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT),
 			 req, req_len, iowrite16v_urb_complete, usb,
 			 ep->desc.bInterval);
-	urb->transfer_flags |= URB_FREE_BUFFER | URB_SHORT_NOT_OK;
+	urb->transfer_flags |= URB_FREE_BUFFER;
 
 	/* Submit previous URB */
 	r = zd_submit_waiting_urb(usb, false);
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
index b3df2c8116cc..325d0f989257 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -183,6 +183,7 @@ struct zd_usb_rx {
 	spinlock_t lock;
 	struct mutex setup_mutex;
 	struct delayed_work idle_work;
+	struct tasklet_struct reset_timer_tasklet;
 	u8 fragment[2 * USB_MAX_RX_SIZE];
 	unsigned int fragment_length;
 	unsigned int usb_packet_size;
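
Taken together, the zd1211rw hunks above move the idle-timer reset out of the URB completion handler, which runs in interrupt context, and into a tasklet that runs in softirq context where the USB I/O done by zd_usb_reset_rx_idle_timer() is safe. A schematic kernel-style restatement of that lifecycle (a sketch against the 2.6-era tasklet API used in the diff itself, not additional driver code; the helper names are illustrative):

/* Softirq side: the deferred work actually runs here. */
static void reset_timer_tasklet_fn(unsigned long param)
{
	struct zd_usb *usb = (struct zd_usb *)param;

	zd_usb_reset_rx_idle_timer(usb);	/* safe outside IRQ context */
}

/* Setup (mirrors init_usb_rx): bind function and context. */
static void rx_tasklet_setup(struct zd_usb *usb, struct zd_usb_rx *rx)
{
	rx->reset_timer_tasklet.func = reset_timer_tasklet_fn;
	rx->reset_timer_tasklet.data = (unsigned long)usb;
}

/* Hot path (mirrors rx_urb_complete): only defer, never do USB I/O here. */
static void rx_tasklet_kick(struct zd_usb_rx *rx)
{
	tasklet_schedule(&rx->reset_timer_tasklet);
}

/* Teardown (mirrors zd_usb_disable_rx): wait out any running instance. */
static void rx_tasklet_teardown(struct zd_usb_rx *rx)
{
	tasklet_kill(&rx->reset_timer_tasklet);
}
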
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index d86ea8b01137..135df164a4c1 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -781,7 +781,7 @@ static int pci_pm_resume(struct device *dev)
 
 #endif /* !CONFIG_SUSPEND */
 
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
 
 static int pci_pm_freeze(struct device *dev)
 {
@@ -970,7 +970,7 @@ static int pci_pm_restore(struct device *dev)
 	return error;
 }
 
-#else /* !CONFIG_HIBERNATION */
+#else /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #define pci_pm_freeze		NULL
 #define pci_pm_freeze_noirq	NULL
@@ -981,7 +981,7 @@ static int pci_pm_restore(struct device *dev)
 #define pci_pm_restore		NULL
 #define pci_pm_restore_noirq	NULL
 
-#endif /* !CONFIG_HIBERNATION */
+#endif /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #ifdef CONFIG_PM_RUNTIME
 
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 89d0a6a88df7..ebf51ad1b714 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -676,10 +676,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
 		min_align = align1 >> 1;
 		align += aligns[order];
 	}
-	size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), align);
+	size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
 	size1 = !add_size ? size :
 		calculate_memsize(size, min_size+add_size, 0,
-				resource_size(b_res), align);
+				resource_size(b_res), min_align);
 	if (!size0 && !size1) {
 		if (b_res->start || b_res->end)
 			dev_info(&bus->self->dev, "disabling bridge window "
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 2ee442c2a5db..0485e394712a 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -187,7 +187,8 @@ config MSI_LAPTOP
 	depends on ACPI
 	depends on BACKLIGHT_CLASS_DEVICE
 	depends on RFKILL
-	depends on SERIO_I8042
+	depends on INPUT && SERIO_I8042
+	select INPUT_SPARSEKMAP
 	---help---
 	  This is a driver for laptops built by MSI (MICRO-STAR
 	  INTERNATIONAL):
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 5ea6c3477d17..ac4e7f83ce6c 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -89,7 +89,7 @@ MODULE_LICENSE("GPL");
 #define ACERWMID_EVENT_GUID "676AA15E-6A47-4D9F-A2CC-1E6D18D14026"
 
 MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB");
-MODULE_ALIAS("wmi:6AF4F258-B401-42Fd-BE91-3D4AC2D7C0D3");
+MODULE_ALIAS("wmi:6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3");
 MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026");
 
 enum acer_wmi_event_ids {
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index efc776cb0c66..832a3fd7c1c8 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -201,8 +201,8 @@ static int asus_wmi_input_init(struct asus_wmi *asus)
 	if (!asus->inputdev)
 		return -ENOMEM;
 
-	asus->inputdev->name = asus->driver->input_phys;
-	asus->inputdev->phys = asus->driver->input_name;
+	asus->inputdev->name = asus->driver->input_name;
+	asus->inputdev->phys = asus->driver->input_phys;
 	asus->inputdev->id.bustype = BUS_HOST;
 	asus->inputdev->dev.parent = &asus->platform_device->dev;
 
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index 0ddc434fb93b..649dcadd8ea3 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -67,9 +67,11 @@ static const struct key_entry eeepc_wmi_keymap[] = {
 	{ KE_KEY, 0x82, { KEY_CAMERA } },
 	{ KE_KEY, 0x83, { KEY_CAMERA_ZOOMIN } },
 	{ KE_KEY, 0x88, { KEY_WLAN } },
+	{ KE_KEY, 0xbd, { KEY_CAMERA } },
 	{ KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } },
 	{ KE_KEY, 0xe0, { KEY_PROG1 } }, /* Task Manager */
 	{ KE_KEY, 0xe1, { KEY_F14 } }, /* Change Resolution */
+	{ KE_KEY, 0xe8, { KEY_SCREENLOCK } },
 	{ KE_KEY, 0xe9, { KEY_BRIGHTNESS_ZERO } },
 	{ KE_KEY, 0xeb, { KEY_CAMERA_ZOOMOUT } },
 	{ KE_KEY, 0xec, { KEY_CAMERA_UP } },
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
index d653104b59cb..464bb3fc4d88 100644
--- a/drivers/platform/x86/intel_pmic_gpio.c
+++ b/drivers/platform/x86/intel_pmic_gpio.c
@@ -74,6 +74,19 @@ struct pmic_gpio {
 	u32 trigger_type;
 };
 
+static void pmic_program_irqtype(int gpio, int type)
+{
+	if (type & IRQ_TYPE_EDGE_RISING)
+		intel_scu_ipc_update_register(GPIO0 + gpio, 0x20, 0x20);
+	else
+		intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x20);
+
+	if (type & IRQ_TYPE_EDGE_FALLING)
+		intel_scu_ipc_update_register(GPIO0 + gpio, 0x10, 0x10);
+	else
+		intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x10);
+};
+
 static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 {
 	if (offset > 8) {
@@ -166,16 +179,38 @@ static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
 	return pg->irq_base + offset;
 }
 
+static void pmic_bus_lock(struct irq_data *data)
+{
+	struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);
+
+	mutex_lock(&pg->buslock);
+}
+
+static void pmic_bus_sync_unlock(struct irq_data *data)
+{
+	struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);
+
+	if (pg->update_type) {
+		unsigned int gpio = pg->update_type & ~GPIO_UPDATE_TYPE;
+
+		pmic_program_irqtype(gpio, pg->trigger_type);
+		pg->update_type = 0;
+	}
+	mutex_unlock(&pg->buslock);
+}
+
 /* the gpiointr register is read-clear, so just do nothing. */
 static void pmic_irq_unmask(struct irq_data *data) { }
 
 static void pmic_irq_mask(struct irq_data *data) { }
 
 static struct irq_chip pmic_irqchip = {
 	.name		= "PMIC-GPIO",
 	.irq_mask	= pmic_irq_mask,
 	.irq_unmask	= pmic_irq_unmask,
 	.irq_set_type	= pmic_irq_type,
+	.irq_bus_lock	= pmic_bus_lock,
+	.irq_bus_sync_unlock = pmic_bus_sync_unlock,
 };
 
 static irqreturn_t pmic_irq_handler(int irq, void *data)
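
The intel_pmic_gpio change above adopts the standard slow-bus irq_chip pattern: genirq invokes callbacks such as irq_set_type() with the descriptor lock held, where the driver must not issue the sleeping SCU IPC writes, so the requested trigger type is only recorded there, and the register update is replayed from irq_bus_sync_unlock(), which genirq calls in a sleepable context after the lock is dropped. In outline (a sketch of the pattern using the fields from the hunk above, not new driver code):

/* genirq calls this before irq_set_type(): take the sleeping bus lock. */
static void example_bus_lock(struct irq_data *data)
{
	struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);

	mutex_lock(&pg->buslock);
}

/* genirq calls this after irq_set_type(): replay the deferred, sleeping
 * bus traffic that irq_set_type() could only record. */
static void example_bus_sync_unlock(struct irq_data *data)
{
	struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);

	if (pg->update_type) {
		unsigned int gpio = pg->update_type & ~GPIO_UPDATE_TYPE;

		pmic_program_irqtype(gpio, pg->trigger_type);
		pg->update_type = 0;
	}
	mutex_unlock(&pg->buslock);
}
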
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index de434c6dc2d6..d347116d150e 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -571,6 +571,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
 		.callback = dmi_check_cb,
 	},
 	{
+		.ident = "R410 Plus",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR,
+					"SAMSUNG ELECTRONICS CO., LTD."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "R410P"),
+			DMI_MATCH(DMI_BOARD_NAME, "R460"),
+		},
+		.callback = dmi_check_cb,
+	},
+	{
 		.ident = "R518",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR,
@@ -591,12 +601,12 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
 		.callback = dmi_check_cb,
 	},
 	{
-		.ident = "N150/N210/N220",
+		.ident = "N150/N210/N220/N230",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR,
 					"SAMSUNG ELECTRONICS CO., LTD."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"),
-			DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220/N230"),
+			DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220/N230"),
 		},
 		.callback = dmi_check_cb,
 	},
@@ -771,6 +781,7 @@ static int __init samsung_init(void)
 
 	/* create a backlight device to talk to this one */
 	memset(&props, 0, sizeof(struct backlight_properties));
+	props.type = BACKLIGHT_PLATFORM;
 	props.max_brightness = sabi_config->max_brightness;
 	backlight_device = backlight_device_register("samsung", &sdev->dev,
 						     NULL, &backlight_ops,
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index e642f5f29504..8f709aec4da0 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -138,6 +138,8 @@ MODULE_PARM_DESC(kbd_backlight_timeout,
138 "1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout " 138 "1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout "
139 "(default: 0)"); 139 "(default: 0)");
140 140
141static void sony_nc_kbd_backlight_resume(void);
142
141enum sony_nc_rfkill { 143enum sony_nc_rfkill {
142 SONY_WIFI, 144 SONY_WIFI,
143 SONY_BLUETOOTH, 145 SONY_BLUETOOTH,
@@ -771,11 +773,6 @@ static int sony_nc_handles_setup(struct platform_device *pd)
771 if (!handles) 773 if (!handles)
772 return -ENOMEM; 774 return -ENOMEM;
773 775
774 sysfs_attr_init(&handles->devattr.attr);
775 handles->devattr.attr.name = "handles";
776 handles->devattr.attr.mode = S_IRUGO;
777 handles->devattr.show = sony_nc_handles_show;
778
779 for (i = 0; i < ARRAY_SIZE(handles->cap); i++) { 776 for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
780 if (!acpi_callsetfunc(sony_nc_acpi_handle, 777 if (!acpi_callsetfunc(sony_nc_acpi_handle,
781 "SN00", i + 0x20, &result)) { 778 "SN00", i + 0x20, &result)) {
@@ -785,11 +782,18 @@ static int sony_nc_handles_setup(struct platform_device *pd)
785 } 782 }
786 } 783 }
787 784
788 /* allow reading capabilities via sysfs */ 785 if (debug) {
789 if (device_create_file(&pd->dev, &handles->devattr)) { 786 sysfs_attr_init(&handles->devattr.attr);
790 kfree(handles); 787 handles->devattr.attr.name = "handles";
791 handles = NULL; 788 handles->devattr.attr.mode = S_IRUGO;
792 return -1; 789 handles->devattr.show = sony_nc_handles_show;
790
791 /* allow reading capabilities via sysfs */
792 if (device_create_file(&pd->dev, &handles->devattr)) {
793 kfree(handles);
794 handles = NULL;
795 return -1;
796 }
793 } 797 }
794 798
795 return 0; 799 return 0;
@@ -798,7 +802,8 @@ static int sony_nc_handles_setup(struct platform_device *pd)
798static int sony_nc_handles_cleanup(struct platform_device *pd) 802static int sony_nc_handles_cleanup(struct platform_device *pd)
799{ 803{
800 if (handles) { 804 if (handles) {
801 device_remove_file(&pd->dev, &handles->devattr); 805 if (debug)
806 device_remove_file(&pd->dev, &handles->devattr);
802 kfree(handles); 807 kfree(handles);
803 handles = NULL; 808 handles = NULL;
804 } 809 }
@@ -808,6 +813,11 @@ static int sony_nc_handles_cleanup(struct platform_device *pd)
808static int sony_find_snc_handle(int handle) 813static int sony_find_snc_handle(int handle)
809{ 814{
810 int i; 815 int i;
816
817 /* not initialized yet, return early */
818 if (!handles)
819 return -1;
820
811 for (i = 0; i < 0x10; i++) { 821 for (i = 0; i < 0x10; i++) {
812 if (handles->cap[i] == handle) { 822 if (handles->cap[i] == handle) {
813 dprintk("found handle 0x%.4x (offset: 0x%.2x)\n", 823 dprintk("found handle 0x%.4x (offset: 0x%.2x)\n",
@@ -1168,6 +1178,9 @@ static int sony_nc_resume(struct acpi_device *device)
1168 /* re-read rfkill state */ 1178 /* re-read rfkill state */
1169 sony_nc_rfkill_update(); 1179 sony_nc_rfkill_update();
1170 1180
1181 /* restore kbd backlight states */
1182 sony_nc_kbd_backlight_resume();
1183
1171 return 0; 1184 return 0;
1172} 1185}
1173 1186
@@ -1355,6 +1368,7 @@ out_no_enum:
1355#define KBDBL_HANDLER 0x137 1368#define KBDBL_HANDLER 0x137
1356#define KBDBL_PRESENT 0xB00 1369#define KBDBL_PRESENT 0xB00
1357#define SET_MODE 0xC00 1370#define SET_MODE 0xC00
1371#define SET_STATE 0xD00
1358#define SET_TIMEOUT 0xE00 1372#define SET_TIMEOUT 0xE00
1359 1373
1360struct kbd_backlight { 1374struct kbd_backlight {
@@ -1377,6 +1391,10 @@ static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value)
1377 (value << 0x10) | SET_MODE, &result)) 1391 (value << 0x10) | SET_MODE, &result))
1378 return -EIO; 1392 return -EIO;
1379 1393
1394 /* Try to turn the light on/off immediately */
1395 sony_call_snc_handle(KBDBL_HANDLER, (value << 0x10) | SET_STATE,
1396 &result);
1397
1380 kbdbl_handle->mode = value; 1398 kbdbl_handle->mode = value;
1381 1399
1382 return 0; 1400 return 0;
@@ -1458,7 +1476,7 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd)
1458{ 1476{
1459 int result; 1477 int result;
1460 1478
1461 if (sony_call_snc_handle(0x137, KBDBL_PRESENT, &result)) 1479 if (sony_call_snc_handle(KBDBL_HANDLER, KBDBL_PRESENT, &result))
1462 return 0; 1480 return 0;
1463 if (!(result & 0x02)) 1481 if (!(result & 0x02))
1464 return 0; 1482 return 0;
@@ -1501,13 +1519,36 @@ outkzalloc:
1501static int sony_nc_kbd_backlight_cleanup(struct platform_device *pd) 1519static int sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
1502{ 1520{
1503 if (kbdbl_handle) { 1521 if (kbdbl_handle) {
1522 int result;
1523
1504 device_remove_file(&pd->dev, &kbdbl_handle->mode_attr); 1524 device_remove_file(&pd->dev, &kbdbl_handle->mode_attr);
1505 device_remove_file(&pd->dev, &kbdbl_handle->timeout_attr); 1525 device_remove_file(&pd->dev, &kbdbl_handle->timeout_attr);
1526
1527 /* restore the default hw behaviour */
1528 sony_call_snc_handle(KBDBL_HANDLER, 0x1000 | SET_MODE, &result);
1529 sony_call_snc_handle(KBDBL_HANDLER, SET_TIMEOUT, &result);
1530
1506 kfree(kbdbl_handle); 1531 kfree(kbdbl_handle);
1507 } 1532 }
1508 return 0; 1533 return 0;
1509} 1534}
1510 1535
1536static void sony_nc_kbd_backlight_resume(void)
1537{
1538 int ignore = 0;
1539
1540 if (!kbdbl_handle)
1541 return;
1542
1543 if (kbdbl_handle->mode == 0)
1544 sony_call_snc_handle(KBDBL_HANDLER, SET_MODE, &ignore);
1545
1546 if (kbdbl_handle->timeout != 0)
1547 sony_call_snc_handle(KBDBL_HANDLER,
1548 (kbdbl_handle->timeout << 0x10) | SET_TIMEOUT,
1549 &ignore);
1550}
1551
1511static void sony_nc_backlight_setup(void) 1552static void sony_nc_backlight_setup(void)
1512{ 1553{
1513 acpi_handle unused; 1554 acpi_handle unused;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index a08561f5349e..efb3b6b9bcdb 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -8618,8 +8618,7 @@ static bool __pure __init tpacpi_is_valid_fw_id(const char* const s,
 		tpacpi_is_fw_digit(s[1]) &&
 		s[2] == t && s[3] == 'T' &&
 		tpacpi_is_fw_digit(s[4]) &&
-		tpacpi_is_fw_digit(s[5]) &&
-		s[6] == 'W' && s[7] == 'W';
+		tpacpi_is_fw_digit(s[5]);
 }
 
 /* returns 0 - probe ok, or < 0 - probe error.
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index 5825370bad25..08de58e7f59f 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -1555,7 +1555,7 @@ static int stop_queue(struct pl022 *pl022)
 	 * A wait_queue on the pl022->busy could be used, but then the common
 	 * execution path (pump_messages) would be required to call wake_up or
 	 * friends on every SPI message. Do this instead */
-	while (!list_empty(&pl022->queue) && pl022->busy && limit--) {
+	while ((!list_empty(&pl022->queue) || pl022->busy) && limit--) {
 		spin_unlock_irqrestore(&pl022->queue_lock, flags);
 		msleep(10);
 		spin_lock_irqsave(&pl022->queue_lock, flags);
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c
index b1a4b9f503ae..871e337c917f 100644
--- a/drivers/spi/dw_spi.c
+++ b/drivers/spi/dw_spi.c
@@ -821,7 +821,7 @@ static int stop_queue(struct dw_spi *dws)
 
 	spin_lock_irqsave(&dws->lock, flags);
 	dws->run = QUEUE_STOPPED;
-	while (!list_empty(&dws->queue) && dws->busy && limit--) {
+	while ((!list_empty(&dws->queue) || dws->busy) && limit--) {
 		spin_unlock_irqrestore(&dws->lock, flags);
 		msleep(10);
 		spin_lock_irqsave(&dws->lock, flags);
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 9c74aad6be93..dc25bee8d33f 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -1493,7 +1493,7 @@ static int stop_queue(struct driver_data *drv_data)
 	 * execution path (pump_messages) would be required to call wake_up or
 	 * friends on every SPI message. Do this instead */
 	drv_data->run = QUEUE_STOPPED;
-	while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
+	while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) {
 		spin_unlock_irqrestore(&drv_data->lock, flags);
 		msleep(10);
 		spin_lock_irqsave(&drv_data->lock, flags);
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c
index bdb7289a1d22..f706dba165cf 100644
--- a/drivers/spi/spi_bfin5xx.c
+++ b/drivers/spi/spi_bfin5xx.c
@@ -1284,7 +1284,7 @@ static inline int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data)
 	 * friends on every SPI message. Do this instead
 	 */
 	drv_data->running = false;
-	while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
+	while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) {
 		spin_unlock_irqrestore(&drv_data->lock, flags);
 		msleep(10);
 		spin_lock_irqsave(&drv_data->lock, flags);
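
All four stop_queue() fixes above correct the same inverted wait condition: the old "!list_empty(&queue) && busy" loop gave up as soon as either the queue drained or the controller went idle, even though work could still be pending on the other side; the new "(!list_empty(&queue) || busy)" form keeps polling until both are false or the retry limit expires. A toy, runnable model of the corrected loop (plain C; queue_len and busy stand in for the driver's list and busy flag):

#include <stdbool.h>
#include <stdio.h>

/* Keep polling while there is still queued work OR the engine is busy,
 * up to a bounded number of retries, as the fixed drivers do. */
static int stop_queue(int queue_len, bool busy)
{
	int limit = 500;

	while ((queue_len > 0 || busy) && limit--) {
		/* msleep(10) in the drivers; simulate progress here. */
		if (queue_len > 0)
			queue_len--;
		else
			busy = false;
	}
	return (queue_len > 0 || busy) ? -1 /* -EBUSY */ : 0;
}

int main(void)
{
	/* The old '&&' condition would have returned immediately here,
	 * because the queue is already empty while the engine is busy. */
	printf("stop_queue: %d\n", stop_queue(0, true));
	return 0;
}
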
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index dca4a0bb6ca9..e3786f161bc3 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -131,8 +131,6 @@ source "drivers/staging/wlags49_h2/Kconfig"
 
 source "drivers/staging/wlags49_h25/Kconfig"
 
-source "drivers/staging/samsung-laptop/Kconfig"
-
 source "drivers/staging/sm7xx/Kconfig"
 
 source "drivers/staging/dt3155v4l/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index eb93012b6f59..f0d5c5315612 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -48,7 +48,6 @@ obj-$(CONFIG_XVMALLOC) += zram/
 obj-$(CONFIG_ZCACHE)		+= zcache/
 obj-$(CONFIG_WLAGS49_H2)	+= wlags49_h2/
 obj-$(CONFIG_WLAGS49_H25)	+= wlags49_h25/
-obj-$(CONFIG_SAMSUNG_LAPTOP)	+= samsung-laptop/
 obj-$(CONFIG_FB_SM7XX)		+= sm7xx/
 obj-$(CONFIG_VIDEO_DT3155)	+= dt3155v4l/
 obj-$(CONFIG_CRYSTALHD)		+= crystalhd/
diff --git a/drivers/staging/samsung-laptop/Kconfig b/drivers/staging/samsung-laptop/Kconfig
deleted file mode 100644
index f27c60864c26..000000000000
--- a/drivers/staging/samsung-laptop/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-config SAMSUNG_LAPTOP
-	tristate "Samsung Laptop driver"
-	default n
-	depends on RFKILL && BACKLIGHT_CLASS_DEVICE && X86
-	help
-	  This module implements a driver for the N128 Samsung Laptop
-	  providing control over the Wireless LED and the LCD backlight
-
-	  To compile this driver as a module, choose
-	  M here: the module will be called samsung-laptop.
diff --git a/drivers/staging/samsung-laptop/Makefile b/drivers/staging/samsung-laptop/Makefile
deleted file mode 100644
index 3c6f42045211..000000000000
--- a/drivers/staging/samsung-laptop/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_SAMSUNG_LAPTOP)	+= samsung-laptop.o
diff --git a/drivers/staging/samsung-laptop/TODO b/drivers/staging/samsung-laptop/TODO
deleted file mode 100644
index f7a6d589916e..000000000000
--- a/drivers/staging/samsung-laptop/TODO
+++ /dev/null
@@ -1,5 +0,0 @@
-TODO:
-	- review from other developers
-	- figure out ACPI video issues
-
-Please send patches to Greg Kroah-Hartman <gregkh@suse.de>
diff --git a/drivers/staging/samsung-laptop/samsung-laptop.c b/drivers/staging/samsung-laptop/samsung-laptop.c
deleted file mode 100644
index 25294462b8b6..000000000000
--- a/drivers/staging/samsung-laptop/samsung-laptop.c
+++ /dev/null
@@ -1,843 +0,0 @@
-/*
- * Samsung Laptop driver
- *
- * Copyright (C) 2009,2011 Greg Kroah-Hartman (gregkh@suse.de)
- * Copyright (C) 2009,2011 Novell Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/backlight.h>
-#include <linux/fb.h>
-#include <linux/dmi.h>
-#include <linux/platform_device.h>
-#include <linux/rfkill.h>
-
-/*
- * This driver is needed because a number of Samsung laptops do not hook
- * their control settings through ACPI. So we have to poke around in the
- * BIOS to do things like brightness values, and "special" key controls.
- */
-
-/*
- * We have 0 - 8 as valid brightness levels. The specs say that level 0 should
- * be reserved by the BIOS (which really doesn't make much sense), we tell
- * userspace that the value is 0 - 7 and then just tell the hardware 1 - 8
- */
-#define MAX_BRIGHT	0x07
-
-
-#define SABI_IFACE_MAIN			0x00
-#define SABI_IFACE_SUB			0x02
-#define SABI_IFACE_COMPLETE		0x04
-#define SABI_IFACE_DATA			0x05
-
-/* Structure to get data back to the calling function */
-struct sabi_retval {
-	u8 retval[20];
-};
-
-struct sabi_header_offsets {
-	u8 port;
-	u8 re_mem;
-	u8 iface_func;
-	u8 en_mem;
-	u8 data_offset;
-	u8 data_segment;
-};
-
-struct sabi_commands {
-	/*
-	 * Brightness is 0 - 8, as described above.
-	 * Value 0 is for the BIOS to use
-	 */
-	u8 get_brightness;
-	u8 set_brightness;
-
-	/*
-	 * first byte:
-	 * 0x00 - wireless is off
-	 * 0x01 - wireless is on
-	 * second byte:
-	 * 0x02 - 3G is off
-	 * 0x03 - 3G is on
-	 * TODO, verify 3G is correct, that doesn't seem right...
-	 */
-	u8 get_wireless_button;
-	u8 set_wireless_button;
-
-	/* 0 is off, 1 is on */
-	u8 get_backlight;
-	u8 set_backlight;
-
-	/*
-	 * 0x80 or 0x00 - no action
-	 * 0x81 - recovery key pressed
-	 */
-	u8 get_recovery_mode;
-	u8 set_recovery_mode;
-
-	/*
-	 * on seclinux: 0 is low, 1 is high,
-	 * on swsmi: 0 is normal, 1 is silent, 2 is turbo
-	 */
-	u8 get_performance_level;
-	u8 set_performance_level;
-
-	/*
-	 * Tell the BIOS that Linux is running on this machine.
-	 * 81 is on, 80 is off
-	 */
-	u8 set_linux;
-};
-
-struct sabi_performance_level {
-	const char *name;
-	u8 value;
-};
-
-struct sabi_config {
-	const char *test_string;
-	u16 main_function;
-	const struct sabi_header_offsets header_offsets;
-	const struct sabi_commands commands;
-	const struct sabi_performance_level performance_levels[4];
-	u8 min_brightness;
-	u8 max_brightness;
-};
-
-static const struct sabi_config sabi_configs[] = {
-	{
-		.test_string = "SECLINUX",
-
-		.main_function = 0x4c49,
-
-		.header_offsets = {
-			.port = 0x00,
-			.re_mem = 0x02,
-			.iface_func = 0x03,
-			.en_mem = 0x04,
-			.data_offset = 0x05,
-			.data_segment = 0x07,
-		},
-
-		.commands = {
-			.get_brightness = 0x00,
-			.set_brightness = 0x01,
-
-			.get_wireless_button = 0x02,
-			.set_wireless_button = 0x03,
-
-			.get_backlight = 0x04,
-			.set_backlight = 0x05,
-
-			.get_recovery_mode = 0x06,
-			.set_recovery_mode = 0x07,
-
-			.get_performance_level = 0x08,
-			.set_performance_level = 0x09,
-
-			.set_linux = 0x0a,
-		},
-
-		.performance_levels = {
-			{
-				.name = "silent",
-				.value = 0,
-			},
-			{
-				.name = "normal",
-				.value = 1,
-			},
-			{ },
-		},
-		.min_brightness = 1,
-		.max_brightness = 8,
-	},
-	{
-		.test_string = "SwSmi@",
-
-		.main_function = 0x5843,
-
-		.header_offsets = {
-			.port = 0x00,
-			.re_mem = 0x04,
-			.iface_func = 0x02,
-			.en_mem = 0x03,
-			.data_offset = 0x05,
-			.data_segment = 0x07,
-		},
-
-		.commands = {
-			.get_brightness = 0x10,
-			.set_brightness = 0x11,
-
-			.get_wireless_button = 0x12,
-			.set_wireless_button = 0x13,
-
-			.get_backlight = 0x2d,
-			.set_backlight = 0x2e,
-
-			.get_recovery_mode = 0xff,
-			.set_recovery_mode = 0xff,
-
-			.get_performance_level = 0x31,
-			.set_performance_level = 0x32,
-
-			.set_linux = 0xff,
-		},
-
-		.performance_levels = {
-			{
-				.name = "normal",
-				.value = 0,
-			},
-			{
-				.name = "silent",
-				.value = 1,
-			},
-			{
-				.name = "overclock",
-				.value = 2,
-			},
-			{ },
-		},
-		.min_brightness = 0,
-		.max_brightness = 8,
-	},
-	{ },
-};
-
-static const struct sabi_config *sabi_config;
-
-static void __iomem *sabi;
-static void __iomem *sabi_iface;
-static void __iomem *f0000_segment;
-static struct backlight_device *backlight_device;
-static struct mutex sabi_mutex;
-static struct platform_device *sdev;
-static struct rfkill *rfk;
-
-static int force;
-module_param(force, bool, 0);
-MODULE_PARM_DESC(force,
-		"Disable the DMI check and forces the driver to be loaded");
-
-static int debug;
-module_param(debug, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Debug enabled or not");
-
-static int sabi_get_command(u8 command, struct sabi_retval *sretval)
-{
-	int retval = 0;
-	u16 port = readw(sabi + sabi_config->header_offsets.port);
-	u8 complete, iface_data;
-
-	mutex_lock(&sabi_mutex);
-
-	/* enable memory to be able to write to it */
-	outb(readb(sabi + sabi_config->header_offsets.en_mem), port);
-
-	/* write out the command */
-	writew(sabi_config->main_function, sabi_iface + SABI_IFACE_MAIN);
-	writew(command, sabi_iface + SABI_IFACE_SUB);
-	writeb(0, sabi_iface + SABI_IFACE_COMPLETE);
-	outb(readb(sabi + sabi_config->header_offsets.iface_func), port);
-
-	/* write protect memory to make it safe */
-	outb(readb(sabi + sabi_config->header_offsets.re_mem), port);
-
-	/* see if the command actually succeeded */
-	complete = readb(sabi_iface + SABI_IFACE_COMPLETE);
-	iface_data = readb(sabi_iface + SABI_IFACE_DATA);
-	if (complete != 0xaa || iface_data == 0xff) {
-		pr_warn("SABI get command 0x%02x failed with completion flag 0x%02x and data 0x%02x\n",
-			command, complete, iface_data);
-		retval = -EINVAL;
-		goto exit;
-	}
-	/*
-	 * Save off the data into a structure so the caller use it.
-	 * Right now we only want the first 4 bytes,
-	 * There are commands that need more, but not for the ones we
-	 * currently care about.
-	 */
-	sretval->retval[0] = readb(sabi_iface + SABI_IFACE_DATA);
-	sretval->retval[1] = readb(sabi_iface + SABI_IFACE_DATA + 1);
-	sretval->retval[2] = readb(sabi_iface + SABI_IFACE_DATA + 2);
-	sretval->retval[3] = readb(sabi_iface + SABI_IFACE_DATA + 3);
-
-exit:
-	mutex_unlock(&sabi_mutex);
-	return retval;
-
-}
-
-static int sabi_set_command(u8 command, u8 data)
-{
-	int retval = 0;
-	u16 port = readw(sabi + sabi_config->header_offsets.port);
-	u8 complete, iface_data;
-
-	mutex_lock(&sabi_mutex);
-
-	/* enable memory to be able to write to it */
-	outb(readb(sabi + sabi_config->header_offsets.en_mem), port);
-
-	/* write out the command */
-	writew(sabi_config->main_function, sabi_iface + SABI_IFACE_MAIN);
-	writew(command, sabi_iface + SABI_IFACE_SUB);
-	writeb(0, sabi_iface + SABI_IFACE_COMPLETE);
-	writeb(data, sabi_iface + SABI_IFACE_DATA);
-	outb(readb(sabi + sabi_config->header_offsets.iface_func), port);
-
-	/* write protect memory to make it safe */
-	outb(readb(sabi + sabi_config->header_offsets.re_mem), port);
-
-	/* see if the command actually succeeded */
-	complete = readb(sabi_iface + SABI_IFACE_COMPLETE);
-	iface_data = readb(sabi_iface + SABI_IFACE_DATA);
-	if (complete != 0xaa || iface_data == 0xff) {
-		pr_warn("SABI set command 0x%02x failed with completion flag 0x%02x and data 0x%02x\n",
-			command, complete, iface_data);
-		retval = -EINVAL;
-	}
-
-	mutex_unlock(&sabi_mutex);
-	return retval;
-}
-
-static void test_backlight(void)
-{
-	struct sabi_retval sretval;
-
-	sabi_get_command(sabi_config->commands.get_backlight, &sretval);
-	printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]);
-
-	sabi_set_command(sabi_config->commands.set_backlight, 0);
-	printk(KERN_DEBUG "backlight should be off\n");
-
-	sabi_get_command(sabi_config->commands.get_backlight, &sretval);
-	printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]);
-
-	msleep(1000);
-
-	sabi_set_command(sabi_config->commands.set_backlight, 1);
-	printk(KERN_DEBUG "backlight should be on\n");
-
-	sabi_get_command(sabi_config->commands.get_backlight, &sretval);
-	printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]);
-}
-
-static void test_wireless(void)
-{
-	struct sabi_retval sretval;
-
-	sabi_get_command(sabi_config->commands.get_wireless_button, &sretval);
-	printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]);
-
-	sabi_set_command(sabi_config->commands.set_wireless_button, 0);
-	printk(KERN_DEBUG "wireless led should be off\n");
-
-	sabi_get_command(sabi_config->commands.get_wireless_button, &sretval);
-	printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]);
-
-	msleep(1000);
-
-	sabi_set_command(sabi_config->commands.set_wireless_button, 1);
-	printk(KERN_DEBUG "wireless led should be on\n");
-
-	sabi_get_command(sabi_config->commands.get_wireless_button, &sretval);
-	printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]);
-}
-
-static u8 read_brightness(void)
-{
-	struct sabi_retval sretval;
-	int user_brightness = 0;
-	int retval;
-
-	retval = sabi_get_command(sabi_config->commands.get_brightness,
-				  &sretval);
-	if (!retval) {
-		user_brightness = sretval.retval[0];
-		if (user_brightness != 0)
-			user_brightness -= sabi_config->min_brightness;
-	}
-	return user_brightness;
-}
-
-static void set_brightness(u8 user_brightness)
-{
-	u8 user_level = user_brightness - sabi_config->min_brightness;
-
-	sabi_set_command(sabi_config->commands.set_brightness, user_level);
-}
-
-static int get_brightness(struct backlight_device *bd)
-{
-	return (int)read_brightness();
-}
-
-static int update_status(struct backlight_device *bd)
-{
-	set_brightness(bd->props.brightness);
-
-	if (bd->props.power == FB_BLANK_UNBLANK)
-		sabi_set_command(sabi_config->commands.set_backlight, 1);
-	else
-		sabi_set_command(sabi_config->commands.set_backlight, 0);
-	return 0;
-}
-
-static const struct backlight_ops backlight_ops = {
-	.get_brightness	= get_brightness,
-	.update_status	= update_status,
-};
-
-static int rfkill_set(void *data, bool blocked)
-{
-	/* Do something with blocked...*/
-	/*
-	 * blocked == false is on
-	 * blocked == true is off
-	 */
-	if (blocked)
-		sabi_set_command(sabi_config->commands.set_wireless_button, 0);
-	else
-		sabi_set_command(sabi_config->commands.set_wireless_button, 1);
-
-	return 0;
-}
-
-static struct rfkill_ops rfkill_ops = {
-	.set_block = rfkill_set,
-};
-
-static int init_wireless(struct platform_device *sdev)
-{
-	int retval;
-
-	rfk = rfkill_alloc("samsung-wifi", &sdev->dev, RFKILL_TYPE_WLAN,
-			   &rfkill_ops, NULL);
-	if (!rfk)
-		return -ENOMEM;
-
-	retval = rfkill_register(rfk);
-	if (retval) {
-		rfkill_destroy(rfk);
-		return -ENODEV;
-	}
-
-	return 0;
-}
-
-static void destroy_wireless(void)
-{
-	rfkill_unregister(rfk);
-	rfkill_destroy(rfk);
-}
-
-static ssize_t get_performance_level(struct device *dev,
-				struct device_attribute *attr, char *buf)
-{
-	struct sabi_retval sretval;
-	int retval;
-	int i;
-
-	/* Read the state */
-	retval = sabi_get_command(sabi_config->commands.get_performance_level,
-				  &sretval);
-	if (retval)
-		return retval;
-
-	/* The logic is backwards, yeah, lots of fun... */
-	for (i = 0; sabi_config->performance_levels[i].name; ++i) {
-		if (sretval.retval[0] == sabi_config->performance_levels[i].value)
-			return sprintf(buf, "%s\n", sabi_config->performance_levels[i].name);
-	}
-	return sprintf(buf, "%s\n", "unknown");
-}
-
-static ssize_t set_performance_level(struct device *dev,
-				struct device_attribute *attr, const char *buf,
-				size_t count)
-{
-	if (count >= 1) {
-		int i;
-		for (i = 0; sabi_config->performance_levels[i].name; ++i) {
-			const struct sabi_performance_level *level =
-				&sabi_config->performance_levels[i];
-			if (!strncasecmp(level->name, buf, strlen(level->name))) {
-				sabi_set_command(sabi_config->commands.set_performance_level,
-						 level->value);
-				break;
-			}
-		}
-		if (!sabi_config->performance_levels[i].name)
-			return -EINVAL;
-	}
-	return count;
-}
-static DEVICE_ATTR(performance_level, S_IWUSR | S_IRUGO,
-		   get_performance_level, set_performance_level);
-
-
-static int __init dmi_check_cb(const struct dmi_system_id *id)
-{
-	pr_info("found laptop model '%s'\n",
-		id->ident);
-	return 0;
-}
-
-static struct dmi_system_id __initdata samsung_dmi_table[] = {
-	{
-		.ident = "N128",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR,
-					"SAMSUNG ELECTRONICS CO., LTD."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "N128"),
-			DMI_MATCH(DMI_BOARD_NAME, "N128"),
-		},
-		.callback = dmi_check_cb,
-	},
-	{
-		.ident = "N130",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR,
-					"SAMSUNG ELECTRONICS CO., LTD."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "N130"),
-			DMI_MATCH(DMI_BOARD_NAME, "N130"),
-		},
-		.callback = dmi_check_cb,
-	},
-	{
-		.ident = "X125",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR,
-					"SAMSUNG ELECTRONICS CO., LTD."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "X125"),
-			DMI_MATCH(DMI_BOARD_NAME, "X125"),
-		},
-		.callback = dmi_check_cb,
-	},
-	{
-		.ident = "X120/X170",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR,
-					"SAMSUNG ELECTRONICS CO., LTD."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "X120/X170"),
-			DMI_MATCH(DMI_BOARD_NAME, "X120/X170"),
-		},
-		.callback = dmi_check_cb,
-	},
-	{
-		.ident = "NC10",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR,
-					"SAMSUNG ELECTRONICS CO., LTD."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "NC10"),
-			DMI_MATCH(DMI_BOARD_NAME, "NC10"),
-		},
-		.callback = dmi_check_cb,
-	},
-	{
-		.ident = "NP-Q45",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR,
-					"SAMSUNG ELECTRONICS CO., LTD."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "SQ45S70S"),
-			DMI_MATCH(DMI_BOARD_NAME, "SQ45S70S"),
-		},
-		.callback = dmi_check_cb,
-	},
-	{
-		.ident = "X360",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR,
-					"SAMSUNG ELECTRONICS CO., LTD."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "X360"),
-			DMI_MATCH(DMI_BOARD_NAME, "X360"),
-		},
-		.callback = dmi_check_cb,
-	},
-	{
-		.ident = "R410 Plus",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR,
-					"SAMSUNG ELECTRONICS CO., LTD."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "R410P"),
-			DMI_MATCH(DMI_BOARD_NAME, "R460"),
-		},
-		.callback = dmi_check_cb,
-	},
-	{
-		.ident = "R518",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR,
-					"SAMSUNG ELECTRONICS CO., LTD."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "R518"),
-			DMI_MATCH(DMI_BOARD_NAME, "R518"),
-		},
-		.callback = dmi_check_cb,
-	},
-	{
-		.ident = "R519/R719",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR,
-					"SAMSUNG ELECTRONICS CO., LTD."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "R519/R719"),
-			DMI_MATCH(DMI_BOARD_NAME, "R519/R719"),
-		},
-		.callback = dmi_check_cb,
-	},
-	{
-		.ident = "N150/N210/N220/N230",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR,
-					"SAMSUNG ELECTRONICS CO., LTD."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220/N230"),
-			DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220/N230"),
-		},
-		.callback = dmi_check_cb,
-	},
-	{
-		.ident = "N150P/N210P/N220P",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR,
-					"SAMSUNG ELECTRONICS CO., LTD."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "N150P/N210P/N220P"),
-			DMI_MATCH(DMI_BOARD_NAME, "N150P/N210P/N220P"),
-		},
-		.callback = dmi_check_cb,
-	},
-	{
-		.ident = "R530/R730",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "R530/R730"),
-			DMI_MATCH(DMI_BOARD_NAME, "R530/R730"),
-		},
-		.callback = dmi_check_cb,
-	},
-	{
-		.ident = "NF110/NF210/NF310",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "NF110/NF210/NF310"),
-			DMI_MATCH(DMI_BOARD_NAME, "NF110/NF210/NF310"),
-		},
-		.callback = dmi_check_cb,
-	},
-	{
-		.ident = "N145P/N250P/N260P",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"),
-			DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"),
-		},
-		.callback = dmi_check_cb,
-	},
-	{
-		.ident = "R70/R71",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR,
-					"SAMSUNG ELECTRONICS CO., LTD."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "R70/R71"),
-			DMI_MATCH(DMI_BOARD_NAME, "R70/R71"),
-		},
-		.callback = dmi_check_cb,
-	},
-	{
-		.ident = "P460",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "P460"),
-			DMI_MATCH(DMI_BOARD_NAME, "P460"),
-		},
-		.callback = dmi_check_cb,
-	},
-	{ },
-};
-MODULE_DEVICE_TABLE(dmi, samsung_dmi_table);
-
-static int find_signature(void __iomem *memcheck, const char *testStr)
-{
-	int i = 0;
-	int loca;
-
-	for (loca = 0; loca < 0xffff; loca++) {
-		char temp = readb(memcheck + loca);
-
-		if (temp == testStr[i]) {
-			if (i == strlen(testStr)-1)
-				break;
-			++i;
-		} else {
-			i = 0;
-		}
-	}
-	return loca;
-}
-
-static int __init samsung_init(void)
-{
-	struct backlight_properties props;
-	struct sabi_retval sretval;
-	unsigned int ifaceP;
-	int i;
-	int loca;
-	int retval;
-
-	mutex_init(&sabi_mutex);
-
-	if (!force && !dmi_check_system(samsung_dmi_table))
-		return -ENODEV;
-
-	f0000_segment = ioremap_nocache(0xf0000, 0xffff);
-	if (!f0000_segment) {
-		pr_err("Can't map the segment at 0xf0000\n");
-		return -EINVAL;
-	}
-
-	/* Try to find one of the signatures in memory to find the header */
-	for (i = 0; sabi_configs[i].test_string != 0; ++i) {
-		sabi_config = &sabi_configs[i];
-		loca = find_signature(f0000_segment, sabi_config->test_string);
-		if (loca != 0xffff)
-			break;
-	}
-
-	if (loca == 0xffff) {
-		pr_err("This computer does not support SABI\n");
-		goto error_no_signature;
-	}
-
-	/* point to the SMI port Number */
-	loca += 1;
-	sabi = (f0000_segment + loca);
-
-	if (debug) {
-		printk(KERN_DEBUG "This computer supports SABI==%x\n",
-			loca + 0xf0000 - 6);
-		printk(KERN_DEBUG "SABI header:\n");
-		printk(KERN_DEBUG " SMI Port Number = 0x%04x\n",
-			readw(sabi + sabi_config->header_offsets.port));
-		printk(KERN_DEBUG " SMI Interface Function = 0x%02x\n",
-			readb(sabi + sabi_config->header_offsets.iface_func));
-		printk(KERN_DEBUG " SMI enable memory buffer = 0x%02x\n",
-			readb(sabi + sabi_config->header_offsets.en_mem));
-		printk(KERN_DEBUG " SMI restore memory buffer = 0x%02x\n",
-			readb(sabi + sabi_config->header_offsets.re_mem));
-		printk(KERN_DEBUG " SABI data offset = 0x%04x\n",
-			readw(sabi + sabi_config->header_offsets.data_offset));
-		printk(KERN_DEBUG " SABI data segment = 0x%04x\n",
-			readw(sabi + sabi_config->header_offsets.data_segment));
-	}
-
-	/* Get a pointer to the SABI Interface */
-	ifaceP = (readw(sabi + sabi_config->header_offsets.data_segment) & 0x0ffff) << 4;
-	ifaceP += readw(sabi + sabi_config->header_offsets.data_offset) & 0x0ffff;
-	sabi_iface = ioremap_nocache(ifaceP, 16);
-	if (!sabi_iface) {
-		pr_err("Can't remap %x\n", ifaceP);
-		goto exit;
-	}
-	if (debug) {
-		printk(KERN_DEBUG "ifaceP = 0x%08x\n", ifaceP);
-		printk(KERN_DEBUG "sabi_iface = %p\n", sabi_iface);
-
-		test_backlight();
-		test_wireless();
-
-		retval = sabi_get_command(sabi_config->commands.get_brightness,
-					  &sretval);
-		printk(KERN_DEBUG "brightness = 0x%02x\n", sretval.retval[0]);
-	}
-
-	/* Turn on "Linux" mode in the BIOS */
-	if (sabi_config->commands.set_linux != 0xff) {
-		retval = sabi_set_command(sabi_config->commands.set_linux,
-					  0x81);
-		if (retval) {
-			pr_warn("Linux mode was not set!\n");
-			goto error_no_platform;
-		}
-	}
-
-	/* knock up a platform device to hang stuff off of */
-	sdev = platform_device_register_simple("samsung", -1, NULL, 0);
-	if (IS_ERR(sdev))
-		goto error_no_platform;
-
-	/* create a backlight device to talk to this one */
-	memset(&props, 0, sizeof(struct backlight_properties));
-	props.type = BACKLIGHT_PLATFORM;
-	props.max_brightness = sabi_config->max_brightness;
-	backlight_device = backlight_device_register("samsung", &sdev->dev,
-						     NULL, &backlight_ops,
-						     &props);
-	if (IS_ERR(backlight_device))
-		goto error_no_backlight;
-
-	backlight_device->props.brightness = read_brightness();
-	backlight_device->props.power = FB_BLANK_UNBLANK;
-	backlight_update_status(backlight_device);
-
-	retval = init_wireless(sdev);
-	if (retval)
-		goto error_no_rfk;
-
-	retval = device_create_file(&sdev->dev, &dev_attr_performance_level);
-	if (retval)
-		goto error_file_create;
-
-exit:
-	return 0;
-
-error_file_create:
-	destroy_wireless();
-
-error_no_rfk:
-	backlight_device_unregister(backlight_device);
-
-error_no_backlight:
-	platform_device_unregister(sdev);
-
-error_no_platform:
-	iounmap(sabi_iface);
-
-error_no_signature:
-	iounmap(f0000_segment);
-	return -EINVAL;
-}
-
-static void __exit samsung_exit(void)
-{
-	/* Turn off "Linux" mode in the BIOS */
827 if (sabi_config->commands.set_linux != 0xff)
828 sabi_set_command(sabi_config->commands.set_linux, 0x80);
829
830 device_remove_file(&sdev->dev, &dev_attr_performance_level);
831 backlight_device_unregister(backlight_device);
832 destroy_wireless();
833 iounmap(sabi_iface);
834 iounmap(f0000_segment);
835 platform_device_unregister(sdev);
836}
837
838module_init(samsung_init);
839module_exit(samsung_exit);
840
841MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@suse.de>");
842MODULE_DESCRIPTION("Samsung Backlight driver");
843MODULE_LICENSE("GPL");
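For reference, the SABI interface pointer computed in samsung_init() above is plain real-mode segment:offset arithmetic: the 16-bit data segment read from the header is shifted left four bits and the 16-bit offset added. A minimal userspace sketch of that calculation (the segment/offset values are hypothetical, not read from any real header):

#include <stdint.h>
#include <stdio.h>

/* Real-mode linear address = segment * 16 + offset. */
static uint32_t sabi_linear_address(uint16_t data_segment, uint16_t data_offset)
{
	return ((uint32_t)data_segment << 4) + data_offset;
}

int main(void)
{
	/* e.g. segment 0xf010, offset 0x0020 -> 0xf0120 */
	printf("0x%05x\n", sabi_linear_address(0xf010, 0x0020));
	return 0;
}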
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 42d6c930cc87..33167b43ac7e 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -912,8 +912,7 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,
 			      unsigned long irqflags,
 			      const char *devname, void *dev_id)
 {
-	unsigned int irq;
-	int retval;
+	int irq, retval;
 
 	irq = bind_evtchn_to_irq(evtchn);
 	if (irq < 0)
@@ -955,8 +954,7 @@ int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
 			    irq_handler_t handler,
 			    unsigned long irqflags, const char *devname, void *dev_id)
 {
-	unsigned int irq;
-	int retval;
+	int irq, retval;
 
 	irq = bind_virq_to_irq(virq, cpu);
 	if (irq < 0)
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 95143dd6904d..1ac94125bf93 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -61,7 +61,7 @@ static void xen_post_suspend(int cancelled)
 	xen_mm_unpin_all();
 }
 
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
 static int xen_suspend(void *data)
 {
 	struct suspend_info *si = data;
@@ -173,7 +173,7 @@ out:
 #endif
 	shutting_down = SHUTDOWN_INVALID;
 }
-#endif	/* CONFIG_HIBERNATION */
+#endif	/* CONFIG_HIBERNATE_CALLBACKS */
 
 struct shutdown_handler {
 	const char *command;
@@ -202,7 +202,7 @@ static void shutdown_handler(struct xenbus_watch *watch,
 	{ "poweroff",	do_poweroff },
 	{ "halt",	do_poweroff },
 	{ "reboot",	do_reboot   },
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
 	{ "suspend",	do_suspend  },
 #endif
 	{NULL, NULL},
diff --git a/fs/cifs/README b/fs/cifs/README
index fe1683590828..74ab165fc646 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -685,22 +685,6 @@ LinuxExtensionsEnabled If set to one then the client will attempt to
 		support and want to map the uid and gid fields
 		to values supplied at mount (rather than the
 		actual values, then set this to zero. (default 1)
-Experimental		When set to 1 used to enable certain experimental
-		features (currently enables multipage writes
-		when signing is enabled, the multipage write
-		performance enhancement was disabled when
-		signing turned on in case buffer was modified
-		just before it was sent, also this flag will
-		be used to use the new experimental directory change
-		notification code). When set to 2 enables
-		an additional experimental feature, "raw ntlmssp"
-		session establishment support (which allows
-		specifying "sec=ntlmssp" on mount). The Linux cifs
-		module will use ntlmv2 authentication encapsulated
-		in "raw ntlmssp" (not using SPNEGO) when
-		"sec=ntlmssp" is specified on mount.
-		This support also requires building cifs with
-		the CONFIG_CIFS_EXPERIMENTAL configuration flag.
 
 These experimental features and tracing can be enabled by changing flags in
 /proc/fs/cifs (after the cifs module has been installed or built into the
diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c
index e654dfd092c3..53d57a3fe427 100644
--- a/fs/cifs/cache.c
+++ b/fs/cifs/cache.c
@@ -50,7 +50,7 @@ void cifs_fscache_unregister(void)
  */
 struct cifs_server_key {
 	uint16_t	family;		/* address family */
-	uint16_t	port;		/* IP port */
+	__be16		port;		/* IP port */
 	union {
 		struct in_addr	ipv4_addr;
 		struct in6_addr	ipv6_addr;
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 65829d32128c..30d01bc90855 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -423,7 +423,6 @@ static const struct file_operations cifs_lookup_cache_proc_fops;
 static const struct file_operations traceSMB_proc_fops;
 static const struct file_operations cifs_multiuser_mount_proc_fops;
 static const struct file_operations cifs_security_flags_proc_fops;
-static const struct file_operations cifs_experimental_proc_fops;
 static const struct file_operations cifs_linux_ext_proc_fops;
 
 void
@@ -441,8 +440,6 @@ cifs_proc_init(void)
 	proc_create("cifsFYI", 0, proc_fs_cifs, &cifsFYI_proc_fops);
 	proc_create("traceSMB", 0, proc_fs_cifs, &traceSMB_proc_fops);
 	proc_create("OplockEnabled", 0, proc_fs_cifs, &cifs_oplock_proc_fops);
-	proc_create("Experimental", 0, proc_fs_cifs,
-		    &cifs_experimental_proc_fops);
 	proc_create("LinuxExtensionsEnabled", 0, proc_fs_cifs,
 		    &cifs_linux_ext_proc_fops);
 	proc_create("MultiuserMount", 0, proc_fs_cifs,
@@ -469,7 +466,6 @@ cifs_proc_clean(void)
 	remove_proc_entry("OplockEnabled", proc_fs_cifs);
 	remove_proc_entry("SecurityFlags", proc_fs_cifs);
 	remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs);
-	remove_proc_entry("Experimental", proc_fs_cifs);
 	remove_proc_entry("LookupCacheEnabled", proc_fs_cifs);
 	remove_proc_entry("fs/cifs", NULL);
 }
@@ -550,45 +546,6 @@ static const struct file_operations cifs_oplock_proc_fops = {
 	.write		= cifs_oplock_proc_write,
 };
 
-static int cifs_experimental_proc_show(struct seq_file *m, void *v)
-{
-	seq_printf(m, "%d\n", experimEnabled);
-	return 0;
-}
-
-static int cifs_experimental_proc_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, cifs_experimental_proc_show, NULL);
-}
-
-static ssize_t cifs_experimental_proc_write(struct file *file,
-		const char __user *buffer, size_t count, loff_t *ppos)
-{
-	char c;
-	int rc;
-
-	rc = get_user(c, buffer);
-	if (rc)
-		return rc;
-	if (c == '0' || c == 'n' || c == 'N')
-		experimEnabled = 0;
-	else if (c == '1' || c == 'y' || c == 'Y')
-		experimEnabled = 1;
-	else if (c == '2')
-		experimEnabled = 2;
-
-	return count;
-}
-
-static const struct file_operations cifs_experimental_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= cifs_experimental_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= cifs_experimental_proc_write,
-};
-
 static int cifs_linux_ext_proc_show(struct seq_file *m, void *v)
 {
 	seq_printf(m, "%d\n", linuxExtEnabled);
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index 4dfba8283165..33d221394aca 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -113,7 +113,7 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
 		   MAX_MECH_STR_LEN +
 		   UID_KEY_LEN + (sizeof(uid_t) * 2) +
 		   CREDUID_KEY_LEN + (sizeof(uid_t) * 2) +
-		   USER_KEY_LEN + strlen(sesInfo->userName) +
+		   USER_KEY_LEN + strlen(sesInfo->user_name) +
 		   PID_KEY_LEN + (sizeof(pid_t) * 2) + 1;
 
 	spnego_key = ERR_PTR(-ENOMEM);
@@ -153,7 +153,7 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
 	sprintf(dp, ";creduid=0x%x", sesInfo->cred_uid);
 
 	dp = description + strlen(description);
-	sprintf(dp, ";user=%s", sesInfo->userName);
+	sprintf(dp, ";user=%s", sesInfo->user_name);
 
 	dp = description + strlen(description);
 	sprintf(dp, ";pid=0x%x", current->pid);
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index fc0fd4fde306..23d43cde4306 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -90,7 +90,7 @@ cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp,
 	case UNI_COLON:
 		*target = ':';
 		break;
-	case UNI_ASTERIK:
+	case UNI_ASTERISK:
 		*target = '*';
 		break;
 	case UNI_QUESTION:
@@ -264,40 +264,40 @@ cifs_strndup_from_ucs(const char *src, const int maxlen, const bool is_unicode,
  * names are little endian 16 bit Unicode on the wire
  */
 int
-cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
+cifsConvertToUCS(__le16 *target, const char *source, int srclen,
 		 const struct nls_table *cp, int mapChars)
 {
 	int i, j, charlen;
-	int len_remaining = maxlen;
 	char src_char;
-	__u16 temp;
+	__le16 dst_char;
+	wchar_t tmp;
 
 	if (!mapChars)
 		return cifs_strtoUCS(target, source, PATH_MAX, cp);
 
-	for (i = 0, j = 0; i < maxlen; j++) {
+	for (i = 0, j = 0; i < srclen; j++) {
 		src_char = source[i];
 		switch (src_char) {
 		case 0:
-			put_unaligned_le16(0, &target[j]);
+			put_unaligned(0, &target[j]);
 			goto ctoUCS_out;
 		case ':':
-			temp = UNI_COLON;
+			dst_char = cpu_to_le16(UNI_COLON);
 			break;
 		case '*':
-			temp = UNI_ASTERIK;
+			dst_char = cpu_to_le16(UNI_ASTERISK);
 			break;
 		case '?':
-			temp = UNI_QUESTION;
+			dst_char = cpu_to_le16(UNI_QUESTION);
 			break;
 		case '<':
-			temp = UNI_LESSTHAN;
+			dst_char = cpu_to_le16(UNI_LESSTHAN);
 			break;
 		case '>':
-			temp = UNI_GRTRTHAN;
+			dst_char = cpu_to_le16(UNI_GRTRTHAN);
 			break;
 		case '|':
-			temp = UNI_PIPE;
+			dst_char = cpu_to_le16(UNI_PIPE);
 			break;
 		/*
 		 * FIXME: We can not handle remapping backslash (UNI_SLASH)
@@ -305,17 +305,17 @@ cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
 		 * as they use backslash as separator.
 		 */
 		default:
-			charlen = cp->char2uni(source+i, len_remaining,
-						&temp);
+			charlen = cp->char2uni(source + i, srclen - i, &tmp);
+			dst_char = cpu_to_le16(tmp);
+
 			/*
 			 * if no match, use question mark, which at least in
 			 * some cases serves as wild card
 			 */
 			if (charlen < 1) {
-				temp = 0x003f;
+				dst_char = cpu_to_le16(0x003f);
 				charlen = 1;
 			}
-			len_remaining -= charlen;
 			/*
 			 * character may take more than one byte in the source
 			 * string, but will take exactly two bytes in the
@@ -324,9 +324,8 @@ cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
 			i += charlen;
 			continue;
 		}
-		put_unaligned_le16(temp, &target[j]);
+		put_unaligned(dst_char, &target[j]);
 		i++; /* move to next char in source string */
-		len_remaining--;
 	}
 
 ctoUCS_out:
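The cifsConvertToUCS() rework above folds the endian conversion into the assignment: dst_char is already little-endian (__le16) when put_unaligned() stores its raw bytes, where the old code kept a CPU-order __u16 and converted at the final store. A rough userspace sketch of the same pattern, with stand-ins for cpu_to_le16() and put_unaligned() (assumed semantics, illustration only):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Convert a CPU-order value to little-endian, whatever the host order. */
static uint16_t my_cpu_to_le16(uint16_t v)
{
	const union { uint16_t u; uint8_t b[2]; } probe = { .u = 1 };
	return probe.b[0] ? v : (uint16_t)((v << 8) | (v >> 8));
}

/* Store without alignment assumptions, like put_unaligned(). */
static void my_put_unaligned16(uint16_t v, void *p)
{
	memcpy(p, &v, sizeof(v));
}

int main(void)
{
	uint8_t wire[2];

	/* UNI_ASTERISK = '*' + 0xF000 = 0xF02A; wire bytes must be 2a f0 */
	my_put_unaligned16(my_cpu_to_le16(0xF02A), wire);
	printf("%02x %02x\n", wire[0], wire[1]);
	return 0;
}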
diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
index 7fe6b52df507..644dd882a560 100644
--- a/fs/cifs/cifs_unicode.h
+++ b/fs/cifs/cifs_unicode.h
@@ -44,7 +44,7 @@
  * reserved symbols (along with \ and /), otherwise illegal to store
  * in filenames in NTFS
  */
-#define UNI_ASTERIK	(__u16) ('*' + 0xF000)
+#define UNI_ASTERISK	(__u16) ('*' + 0xF000)
 #define UNI_QUESTION	(__u16) ('?' + 0xF000)
 #define UNI_COLON	(__u16) (':' + 0xF000)
 #define UNI_GRTRTHAN	(__u16) ('>' + 0xF000)
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index a51585f9852b..d1a016be73ba 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -30,12 +30,13 @@
 #include <linux/ctype.h>
 #include <linux/random.h>
 
-/* Calculate and return the CIFS signature based on the mac key and SMB PDU */
-/* the 16 byte signature must be allocated by the caller */
-/* Note we only use the 1st eight bytes */
-/* Note that the smb header signature field on input contains the
-	sequence number before this function is called */
-
+/*
+ * Calculate and return the CIFS signature based on the mac key and SMB PDU.
+ * The 16 byte signature must be allocated by the caller. Note we only use the
+ * 1st eight bytes and that the smb header signature field on input contains
+ * the sequence number before this function is called. Also, this function
+ * should be called with the server->srv_mutex held.
+ */
 static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu,
 				struct TCP_Server_Info *server, char *signature)
 {
@@ -209,8 +210,10 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
 					cpu_to_le32(expected_sequence_number);
 	cifs_pdu->Signature.Sequence.Reserved = 0;
 
+	mutex_lock(&server->srv_mutex);
 	rc = cifs_calculate_signature(cifs_pdu, server,
 		what_we_think_sig_should_be);
+	mutex_unlock(&server->srv_mutex);
 
 	if (rc)
 		return rc;
@@ -469,15 +472,15 @@ static int calc_ntlmv2_hash(struct cifsSesInfo *ses, char *ntlmv2_hash,
 		return rc;
 	}
 
-	/* convert ses->userName to unicode and uppercase */
-	len = strlen(ses->userName);
+	/* convert ses->user_name to unicode and uppercase */
+	len = strlen(ses->user_name);
 	user = kmalloc(2 + (len * 2), GFP_KERNEL);
 	if (user == NULL) {
 		cERROR(1, "calc_ntlmv2_hash: user mem alloc failure\n");
 		rc = -ENOMEM;
 		goto calc_exit_2;
 	}
-	len = cifs_strtoUCS((__le16 *)user, ses->userName, len, nls_cp);
+	len = cifs_strtoUCS((__le16 *)user, ses->user_name, len, nls_cp);
 	UniStrupr(user);
 
 	crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
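The new srv_mutex comment and lock/unlock pair above exist because the signature is derived from a per-server sequence number; if two senders compute signatures concurrently, sequence numbers and signatures can pair up wrongly. A simplified pthread sketch of that invariant (hypothetical types, not the cifs code):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct server {
	pthread_mutex_t srv_mutex;
	uint32_t sequence_number;
};

/* Bump the sequence and sign under one lock so the pair stays consistent. */
static uint32_t sign_next_pdu(struct server *srv)
{
	uint32_t seq;

	pthread_mutex_lock(&srv->srv_mutex);
	seq = srv->sequence_number++;
	/* ...hash the PDU together with seq here... */
	pthread_mutex_unlock(&srv->srv_mutex);
	return seq;
}

int main(void)
{
	struct server srv = { PTHREAD_MUTEX_INITIALIZER, 0 };
	uint32_t a = sign_next_pdu(&srv);
	uint32_t b = sign_next_pdu(&srv);

	printf("%u %u\n", a, b);	/* 0 1 */
	return 0;
}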
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index f2970136d17d..5c412b33cd7c 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -53,7 +53,6 @@ int cifsFYI = 0;
 int cifsERROR = 1;
 int traceSMB = 0;
 unsigned int oplockEnabled = 1;
-unsigned int experimEnabled = 0;
 unsigned int linuxExtEnabled = 1;
 unsigned int lookupCacheEnabled = 1;
 unsigned int multiuser_mount = 0;
@@ -127,6 +126,7 @@ cifs_read_super(struct super_block *sb, void *data,
 		kfree(cifs_sb);
 		return rc;
 	}
+	cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages;
 
 #ifdef CONFIG_CIFS_DFS_UPCALL
 	/* copy mount params to sb for use in submounts */
@@ -409,8 +409,8 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
 
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
 		seq_printf(s, ",multiuser");
-	else if (tcon->ses->userName)
-		seq_printf(s, ",username=%s", tcon->ses->userName);
+	else if (tcon->ses->user_name)
+		seq_printf(s, ",username=%s", tcon->ses->user_name);
 
 	if (tcon->ses->domainName)
 		seq_printf(s, ",domain=%s", tcon->ses->domainName);
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 17afb0fbcaed..a5d1106fcbde 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -37,10 +37,9 @@
 
 #define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1)
 #define MAX_SERVER_SIZE 15
-#define MAX_SHARE_SIZE 64	/* used to be 20, this should still be enough */
-#define MAX_USERNAME_SIZE 32	/* 32 is to allow for 15 char names + null
-				   termination then *2 for unicode versions */
-#define MAX_PASSWORD_SIZE 512	/* max for windows seems to be 256 wide chars */
+#define MAX_SHARE_SIZE 80
+#define MAX_USERNAME_SIZE 256	/* reasonable maximum for current servers */
+#define MAX_PASSWORD_SIZE 512	/* max for windows seems to be 256 wide chars */
 
 #define CIFS_MIN_RCV_POOL 4
 
@@ -92,7 +91,8 @@ enum statusEnum {
 	CifsNew = 0,
 	CifsGood,
 	CifsExiting,
-	CifsNeedReconnect
+	CifsNeedReconnect,
+	CifsNeedNegotiate
 };
 
 enum securityEnum {
@@ -274,7 +274,7 @@ struct cifsSesInfo {
 	int capabilities;
 	char serverName[SERVER_NAME_LEN_WITH_NULL * 2];	/* BB make bigger for
 				TCP names - will ipv6 and sctp addresses fit? */
-	char userName[MAX_USERNAME_SIZE + 1];
+	char *user_name;
 	char *domainName;
 	char *password;
 	struct session_key auth_key;
@@ -817,7 +817,6 @@ GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions
 				have the uid/password or Kerberos credential
 				or equivalent for current user */
 GLOBAL_EXTERN unsigned int oplockEnabled;
-GLOBAL_EXTERN unsigned int experimEnabled;
 GLOBAL_EXTERN unsigned int lookupCacheEnabled;
 GLOBAL_EXTERN unsigned int global_secflags;	/* if on, session setup sent
 				with more secure ntlmssp2 challenge/resp */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 2644a5d6cc67..df959bae6728 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -142,9 +142,9 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command)
 	 */
 	while (server->tcpStatus == CifsNeedReconnect) {
 		wait_event_interruptible_timeout(server->response_q,
-			(server->tcpStatus == CifsGood), 10 * HZ);
+			(server->tcpStatus != CifsNeedReconnect), 10 * HZ);
 
-		/* is TCP session is reestablished now ?*/
+		/* are we still trying to reconnect? */
 		if (server->tcpStatus != CifsNeedReconnect)
 			break;
 
@@ -729,7 +729,7 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
 		return rc;
 
 	/* set up echo request */
-	smb->hdr.Tid = cpu_to_le16(0xffff);
+	smb->hdr.Tid = 0xffff;
 	smb->hdr.WordCount = 1;
 	put_unaligned_le16(1, &smb->EchoCount);
 	put_bcc_le(1, &smb->hdr);
@@ -1884,10 +1884,10 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
 				__constant_cpu_to_le16(CIFS_WRLCK))
 			pLockData->fl_type = F_WRLCK;
 
-		pLockData->fl_start = parm_data->start;
-		pLockData->fl_end = parm_data->start +
-					parm_data->length - 1;
-		pLockData->fl_pid = parm_data->pid;
+		pLockData->fl_start = le64_to_cpu(parm_data->start);
+		pLockData->fl_end = pLockData->fl_start +
+					le64_to_cpu(parm_data->length) - 1;
+		pLockData->fl_pid = le32_to_cpu(parm_data->pid);
 		}
 	}
 
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 6e2b2addfc78..db9d55b507d0 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -199,8 +199,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
 	}
 	spin_unlock(&GlobalMid_Lock);
 
-	while ((server->tcpStatus != CifsExiting) &&
-	       (server->tcpStatus != CifsGood)) {
+	while (server->tcpStatus == CifsNeedReconnect) {
 		try_to_freeze();
 
 		/* we should try only the port we connected to before */
@@ -212,7 +211,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
 			atomic_inc(&tcpSesReconnectCount);
 			spin_lock(&GlobalMid_Lock);
 			if (server->tcpStatus != CifsExiting)
-				server->tcpStatus = CifsGood;
+				server->tcpStatus = CifsNeedNegotiate;
 			spin_unlock(&GlobalMid_Lock);
 		}
 	}
@@ -248,24 +247,24 @@ static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize)
 	total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
 	data_in_this_rsp = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);
 
-	remaining = total_data_size - data_in_this_rsp;
-
-	if (remaining == 0)
+	if (total_data_size == data_in_this_rsp)
 		return 0;
-	else if (remaining < 0) {
+	else if (total_data_size < data_in_this_rsp) {
 		cFYI(1, "total data %d smaller than data in frame %d",
 			total_data_size, data_in_this_rsp);
 		return -EINVAL;
-	} else {
-		cFYI(1, "missing %d bytes from transact2, check next response",
-			remaining);
-		if (total_data_size > maxBufSize) {
-			cERROR(1, "TotalDataSize %d is over maximum buffer %d",
-				total_data_size, maxBufSize);
-			return -EINVAL;
-		}
-		return remaining;
 	}
+
+	remaining = total_data_size - data_in_this_rsp;
+
+	cFYI(1, "missing %d bytes from transact2, check next response",
+		remaining);
+	if (total_data_size > maxBufSize) {
+		cERROR(1, "TotalDataSize %d is over maximum buffer %d",
+			total_data_size, maxBufSize);
+		return -EINVAL;
+	}
+	return remaining;
 }
 
 static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
@@ -421,7 +420,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
 		pdu_length = 4; /* enough to get RFC1001 header */
 
 incomplete_rcv:
-	if (echo_retries > 0 &&
+	if (echo_retries > 0 && server->tcpStatus == CifsGood &&
 	    time_after(jiffies, server->lstrp +
 			(echo_retries * SMB_ECHO_INTERVAL))) {
 		cERROR(1, "Server %s has not responded in %d seconds. "
@@ -881,7 +880,8 @@ cifs_parse_mount_options(char *options, const char *devname,
 				/* null user, ie anonymous, authentication */
 				vol->nullauth = 1;
 			}
-			if (strnlen(value, 200) < 200) {
+			if (strnlen(value, MAX_USERNAME_SIZE) <
+						MAX_USERNAME_SIZE) {
 				vol->username = value;
 			} else {
 				printk(KERN_WARNING "CIFS: username too long\n");
@@ -1472,7 +1472,7 @@ srcip_matches(struct sockaddr *srcaddr, struct sockaddr *rhs)
 static bool
 match_port(struct TCP_Server_Info *server, struct sockaddr *addr)
 {
-	unsigned short int port, *sport;
+	__be16 port, *sport;
 
 	switch (addr->sa_family) {
 	case AF_INET:
@@ -1765,6 +1765,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
 		module_put(THIS_MODULE);
 		goto out_err_crypto_release;
 	}
+	tcp_ses->tcpStatus = CifsNeedNegotiate;
 
 	/* thread spawned, put it on the list */
 	spin_lock(&cifs_tcp_ses_lock);
@@ -1808,7 +1809,9 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
 			break;
 	default:
 		/* anything else takes username/password */
-		if (strncmp(ses->userName, vol->username,
+		if (ses->user_name == NULL)
+			continue;
+		if (strncmp(ses->user_name, vol->username,
 			    MAX_USERNAME_SIZE))
 			continue;
 		if (strlen(vol->username) != 0 &&
@@ -1851,6 +1854,8 @@ cifs_put_smb_ses(struct cifsSesInfo *ses)
 		cifs_put_tcp_session(server);
 }
 
+static bool warned_on_ntlm; /* globals init to false automatically */
+
 static struct cifsSesInfo *
 cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
 {
@@ -1906,9 +1911,11 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
 	else
 		sprintf(ses->serverName, "%pI4", &addr->sin_addr);
 
-	if (volume_info->username)
-		strncpy(ses->userName, volume_info->username,
-			MAX_USERNAME_SIZE);
+	if (volume_info->username) {
+		ses->user_name = kstrdup(volume_info->username, GFP_KERNEL);
+		if (!ses->user_name)
+			goto get_ses_fail;
+	}
 
 	/* volume_info->password freed at unmount */
 	if (volume_info->password) {
@@ -1923,6 +1930,15 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
 	}
 	ses->cred_uid = volume_info->cred_uid;
 	ses->linux_uid = volume_info->linux_uid;
+
+	/* ntlmv2 is much stronger than ntlm security, and has been broadly
+	supported for many years, time to update default security mechanism */
+	if ((volume_info->secFlg == 0) && warned_on_ntlm == false) {
+		warned_on_ntlm = true;
+		cERROR(1, "default security mechanism requested.  The default "
+			"security mechanism will be upgraded from ntlm to "
+			"ntlmv2 in kernel release 2.6.41");
+	}
 	ses->overrideSecFlg = volume_info->secFlg;
 
 	mutex_lock(&ses->session_mutex);
@@ -2276,7 +2292,7 @@ static int
 generic_ip_connect(struct TCP_Server_Info *server)
 {
 	int rc = 0;
-	unsigned short int sport;
+	__be16 sport;
 	int slen, sfamily;
 	struct socket *socket = server->ssocket;
 	struct sockaddr *saddr;
@@ -2361,7 +2377,7 @@ generic_ip_connect(struct TCP_Server_Info *server)
 static int
 ip_connect(struct TCP_Server_Info *server)
 {
-	unsigned short int *sport;
+	__be16 *sport;
 	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
 	struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
 
@@ -2826,7 +2842,7 @@ try_mount_again:
 
 remote_path_check:
 	/* check if a whole path (including prepath) is not remote */
-	if (!rc && cifs_sb->prepathlen && tcon) {
+	if (!rc && tcon) {
 		/* build_path_to_root works only when we have a valid tcon */
 		full_path = cifs_build_path_to_root(cifs_sb, tcon);
 		if (full_path == NULL) {
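Several hunks above introduce CifsNeedNegotiate: a freshly (re)connected socket is no longer marked CifsGood immediately, and the echo-timeout check only fires once the session really is CifsGood. A toy sketch of the intended state flow (ordering is illustrative; the names mirror enum statusEnum in cifsglob.h):

#include <stdio.h>

enum statusEnum { CifsNew, CifsGood, CifsExiting,
		  CifsNeedReconnect, CifsNeedNegotiate };

int main(void)
{
	/* socket up -> negotiate -> good; on error -> reconnect -> negotiate again */
	enum statusEnum flow[] = { CifsNew, CifsNeedNegotiate, CifsGood,
				   CifsNeedReconnect, CifsNeedNegotiate, CifsGood };

	for (unsigned int i = 0; i < sizeof(flow) / sizeof(flow[0]); i++)
		printf("step %u: state %d\n", i, (int)flow[i]);
	return 0;
}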
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index c27d236738fc..faf59529e847 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -575,8 +575,10 @@ reopen_error_exit:
 
 int cifs_close(struct inode *inode, struct file *file)
 {
-	cifsFileInfo_put(file->private_data);
-	file->private_data = NULL;
+	if (file->private_data != NULL) {
+		cifsFileInfo_put(file->private_data);
+		file->private_data = NULL;
+	}
 
 	/* return code from the ->release op is always ignored */
 	return 0;
@@ -970,6 +972,9 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
 	     total_written += bytes_written) {
 		rc = -EAGAIN;
 		while (rc == -EAGAIN) {
+			struct kvec iov[2];
+			unsigned int len;
+
 			if (open_file->invalidHandle) {
 				/* we could deadlock if we called
 				   filemap_fdatawait from here so tell
@@ -979,31 +984,14 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
 				if (rc != 0)
 					break;
 			}
-			if (experimEnabled || (pTcon->ses->server &&
-			    ((pTcon->ses->server->secMode &
-			    (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
-			    == 0))) {
-				struct kvec iov[2];
-				unsigned int len;
-
-				len = min((size_t)cifs_sb->wsize,
-					  write_size - total_written);
-				/* iov[0] is reserved for smb header */
-				iov[1].iov_base = (char *)write_data +
-						  total_written;
-				iov[1].iov_len = len;
-				rc = CIFSSMBWrite2(xid, pTcon,
-						open_file->netfid, len,
-						*poffset, &bytes_written,
-						iov, 1, 0);
-			} else
-				rc = CIFSSMBWrite(xid, pTcon,
-					 open_file->netfid,
-					 min_t(const int, cifs_sb->wsize,
-						write_size - total_written),
-					 *poffset, &bytes_written,
-					 write_data + total_written,
-					 NULL, 0);
+
+			len = min((size_t)cifs_sb->wsize,
+				  write_size - total_written);
+			/* iov[0] is reserved for smb header */
+			iov[1].iov_base = (char *)write_data + total_written;
+			iov[1].iov_len = len;
+			rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid, len,
+					   *poffset, &bytes_written, iov, 1, 0);
 		}
 		if (rc || (bytes_written == 0)) {
 			if (total_written)
@@ -1240,12 +1228,6 @@ static int cifs_writepages(struct address_space *mapping,
 	}
 
 	tcon = tlink_tcon(open_file->tlink);
-	if (!experimEnabled && tcon->ses->server->secMode &
-			(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
-		cifsFileInfo_put(open_file);
-		kfree(iov);
-		return generic_writepages(mapping, wbc);
-	}
 	cifsFileInfo_put(open_file);
 
 	xid = GetXid();
@@ -1980,6 +1962,24 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
 	return total_read;
 }
 
+/*
+ * If the page is mmap'ed into a process' page tables, then we need to make
+ * sure that it doesn't change while being written back.
+ */
+static int
+cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct page *page = vmf->page;
+
+	lock_page(page);
+	return VM_FAULT_LOCKED;
+}
+
+static struct vm_operations_struct cifs_file_vm_ops = {
+	.fault = filemap_fault,
+	.page_mkwrite = cifs_page_mkwrite,
+};
+
 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	int rc, xid;
@@ -1991,6 +1991,8 @@ int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
 		cifs_invalidate_mapping(inode);
 
 	rc = generic_file_mmap(file, vma);
+	if (rc == 0)
+		vma->vm_ops = &cifs_file_vm_ops;
 	FreeXid(xid);
 	return rc;
 }
@@ -2007,6 +2009,8 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
 		return rc;
 	}
 	rc = generic_file_mmap(file, vma);
+	if (rc == 0)
+		vma->vm_ops = &cifs_file_vm_ops;
 	FreeXid(xid);
 	return rc;
 }
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index e8804d373404..ce417a9764a3 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -239,7 +239,7 @@ CIFSQueryMFSymLink(const int xid, struct cifsTconInfo *tcon,
 	if (rc != 0)
 		return rc;
 
-	if (file_info.EndOfFile != CIFS_MF_SYMLINK_FILE_SIZE) {
+	if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
 		CIFSSMBClose(xid, tcon, netfid);
 		/* it's not a symlink */
 		return -EINVAL;
@@ -316,7 +316,7 @@ CIFSCheckMFSymlink(struct cifs_fattr *fattr,
 	if (rc != 0)
 		goto out;
 
-	if (file_info.EndOfFile != CIFS_MF_SYMLINK_FILE_SIZE) {
+	if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
 		CIFSSMBClose(xid, pTcon, netfid);
 		/* it's not a symlink */
 		goto out;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 2a930a752a78..0c684ae4c071 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -100,6 +100,7 @@ sesInfoFree(struct cifsSesInfo *buf_to_free)
 		memset(buf_to_free->password, 0, strlen(buf_to_free->password));
 		kfree(buf_to_free->password);
 	}
+	kfree(buf_to_free->user_name);
 	kfree(buf_to_free->domainName);
 	kfree(buf_to_free);
 }
@@ -520,7 +521,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
 			(struct smb_com_transaction_change_notify_rsp *)buf;
 		struct file_notify_information *pnotify;
 		__u32 data_offset = 0;
-		if (pSMBr->ByteCount > sizeof(struct file_notify_information)) {
+		if (get_bcc_le(buf) > sizeof(struct file_notify_information)) {
 			data_offset = le32_to_cpu(pSMBr->DataOffset);
 
 			pnotify = (struct file_notify_information *)
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 16765703131b..f6728eb6f4b9 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -219,12 +219,12 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
 		bcc_ptr++;
 	} */
 	/* copy user */
-	if (ses->userName == NULL) {
+	if (ses->user_name == NULL) {
 		/* null user mount */
 		*bcc_ptr = 0;
 		*(bcc_ptr+1) = 0;
 	} else {
-		bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName,
+		bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->user_name,
 					  MAX_USERNAME_SIZE, nls_cp);
 	}
 	bcc_ptr += 2 * bytes_ret;
@@ -244,12 +244,11 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
 	/* copy user */
 	/* BB what about null user mounts - check that we do this BB */
 	/* copy user */
-	if (ses->userName == NULL) {
-		/* BB what about null user mounts - check that we do this BB */
-	} else {
-		strncpy(bcc_ptr, ses->userName, MAX_USERNAME_SIZE);
-	}
-	bcc_ptr += strnlen(ses->userName, MAX_USERNAME_SIZE);
+	if (ses->user_name != NULL)
+		strncpy(bcc_ptr, ses->user_name, MAX_USERNAME_SIZE);
+	/* else null user mount */
+
+	bcc_ptr += strnlen(ses->user_name, MAX_USERNAME_SIZE);
 	*bcc_ptr = 0;
 	bcc_ptr++; /* account for null termination */
 
@@ -405,8 +404,8 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
 	/* BB spec says that if AvId field of MsvAvTimestamp is populated then
 	we must set the MIC field of the AUTHENTICATE_MESSAGE */
 	ses->ntlmssp->server_flags = le32_to_cpu(pblob->NegotiateFlags);
-	tioffset = cpu_to_le16(pblob->TargetInfoArray.BufferOffset);
-	tilen = cpu_to_le16(pblob->TargetInfoArray.Length);
+	tioffset = le32_to_cpu(pblob->TargetInfoArray.BufferOffset);
+	tilen = le16_to_cpu(pblob->TargetInfoArray.Length);
 	if (tilen) {
 		ses->auth_key.response = kmalloc(tilen, GFP_KERNEL);
 		if (!ses->auth_key.response) {
@@ -523,14 +522,14 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
 		tmp += len;
 	}
 
-	if (ses->userName == NULL) {
+	if (ses->user_name == NULL) {
 		sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
 		sec_blob->UserName.Length = 0;
 		sec_blob->UserName.MaximumLength = 0;
 		tmp += 2;
 	} else {
 		int len;
-		len = cifs_strtoUCS((__le16 *)tmp, ses->userName,
+		len = cifs_strtoUCS((__le16 *)tmp, ses->user_name,
 				    MAX_USERNAME_SIZE, nls_cp);
 		len *= 2; /* unicode is 2 bytes each */
 		sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index e25e99bf7ee1..d0f53538a57f 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -86,8 +86,8 @@
 
 #ifdef CONFIG_QUOTA
 /* Amount of blocks needed for quota update - we know that the structure was
- * allocated so we need to update only inode+data */
-#define EXT4_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 2 : 0)
+ * allocated so we need to update only data block */
+#define EXT4_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 1 : 0)
 /* Amount of blocks needed for quota insert/delete - we do some block writes
  * but inode, sb and group updates are done only once */
 #define EXT4_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 4673bc05274f..e9473cbe80df 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -125,9 +125,11 @@ extern int ext4_flush_completed_IO(struct inode *inode)
  * the parent directory's parent as well, and so on recursively, if
  * they are also freshly created.
  */
-static void ext4_sync_parent(struct inode *inode)
+static int ext4_sync_parent(struct inode *inode)
 {
+	struct writeback_control wbc;
 	struct dentry *dentry = NULL;
+	int ret = 0;
 
 	while (inode && ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
 		ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY);
@@ -136,8 +138,17 @@ static void ext4_sync_parent(struct inode *inode)
 		if (!dentry || !dentry->d_parent || !dentry->d_parent->d_inode)
 			break;
 		inode = dentry->d_parent->d_inode;
-		sync_mapping_buffers(inode->i_mapping);
+		ret = sync_mapping_buffers(inode->i_mapping);
+		if (ret)
+			break;
+		memset(&wbc, 0, sizeof(wbc));
+		wbc.sync_mode = WB_SYNC_ALL;
+		wbc.nr_to_write = 0; /* only write out the inode */
+		ret = sync_inode(inode, &wbc);
+		if (ret)
+			break;
 	}
+	return ret;
 }
 
 /*
@@ -176,7 +187,7 @@ int ext4_sync_file(struct file *file, int datasync)
 	if (!journal) {
 		ret = generic_file_fsync(file, datasync);
 		if (!ret && !list_empty(&inode->i_dentry))
-			ext4_sync_parent(inode);
+			ret = ext4_sync_parent(inode);
 		goto out;
 	}
 
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index ad8e303c0d29..f2fa5e8a582c 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2502,6 +2502,7 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
 		 * for partial write.
 		 */
 		set_buffer_new(bh);
+		set_buffer_mapped(bh);
 	}
 	return 0;
 }
@@ -4429,8 +4430,8 @@ void ext4_truncate(struct inode *inode)
 	Indirect chain[4];
 	Indirect *partial;
 	__le32 nr = 0;
-	int n;
-	ext4_lblk_t last_block;
+	int n = 0;
+	ext4_lblk_t last_block, max_block;
 	unsigned blocksize = inode->i_sb->s_blocksize;
 
 	trace_ext4_truncate_enter(inode);
@@ -4455,14 +4456,18 @@ void ext4_truncate(struct inode *inode)
 
 	last_block = (inode->i_size + blocksize-1)
 					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
+	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
+					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
 
 	if (inode->i_size & (blocksize - 1))
 		if (ext4_block_truncate_page(handle, mapping, inode->i_size))
 			goto out_stop;
 
-	n = ext4_block_to_path(inode, last_block, offsets, NULL);
-	if (n == 0)
-		goto out_stop;	/* error */
+	if (last_block != max_block) {
+		n = ext4_block_to_path(inode, last_block, offsets, NULL);
+		if (n == 0)
+			goto out_stop;	/* error */
+	}
 
 	/*
 	 * OK. This truncate is going to happen. We add the inode to the
@@ -4493,7 +4498,13 @@ void ext4_truncate(struct inode *inode)
 	 */
 	ei->i_disksize = inode->i_size;
 
-	if (n == 1) {		/* direct blocks */
+	if (last_block == max_block) {
+		/*
+		 * It is unnecessary to free any data blocks if last_block is
+		 * equal to the indirect block limit.
+		 */
+		goto out_unlock;
+	} else if (n == 1) {		/* direct blocks */
 		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
 			       i_data + EXT4_NDIR_BLOCKS);
 		goto do_indirects;
@@ -4553,6 +4564,7 @@ do_indirects:
 		;
 	}
 
+out_unlock:
 	up_write(&ei->i_data_sem);
 	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
 	ext4_mark_inode_dirty(handle, inode);
@@ -5398,13 +5410,12 @@ static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
 	/* if nrblocks are contiguous */
 	if (chunk) {
 		/*
-		 * With N contiguous data blocks, it need at most
-		 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks
-		 * 2 dindirect blocks
-		 * 1 tindirect block
+		 * With N contiguous data blocks, we need at most
+		 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
+		 * 2 dindirect blocks, and 1 tindirect block
 		 */
-		indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb);
-		return indirects + 3;
+		return DIV_ROUND_UP(nrblocks,
+				    EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
 	}
 	/*
 	 * if nrblocks are not contiguous, worse case, each block touch
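A worked example of the metadata estimate above, assuming 4K blocks, i.e. 1024 pointers per indirect block (numbers chosen for illustration): 1025 contiguous data blocks can straddle an indirect-block boundary, so the old nrblocks/addr_per_block + 3 = 4 can undercount, while DIV_ROUND_UP(nrblocks, addr_per_block) + 4 = 6 covers the extra indirect block plus the 2 dindirect and 1 tindirect blocks:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int addr_per_block = 1024;	/* 4096 / sizeof(__le32) */
	int nrblocks = 1025;		/* contiguous data blocks */

	/* old estimate: 1025/1024 + 3 = 4 */
	printf("old: %d\n", nrblocks / addr_per_block + 3);
	/* new estimate: ceil(1025/1024) + 4 = 6 */
	printf("new: %d\n", DIV_ROUND_UP(nrblocks, addr_per_block) + 4);
	return 0;
}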
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 056474b7b8e0..8553dfb310af 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -242,27 +242,44 @@ static void ext4_put_nojournal(handle_t *handle)
  * journal_end calls result in the superblock being marked dirty, so
  * that sync() will call the filesystem's write_super callback if
  * appropriate.
+ *
+ * To avoid j_barrier hold in userspace when a user calls freeze(),
+ * ext4 prevents a new handle from being started by s_frozen, which
+ * is in an upper layer.
  */
 handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
 {
 	journal_t *journal;
+	handle_t  *handle;
 
 	if (sb->s_flags & MS_RDONLY)
 		return ERR_PTR(-EROFS);
 
-	vfs_check_frozen(sb, SB_FREEZE_TRANS);
-	/* Special case here: if the journal has aborted behind our
-	 * backs (eg. EIO in the commit thread), then we still need to
-	 * take the FS itself readonly cleanly. */
 	journal = EXT4_SB(sb)->s_journal;
-	if (journal) {
-		if (is_journal_aborted(journal)) {
-			ext4_abort(sb, "Detected aborted journal");
-			return ERR_PTR(-EROFS);
-		}
-		return jbd2_journal_start(journal, nblocks);
+	handle = ext4_journal_current_handle();
+
+	/*
+	 * If a handle has been started, it should be allowed to
+	 * finish, otherwise deadlock could happen between freeze
+	 * and others (e.g. truncate) due to the restart of the
+	 * journal handle if the filesystem is frozen and active
+	 * handles are not stopped.
+	 */
+	if (!handle)
+		vfs_check_frozen(sb, SB_FREEZE_TRANS);
+
+	if (!journal)
+		return ext4_get_nojournal();
+	/*
+	 * Special case here: if the journal has aborted behind our
+	 * backs (eg. EIO in the commit thread), then we still need to
+	 * take the FS itself readonly cleanly.
+	 */
+	if (is_journal_aborted(journal)) {
+		ext4_abort(sb, "Detected aborted journal");
+		return ERR_PTR(-EROFS);
 	}
-	return ext4_get_nojournal();
+	return jbd2_journal_start(journal, nblocks);
 }
267 284
268/* 285/*
@@ -2975,6 +2992,12 @@ static int ext4_register_li_request(struct super_block *sb,
 	mutex_unlock(&ext4_li_info->li_list_mtx);
 
 	sbi->s_li_request = elr;
+	/*
+	 * set elr to NULL here since it has been inserted to
+	 * the request_list and the removal and free of it is
+	 * handled by ext4_clear_request_list from now on.
+	 */
+	elr = NULL;
 
 	if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
 		ret = ext4_run_lazyinit_thread();
@@ -3385,6 +3408,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
 	spin_lock_init(&sbi->s_next_gen_lock);
 
+	init_timer(&sbi->s_err_report);
+	sbi->s_err_report.function = print_daily_error_info;
+	sbi->s_err_report.data = (unsigned long) sb;
+
 	err = percpu_counter_init(&sbi->s_freeblocks_counter,
 			ext4_count_free_blocks(sb));
 	if (!err) {
@@ -3646,9 +3673,6 @@ no_journal:
3646 "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts, 3673 "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
3647 *sbi->s_es->s_mount_opts ? "; " : "", orig_data); 3674 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);
3648 3675
3649 init_timer(&sbi->s_err_report);
3650 sbi->s_err_report.function = print_daily_error_info;
3651 sbi->s_err_report.data = (unsigned long) sb;
3652 if (es->s_error_count) 3676 if (es->s_error_count)
3653 mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */ 3677 mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
3654 3678
@@ -3672,6 +3696,7 @@ failed_mount_wq:
3672 sbi->s_journal = NULL; 3696 sbi->s_journal = NULL;
3673 } 3697 }
3674failed_mount3: 3698failed_mount3:
3699 del_timer(&sbi->s_err_report);
3675 if (sbi->s_flex_groups) { 3700 if (sbi->s_flex_groups) {
3676 if (is_vmalloc_addr(sbi->s_flex_groups)) 3701 if (is_vmalloc_addr(sbi->s_flex_groups))
3677 vfree(sbi->s_flex_groups); 3702 vfree(sbi->s_flex_groups);
@@ -4138,6 +4163,11 @@ static int ext4_sync_fs(struct super_block *sb, int wait)
4138/* 4163/*
4139 * LVM calls this function before a (read-only) snapshot is created. This 4164 * LVM calls this function before a (read-only) snapshot is created. This
4140 * gives us a chance to flush the journal completely and mark the fs clean. 4165 * gives us a chance to flush the journal completely and mark the fs clean.
4166 *
4167 * Note that only this function cannot bring a filesystem to be in a clean
4168 * state independently, because ext4 prevents a new handle from being started
4169 * by @sb->s_frozen, which stays in an upper layer. It thus needs help from
4170 * the upper layer.
4141 */ 4171 */
4142static int ext4_freeze(struct super_block *sb) 4172static int ext4_freeze(struct super_block *sb)
4143{ 4173{
@@ -4614,11 +4644,24 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
 
 static int ext4_quota_off(struct super_block *sb, int type)
 {
+	struct inode *inode = sb_dqopt(sb)->files[type];
+	handle_t *handle;
+
 	/* Force all delayed allocation blocks to be allocated.
 	 * Caller already holds s_umount sem */
 	if (test_opt(sb, DELALLOC))
 		sync_filesystem(sb);
 
+	/* Update modification times of quota files when userspace can
+	 * start looking at them */
+	handle = ext4_journal_start(inode, 1);
+	if (IS_ERR(handle))
+		goto out;
+	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	ext4_mark_inode_dirty(handle, inode);
+	ext4_journal_stop(handle);
+
+out:
 	return dquot_quota_off(sb, type);
 }
 
@@ -4714,9 +4757,8 @@ out:
 	if (inode->i_size < off + len) {
 		i_size_write(inode, off + len);
 		EXT4_I(inode)->i_disksize = inode->i_size;
+		ext4_mark_inode_dirty(handle, inode);
 	}
-	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-	ext4_mark_inode_dirty(handle, inode);
 	mutex_unlock(&inode->i_mutex);
 	return len;
 }
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 20af62f4304b..6e28000a4b21 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -105,6 +105,8 @@ static int journal_submit_commit_record(journal_t *journal,
 	int ret;
 	struct timespec now = current_kernel_time();
 
+	*cbh = NULL;
+
 	if (is_journal_aborted(journal))
 		return 0;
 
@@ -806,7 +808,7 @@ wait_for_iobuf:
 		if (err)
 			__jbd2_journal_abort_hard(journal);
 	}
-	if (!err && !is_journal_aborted(journal))
+	if (cbh)
 		err = journal_wait_on_commit_record(journal, cbh);
 	if (JBD2_HAS_INCOMPAT_FEATURE(journal,
 				      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) &&
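Setting *cbh = NULL on entry is what makes the second hunk safe: the caller's wait is keyed off the out-pointer itself, so every early return from journal_submit_commit_record() (aborted journal included) leaves a well-defined NULL rather than stack garbage. The pattern, self-contained (names hypothetical):

#include <stdio.h>
#include <stddef.h>

static int record = 42;

/* Out-parameters are cleared first, so early returns cannot leak an
 * uninitialized pointer to the caller. */
static int submit_record(int aborted, int **cbh)
{
	*cbh = NULL;
	if (aborted)
		return 0;	/* caller sees NULL and skips the wait */
	*cbh = &record;
	return 0;
}

int main(void)
{
	int *cbh;

	submit_record(1, &cbh);
	if (cbh)	/* mirrors "if (cbh) journal_wait_on_commit_record()" */
		printf("waiting on %d\n", *cbh);
	else
		printf("nothing submitted, nothing to wait for\n");
	return 0;
}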
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index aba8ebaec25c..e0ec3db1c395 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -2413,10 +2413,12 @@ const char *jbd2_dev_to_name(dev_t device)
 	new_dev = kmalloc(sizeof(struct devname_cache), GFP_KERNEL);
 	if (!new_dev)
 		return "NODEV-ALLOCFAILURE"; /* Something non-NULL */
+	bd = bdget(device);
 	spin_lock(&devname_cache_lock);
 	if (devcache[i]) {
 		if (devcache[i]->device == device) {
 			kfree(new_dev);
+			bdput(bd);
 			ret = devcache[i]->devname;
 			spin_unlock(&devname_cache_lock);
 			return ret;
@@ -2425,7 +2427,6 @@ const char *jbd2_dev_to_name(dev_t device)
 	}
 	devcache[i] = new_dev;
 	devcache[i]->device = device;
-	bd = bdget(device);
 	if (bd) {
 		bdevname(bd, devcache[i]->devname);
 		bdput(bd);
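Read together, the two hunks move bdget() from inside the devname_cache_lock critical section to before it: bdget() can sleep, and sleeping under a spinlock is forbidden. The cache-hit path gains a matching bdput() so the reference taken up front is not leaked. The resulting flow, consolidated (a sketch, not a verbatim copy):

	bd = bdget(device);		/* may sleep: do it before the lock */
	spin_lock(&devname_cache_lock);
	if (devcache[i] && devcache[i]->device == device) {
		kfree(new_dev);
		bdput(bd);		/* cache hit: drop the unused reference */
		ret = devcache[i]->devname;
		spin_unlock(&devname_cache_lock);
		return ret;
	}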
diff --git a/fs/namespace.c b/fs/namespace.c
index 7dba2ed03429..d99bcf59e4c2 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1030,18 +1030,6 @@ const struct seq_operations mounts_op = {
 	.show	= show_vfsmnt
 };
 
-static int uuid_is_nil(u8 *uuid)
-{
-	int i;
-	u8 *cp = (u8 *)uuid;
-
-	for (i = 0; i < 16; i++) {
-		if (*cp++)
-			return 0;
-	}
-	return 1;
-}
-
 static int show_mountinfo(struct seq_file *m, void *v)
 {
 	struct proc_mounts *p = m->private;
@@ -1085,10 +1073,6 @@ static int show_mountinfo(struct seq_file *m, void *v)
 	if (IS_MNT_UNBINDABLE(mnt))
 		seq_puts(m, " unbindable");
 
-	if (!uuid_is_nil(mnt->mnt_sb->s_uuid))
-		/* print the uuid */
-		seq_printf(m, " uuid:%pU", mnt->mnt_sb->s_uuid);
-
 	/* Filesystem specific data */
 	seq_puts(m, " - ");
 	show_type(m, sb);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index af0c6279a4a7..e4cbc11a74ab 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -542,11 +542,15 @@ nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, u
 	if (!nfs_need_commit(nfsi))
 		return 0;
 
+	spin_lock(&inode->i_lock);
 	ret = nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT);
 	if (ret > 0)
 		nfsi->ncommit -= ret;
+	spin_unlock(&inode->i_lock);
+
 	if (nfs_need_commit(NFS_I(inode)))
 		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+
 	return ret;
 }
 #else
@@ -1483,9 +1487,7 @@ int nfs_commit_inode(struct inode *inode, int how)
 	res = nfs_commit_set_lock(NFS_I(inode), may_wait);
 	if (res <= 0)
 		goto out_mark_dirty;
-	spin_lock(&inode->i_lock);
 	res = nfs_scan_commit(inode, &head, 0, 0);
-	spin_unlock(&inode->i_lock);
 	if (res) {
 		int error;
 
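The inode spinlock moves from the lone call site in nfs_commit_inode() into nfs_scan_commit() itself, so every caller gets the list scan and the ncommit bookkeeping under a single critical section, and the lock is dropped before __mark_inode_dirty() runs. In outline:

	spin_lock(&inode->i_lock);
	ret = nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT);
	if (ret > 0)
		nfsi->ncommit -= ret;	/* accounting stays coherent with the scan */
	spin_unlock(&inode->i_lock);	/* unlock before dirtying the inode */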
diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
index 0c6d81670137..7c831a2731fa 100644
--- a/fs/nfsd/lockd.c
+++ b/fs/nfsd/lockd.c
@@ -38,7 +38,6 @@ nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp)
 	exp_readlock();
 	nfserr = nfsd_open(rqstp, &fh, S_IFREG, NFSD_MAY_LOCK, filp);
 	fh_put(&fh);
-	rqstp->rq_client = NULL;
 	exp_readunlock();
 	/* We return nlm error codes as nlm doesn't know
 	 * about nfsd, but nfsd does know about nlm..
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 4b36ec3eb8ea..aa309aa93fe8 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -397,10 +397,13 @@ static void unhash_generic_stateid(struct nfs4_stateid *stp)
 
 static void free_generic_stateid(struct nfs4_stateid *stp)
 {
-	int oflag = nfs4_access_bmap_to_omode(stp);
+	int oflag;
 
-	nfs4_file_put_access(stp->st_file, oflag);
-	put_nfs4_file(stp->st_file);
+	if (stp->st_access_bmap) {
+		oflag = nfs4_access_bmap_to_omode(stp);
+		nfs4_file_put_access(stp->st_file, oflag);
+		put_nfs4_file(stp->st_file);
+	}
 	kmem_cache_free(stateid_slab, stp);
 }
 
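free_generic_stateid() can now be handed a stateid whose open never granted any access (st_access_bmap == 0); in that case there is no file access or reference to put, only the slab object to free. The guard generalizes to any teardown of a partially constructed object:

	if (stp->st_access_bmap) {	/* only if an open actually succeeded */
		oflag = nfs4_access_bmap_to_omode(stp);
		nfs4_file_put_access(stp->st_file, oflag);
		put_nfs4_file(stp->st_file);
	}
	kmem_cache_free(stateid_slab, stp);	/* always free the object itself */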
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 5ea402023ebd..9ef9ed2cfe2e 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -293,7 +293,6 @@ xfs_buf_allocate_memory(
 	size_t			nbytes, offset;
 	gfp_t			gfp_mask = xb_to_gfp(flags);
 	unsigned short		page_count, i;
-	pgoff_t			first;
 	xfs_off_t		end;
 	int			error;
 
@@ -333,7 +332,6 @@ use_alloc_page:
 		return error;
 
 	offset = bp->b_offset;
-	first = bp->b_file_offset >> PAGE_SHIFT;
 	bp->b_flags |= _XBF_PAGES;
 
 	for (i = 0; i < bp->b_page_count; i++) {
@@ -657,8 +655,6 @@ xfs_buf_readahead(
 	xfs_off_t		ioff,
 	size_t			isize)
 {
-	struct backing_dev_info *bdi;
-
 	if (bdi_read_congested(target->bt_bdi))
 		return;
 
@@ -919,8 +915,6 @@ xfs_buf_lock(
 
 	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
 		xfs_log_force(bp->b_target->bt_mount, 0);
-	if (atomic_read(&bp->b_io_remaining))
-		blk_flush_plug(current);
 	down(&bp->b_sema);
 	XB_SET_OWNER(bp);
 
@@ -1309,8 +1303,6 @@ xfs_buf_iowait(
 {
 	trace_xfs_buf_iowait(bp, _RET_IP_);
 
-	if (atomic_read(&bp->b_io_remaining))
-		blk_flush_plug(current);
 	wait_for_completion(&bp->b_iowait);
 
 	trace_xfs_buf_iowait_done(bp, _RET_IP_);
@@ -1747,8 +1739,8 @@ xfsbufd(
 	do {
 		long	age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
 		long	tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10);
-		int	count = 0;
 		struct list_head tmp;
+		struct blk_plug plug;
 
 		if (unlikely(freezing(current))) {
 			set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
@@ -1764,16 +1756,15 @@ xfsbufd(
 
 		xfs_buf_delwri_split(target, &tmp, age);
 		list_sort(NULL, &tmp, xfs_buf_cmp);
+
+		blk_start_plug(&plug);
 		while (!list_empty(&tmp)) {
 			struct xfs_buf *bp;
 			bp = list_first_entry(&tmp, struct xfs_buf, b_list);
 			list_del_init(&bp->b_list);
 			xfs_bdstrat_cb(bp);
-			count++;
 		}
-		if (count)
-			blk_flush_plug(current);
-
+		blk_finish_plug(&plug);
 	} while (!kthread_should_stop());
 
 	return 0;
@@ -1793,6 +1784,7 @@ xfs_flush_buftarg(
 	int		pincount = 0;
 	LIST_HEAD(tmp_list);
 	LIST_HEAD(wait_list);
+	struct blk_plug plug;
 
 	xfs_buf_runall_queues(xfsconvertd_workqueue);
 	xfs_buf_runall_queues(xfsdatad_workqueue);
@@ -1807,6 +1799,8 @@ xfs_flush_buftarg(
 	 * we do that after issuing all the IO.
 	 */
 	list_sort(NULL, &tmp_list, xfs_buf_cmp);
+
+	blk_start_plug(&plug);
 	while (!list_empty(&tmp_list)) {
 		bp = list_first_entry(&tmp_list, struct xfs_buf, b_list);
 		ASSERT(target == bp->b_target);
@@ -1817,10 +1811,10 @@
 		}
 		xfs_bdstrat_cb(bp);
 	}
+	blk_finish_plug(&plug);
 
 	if (wait) {
-		/* Expedite and wait for IO to complete. */
-		blk_flush_plug(current);
+		/* Wait for IO to complete. */
 		while (!list_empty(&wait_list)) {
 			bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
 
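All of the xfs_buf.c hunks are one conversion: instead of submitting I/O and then poking the per-task queue with blk_flush_plug(), the submission loops are bracketed by an on-stack block plug, so requests batch up and are flushed once when the plug is finished. The shape of the API, as used above:

	struct blk_plug plug;

	blk_start_plug(&plug);		/* begin batching this task's I/O */
	while (!list_empty(&tmp)) {
		bp = list_first_entry(&tmp, struct xfs_buf, b_list);
		list_del_init(&bp->b_list);
		xfs_bdstrat_cb(bp);	/* queued behind the plug */
	}
	blk_finish_plug(&plug);	/* one unplug submits the whole batch */

The waiters (xfs_buf_lock(), xfs_buf_iowait()) no longer flush anything themselves, because the submitting task's plug is drained when it finishes or sleeps.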
diff --git a/fs/xfs/linux-2.6/xfs_message.c b/fs/xfs/linux-2.6/xfs_message.c
index 508e06fd7d1e..3ca795609113 100644
--- a/fs/xfs/linux-2.6/xfs_message.c
+++ b/fs/xfs/linux-2.6/xfs_message.c
@@ -28,53 +28,47 @@
 /*
  * XFS logging functions
  */
-static int
+static void
 __xfs_printk(
 	const char		*level,
 	const struct xfs_mount	*mp,
 	struct va_format	*vaf)
 {
 	if (mp && mp->m_fsname)
-		return printk("%sXFS (%s): %pV\n", level, mp->m_fsname, vaf);
-	return printk("%sXFS: %pV\n", level, vaf);
+		printk("%sXFS (%s): %pV\n", level, mp->m_fsname, vaf);
+	printk("%sXFS: %pV\n", level, vaf);
 }
 
-int xfs_printk(
+void xfs_printk(
 	const char		*level,
 	const struct xfs_mount	*mp,
 	const char		*fmt, ...)
 {
 	struct va_format	vaf;
 	va_list			args;
-	int			r;
 
 	va_start(args, fmt);
 
 	vaf.fmt = fmt;
 	vaf.va = &args;
 
-	r = __xfs_printk(level, mp, &vaf);
+	__xfs_printk(level, mp, &vaf);
 	va_end(args);
-
-	return r;
 }
 
 #define define_xfs_printk_level(func, kern_level)		\
-int func(const struct xfs_mount *mp, const char *fmt, ...)	\
+void func(const struct xfs_mount *mp, const char *fmt, ...)	\
 {								\
 	struct va_format	vaf;				\
 	va_list			args;				\
-	int			r;				\
 								\
 	va_start(args, fmt);					\
 								\
 	vaf.fmt = fmt;						\
 	vaf.va = &args;						\
 								\
-	r = __xfs_printk(kern_level, mp, &vaf);			\
+	__xfs_printk(kern_level, mp, &vaf);			\
 	va_end(args);						\
-								\
-	return r;						\
 }								\
 
 define_xfs_printk_level(xfs_emerg, KERN_EMERG);
@@ -88,7 +82,7 @@ define_xfs_printk_level(xfs_info, KERN_INFO);
 define_xfs_printk_level(xfs_debug, KERN_DEBUG);
 #endif
 
-int
+void
 xfs_alert_tag(
 	const struct xfs_mount	*mp,
 	int			panic_tag,
@@ -97,7 +91,6 @@ xfs_alert_tag(
 	struct va_format	vaf;
 	va_list			args;
 	int			do_panic = 0;
-	int			r;
 
 	if (xfs_panic_mask && (xfs_panic_mask & panic_tag)) {
 		xfs_printk(KERN_ALERT, mp,
@@ -110,12 +103,10 @@ xfs_alert_tag(
 	vaf.fmt = fmt;
 	vaf.va = &args;
 
-	r = __xfs_printk(KERN_ALERT, mp, &vaf);
+	__xfs_printk(KERN_ALERT, mp, &vaf);
 	va_end(args);
 
 	BUG_ON(do_panic);
-
-	return r;
 }
 
 void
diff --git a/fs/xfs/linux-2.6/xfs_message.h b/fs/xfs/linux-2.6/xfs_message.h
index e77ffa16745b..f1b3fc1b6c4e 100644
--- a/fs/xfs/linux-2.6/xfs_message.h
+++ b/fs/xfs/linux-2.6/xfs_message.h
@@ -3,32 +3,34 @@
 
 struct xfs_mount;
 
-extern int xfs_printk(const char *level, const struct xfs_mount *mp,
+extern void xfs_printk(const char *level, const struct xfs_mount *mp,
 			const char *fmt, ...)
 	__attribute__ ((format (printf, 3, 4)));
-extern int xfs_emerg(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_emerg(const struct xfs_mount *mp, const char *fmt, ...)
 	__attribute__ ((format (printf, 2, 3)));
-extern int xfs_alert(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_alert(const struct xfs_mount *mp, const char *fmt, ...)
 	__attribute__ ((format (printf, 2, 3)));
-extern int xfs_alert_tag(const struct xfs_mount *mp, int tag,
+extern void xfs_alert_tag(const struct xfs_mount *mp, int tag,
 			const char *fmt, ...)
 	__attribute__ ((format (printf, 3, 4)));
-extern int xfs_crit(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_crit(const struct xfs_mount *mp, const char *fmt, ...)
 	__attribute__ ((format (printf, 2, 3)));
-extern int xfs_err(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_err(const struct xfs_mount *mp, const char *fmt, ...)
 	__attribute__ ((format (printf, 2, 3)));
-extern int xfs_warn(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_warn(const struct xfs_mount *mp, const char *fmt, ...)
 	__attribute__ ((format (printf, 2, 3)));
-extern int xfs_notice(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_notice(const struct xfs_mount *mp, const char *fmt, ...)
 	__attribute__ ((format (printf, 2, 3)));
-extern int xfs_info(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_info(const struct xfs_mount *mp, const char *fmt, ...)
 	__attribute__ ((format (printf, 2, 3)));
 
 #ifdef DEBUG
-extern int xfs_debug(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_debug(const struct xfs_mount *mp, const char *fmt, ...)
 	__attribute__ ((format (printf, 2, 3)));
 #else
-#define xfs_debug(mp, fmt, ...)	(0)
+static inline void xfs_debug(const struct xfs_mount *mp, const char *fmt, ...)
+{
+}
 #endif
 
 extern void assfail(char *expr, char *f, int l);
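The !DEBUG stub changes from a macro expanding to (0) into an empty static inline. The inline version still parses and type-checks its arguments, so variables referenced only in debug output no longer trigger unused-variable warnings, and callers cannot accidentally use the bogus value 0 that the macro used to yield. Compare:

/* old: arguments unchecked, expression has a (meaningless) value */
#define xfs_debug(mp, fmt, ...)	(0)

/* new: arguments evaluated and type-checked, nothing to misuse */
static inline void xfs_debug(const struct xfs_mount *mp, const char *fmt, ...)
{
}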
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 1ba5c451da36..b38e58d02299 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -816,75 +816,6 @@ xfs_setup_devices(
 	return 0;
 }
 
-/*
- * XFS AIL push thread support
- */
-void
-xfsaild_wakeup(
-	struct xfs_ail		*ailp,
-	xfs_lsn_t		threshold_lsn)
-{
-	/* only ever move the target forwards */
-	if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0) {
-		ailp->xa_target = threshold_lsn;
-		wake_up_process(ailp->xa_task);
-	}
-}
-
-STATIC int
-xfsaild(
-	void	*data)
-{
-	struct xfs_ail	*ailp = data;
-	xfs_lsn_t	last_pushed_lsn = 0;
-	long		tout = 0; /* milliseconds */
-
-	while (!kthread_should_stop()) {
-		/*
-		 * for short sleeps indicating congestion, don't allow us to
-		 * get woken early. Otherwise all we do is bang on the AIL lock
-		 * without making progress.
-		 */
-		if (tout && tout <= 20)
-			__set_current_state(TASK_KILLABLE);
-		else
-			__set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(tout ?
-				 msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
-
-		/* swsusp */
-		try_to_freeze();
-
-		ASSERT(ailp->xa_mount->m_log);
-		if (XFS_FORCED_SHUTDOWN(ailp->xa_mount))
-			continue;
-
-		tout = xfsaild_push(ailp, &last_pushed_lsn);
-	}
-
-	return 0;
-}	/* xfsaild */
-
-int
-xfsaild_start(
-	struct xfs_ail	*ailp)
-{
-	ailp->xa_target = 0;
-	ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
-				    ailp->xa_mount->m_fsname);
-	if (IS_ERR(ailp->xa_task))
-		return -PTR_ERR(ailp->xa_task);
-	return 0;
-}
-
-void
-xfsaild_stop(
-	struct xfs_ail	*ailp)
-{
-	kthread_stop(ailp->xa_task);
-}
-
-
 /* Catch misguided souls that try to use this interface on XFS */
 STATIC struct inode *
 xfs_fs_alloc_inode(
@@ -1191,22 +1122,12 @@ xfs_fs_sync_fs(
 		return -error;
 
 	if (laptop_mode) {
-		int	prev_sync_seq = mp->m_sync_seq;
-
 		/*
 		 * The disk must be active because we're syncing.
 		 * We schedule xfssyncd now (now that the disk is
 		 * active) instead of later (when it might not be).
 		 */
-		wake_up_process(mp->m_sync_task);
-		/*
-		 * We have to wait for the sync iteration to complete.
-		 * If we don't, the disk activity caused by the sync
-		 * will come after the sync is completed, and that
-		 * triggers another sync from laptop mode.
-		 */
-		wait_event(mp->m_wait_single_sync_task,
-				mp->m_sync_seq != prev_sync_seq);
+		flush_delayed_work_sync(&mp->m_sync_work);
 	}
 
 	return 0;
@@ -1490,9 +1411,6 @@ xfs_fs_fill_super(
 	spin_lock_init(&mp->m_sb_lock);
 	mutex_init(&mp->m_growlock);
 	atomic_set(&mp->m_active_trans, 0);
-	INIT_LIST_HEAD(&mp->m_sync_list);
-	spin_lock_init(&mp->m_sync_lock);
-	init_waitqueue_head(&mp->m_wait_single_sync_task);
 
 	mp->m_super = sb;
 	sb->s_fs_info = mp;
@@ -1799,6 +1717,38 @@ xfs_destroy_zones(void)
 }
 
 STATIC int __init
+xfs_init_workqueues(void)
+{
+	/*
+	 * max_active is set to 8 to give enough concurrency to allow
+	 * multiple work operations on each CPU to run. This allows multiple
+	 * filesystems to be running sync work concurrently, and scales with
+	 * the number of CPUs in the system.
+	 */
+	xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8);
+	if (!xfs_syncd_wq)
+		goto out;
+
+	xfs_ail_wq = alloc_workqueue("xfsail", WQ_CPU_INTENSIVE, 8);
+	if (!xfs_ail_wq)
+		goto out_destroy_syncd;
+
+	return 0;
+
+out_destroy_syncd:
+	destroy_workqueue(xfs_syncd_wq);
+out:
+	return -ENOMEM;
+}
+
+STATIC void
+xfs_destroy_workqueues(void)
+{
+	destroy_workqueue(xfs_ail_wq);
+	destroy_workqueue(xfs_syncd_wq);
+}
+
+STATIC int __init
 init_xfs_fs(void)
 {
 	int			error;
@@ -1813,10 +1763,14 @@ init_xfs_fs(void)
 	if (error)
 		goto out;
 
-	error = xfs_mru_cache_init();
+	error = xfs_init_workqueues();
 	if (error)
 		goto out_destroy_zones;
 
+	error = xfs_mru_cache_init();
+	if (error)
+		goto out_destroy_wq;
+
 	error = xfs_filestream_init();
 	if (error)
 		goto out_mru_cache_uninit;
@@ -1833,6 +1787,10 @@ init_xfs_fs(void)
 	if (error)
 		goto out_cleanup_procfs;
 
+	error = xfs_init_workqueues();
+	if (error)
+		goto out_sysctl_unregister;
+
 	vfs_initquota();
 
 	error = register_filesystem(&xfs_fs_type);
@@ -1850,6 +1808,8 @@ init_xfs_fs(void)
 	xfs_filestream_uninit();
  out_mru_cache_uninit:
 	xfs_mru_cache_uninit();
+ out_destroy_wq:
+	xfs_destroy_workqueues();
  out_destroy_zones:
 	xfs_destroy_zones();
  out:
@@ -1866,6 +1826,7 @@ exit_xfs_fs(void)
 	xfs_buf_terminate();
 	xfs_filestream_uninit();
 	xfs_mru_cache_uninit();
+	xfs_destroy_workqueues();
 	xfs_destroy_zones();
 }
 
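One oddity worth flagging: as merged, init_xfs_fs() appears to call xfs_init_workqueues() twice, once before xfs_mru_cache_init() and once after xfs_sysctl_register(). If both calls really remain in the tree, the second would overwrite and leak the first pair of workqueues. A defensive variant of this alloc-once initializer (illustrative only, not the code above):

STATIC int __init
xfs_init_workqueues(void)
{
	if (xfs_syncd_wq)	/* tolerate a second call instead of leaking */
		return 0;

	xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8);
	if (!xfs_syncd_wq)
		return -ENOMEM;

	xfs_ail_wq = alloc_workqueue("xfsail", WQ_CPU_INTENSIVE, 8);
	if (!xfs_ail_wq) {
		destroy_workqueue(xfs_syncd_wq);
		xfs_syncd_wq = NULL;
		return -ENOMEM;
	}
	return 0;
}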
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 9cf35a688f53..e4f9c1b0836c 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -22,6 +22,7 @@
 #include "xfs_log.h"
 #include "xfs_inum.h"
 #include "xfs_trans.h"
+#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
@@ -39,6 +40,8 @@
 #include <linux/kthread.h>
 #include <linux/freezer.h>
 
+struct workqueue_struct	*xfs_syncd_wq;	/* sync workqueue */
+
 /*
  * The inode lookup is done in batches to keep the amount of lock traffic and
  * radix tree lookups to a minimum. The batch size is a trade off between
@@ -431,62 +434,12 @@ xfs_quiesce_attr(
 	xfs_unmountfs_writesb(mp);
 }
 
-/*
- * Enqueue a work item to be picked up by the vfs xfssyncd thread.
- * Doing this has two advantages:
- * - It saves on stack space, which is tight in certain situations
- * - It can be used (with care) as a mechanism to avoid deadlocks.
- * Flushing while allocating in a full filesystem requires both.
- */
-STATIC void
-xfs_syncd_queue_work(
-	struct xfs_mount *mp,
-	void		*data,
-	void		(*syncer)(struct xfs_mount *, void *),
-	struct completion *completion)
-{
-	struct xfs_sync_work *work;
-
-	work = kmem_alloc(sizeof(struct xfs_sync_work), KM_SLEEP);
-	INIT_LIST_HEAD(&work->w_list);
-	work->w_syncer = syncer;
-	work->w_data = data;
-	work->w_mount = mp;
-	work->w_completion = completion;
-	spin_lock(&mp->m_sync_lock);
-	list_add_tail(&work->w_list, &mp->m_sync_list);
-	spin_unlock(&mp->m_sync_lock);
-	wake_up_process(mp->m_sync_task);
-}
-
-/*
- * Flush delayed allocate data, attempting to free up reserved space
- * from existing allocations.  At this point a new allocation attempt
- * has failed with ENOSPC and we are in the process of scratching our
- * heads, looking about for more room...
- */
-STATIC void
-xfs_flush_inodes_work(
-	struct xfs_mount *mp,
-	void		*arg)
-{
-	struct inode	*inode = arg;
-	xfs_sync_data(mp, SYNC_TRYLOCK);
-	xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
-	iput(inode);
-}
-
-void
-xfs_flush_inodes(
-	xfs_inode_t	*ip)
+static void
+xfs_syncd_queue_sync(
	struct xfs_mount	*mp)
 {
-	struct inode	*inode = VFS_I(ip);
-	DECLARE_COMPLETION_ONSTACK(completion);
-
-	igrab(inode);
-	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work, &completion);
-	wait_for_completion(&completion);
-	xfs_log_force(ip->i_mount, XFS_LOG_SYNC);
+	queue_delayed_work(xfs_syncd_wq, &mp->m_sync_work,
+				msecs_to_jiffies(xfs_syncd_centisecs * 10));
 }
 
 /*
@@ -496,9 +449,10 @@ xfs_flush_inodes(
  */
 STATIC void
 xfs_sync_worker(
-	struct xfs_mount *mp,
-	void		*unused)
+	struct work_struct *work)
 {
+	struct xfs_mount *mp = container_of(to_delayed_work(work),
+					struct xfs_mount, m_sync_work);
 	int		error;
 
 	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
@@ -508,73 +462,106 @@ xfs_sync_worker(
 			error = xfs_fs_log_dummy(mp);
 		else
 			xfs_log_force(mp, 0);
-		xfs_reclaim_inodes(mp, 0);
 		error = xfs_qm_sync(mp, SYNC_TRYLOCK);
+
+		/* start pushing all the metadata that is currently dirty */
+		xfs_ail_push_all(mp->m_ail);
 	}
-	mp->m_sync_seq++;
-	wake_up(&mp->m_wait_single_sync_task);
+
+	/* queue us up again */
+	xfs_syncd_queue_sync(mp);
 }
 
-STATIC int
-xfssyncd(
-	void			*arg)
+/*
+ * Queue a new inode reclaim pass if there are reclaimable inodes and there
+ * isn't a reclaim pass already in progress. By default it runs every 5s based
+ * on the xfs syncd work default of 30s. Perhaps this should have its own
+ * tunable, but that can be done if this method proves to be ineffective or too
+ * aggressive.
+ */
+static void
+xfs_syncd_queue_reclaim(
+	struct xfs_mount	*mp)
 {
-	struct xfs_mount	*mp = arg;
-	long			timeleft;
-	xfs_sync_work_t		*work, *n;
-	LIST_HEAD		(tmp);
-
-	set_freezable();
-	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
-	for (;;) {
-		if (list_empty(&mp->m_sync_list))
-			timeleft = schedule_timeout_interruptible(timeleft);
-		/* swsusp */
-		try_to_freeze();
-		if (kthread_should_stop() && list_empty(&mp->m_sync_list))
-			break;
 
-		spin_lock(&mp->m_sync_lock);
-		/*
-		 * We can get woken by laptop mode, to do a sync -
-		 * that's the (only!) case where the list would be
-		 * empty with time remaining.
-		 */
-		if (!timeleft || list_empty(&mp->m_sync_list)) {
-			if (!timeleft)
-				timeleft = xfs_syncd_centisecs *
-							msecs_to_jiffies(10);
-			INIT_LIST_HEAD(&mp->m_sync_work.w_list);
-			list_add_tail(&mp->m_sync_work.w_list,
-					&mp->m_sync_list);
-		}
-		list_splice_init(&mp->m_sync_list, &tmp);
-		spin_unlock(&mp->m_sync_lock);
+	/*
+	 * We can have inodes enter reclaim after we've shut down the syncd
+	 * workqueue during unmount, so don't allow reclaim work to be queued
+	 * during unmount.
+	 */
+	if (!(mp->m_super->s_flags & MS_ACTIVE))
+		return;
 
-		list_for_each_entry_safe(work, n, &tmp, w_list) {
-			(*work->w_syncer)(mp, work->w_data);
-			list_del(&work->w_list);
-			if (work == &mp->m_sync_work)
-				continue;
-			if (work->w_completion)
-				complete(work->w_completion);
-			kmem_free(work);
-		}
+	rcu_read_lock();
+	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
+		queue_delayed_work(xfs_syncd_wq, &mp->m_reclaim_work,
+			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
 	}
+	rcu_read_unlock();
+}
 
-	return 0;
+/*
+ * This is a fast pass over the inode cache to try to get reclaim moving on as
+ * many inodes as possible in a short period of time. It kicks itself every few
+ * seconds, as well as being kicked by the inode cache shrinker when memory
+ * goes low. It scans as quickly as possible avoiding locked inodes or those
+ * already being flushed, and once done schedules a future pass.
+ */
+STATIC void
+xfs_reclaim_worker(
+	struct work_struct *work)
+{
+	struct xfs_mount *mp = container_of(to_delayed_work(work),
+					struct xfs_mount, m_reclaim_work);
+
+	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
+	xfs_syncd_queue_reclaim(mp);
+}
+
+/*
+ * Flush delayed allocate data, attempting to free up reserved space
+ * from existing allocations.  At this point a new allocation attempt
+ * has failed with ENOSPC and we are in the process of scratching our
+ * heads, looking about for more room.
+ *
+ * Queue a new data flush if there isn't one already in progress and
+ * wait for completion of the flush. This means that we only ever have one
+ * inode flush in progress no matter how many ENOSPC events are occurring and
+ * so will prevent the system from bogging down due to every concurrent
+ * ENOSPC event scanning all the active inodes in the system for writeback.
+ */
+void
+xfs_flush_inodes(
+	struct xfs_inode	*ip)
+{
+	struct xfs_mount	*mp = ip->i_mount;
+
+	queue_work(xfs_syncd_wq, &mp->m_flush_work);
+	flush_work_sync(&mp->m_flush_work);
+}
+
+STATIC void
+xfs_flush_worker(
+	struct work_struct *work)
+{
+	struct xfs_mount *mp = container_of(work,
+					struct xfs_mount, m_flush_work);
+
+	xfs_sync_data(mp, SYNC_TRYLOCK);
+	xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
 }
 
 int
 xfs_syncd_init(
 	struct xfs_mount	*mp)
 {
-	mp->m_sync_work.w_syncer = xfs_sync_worker;
-	mp->m_sync_work.w_mount = mp;
-	mp->m_sync_work.w_completion = NULL;
-	mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd/%s", mp->m_fsname);
-	if (IS_ERR(mp->m_sync_task))
-		return -PTR_ERR(mp->m_sync_task);
+	INIT_WORK(&mp->m_flush_work, xfs_flush_worker);
+	INIT_DELAYED_WORK(&mp->m_sync_work, xfs_sync_worker);
+	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
+
+	xfs_syncd_queue_sync(mp);
+	xfs_syncd_queue_reclaim(mp);
+
 	return 0;
 }
 
@@ -582,7 +569,9 @@ void
 xfs_syncd_stop(
 	struct xfs_mount	*mp)
 {
-	kthread_stop(mp->m_sync_task);
+	cancel_delayed_work_sync(&mp->m_sync_work);
+	cancel_delayed_work_sync(&mp->m_reclaim_work);
+	cancel_work_sync(&mp->m_flush_work);
 }
 
 void
@@ -601,6 +590,10 @@ __xfs_inode_set_reclaim_tag(
 				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
 				XFS_ICI_RECLAIM_TAG);
 		spin_unlock(&ip->i_mount->m_perag_lock);
+
+		/* schedule periodic background inode reclaim */
+		xfs_syncd_queue_reclaim(ip->i_mount);
+
 		trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
 							-1, _RET_IP_);
 	}
@@ -1017,7 +1010,13 @@ xfs_reclaim_inodes(
 }
 
 /*
- * Shrinker infrastructure.
+ * Inode cache shrinker.
+ *
+ * When called we make sure that there is a background (fast) inode reclaim in
+ * progress, while we will throttle the speed of reclaim via doing synchronous
+ * reclaim of inodes. That means if we come across dirty inodes, we wait for
+ * them to be cleaned, which we hope will not be very long due to the
+ * background walker having already kicked the IO off on those dirty inodes.
  */
 static int
 xfs_reclaim_inode_shrink(
@@ -1032,10 +1031,15 @@ xfs_reclaim_inode_shrink(
 
 	mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
 	if (nr_to_scan) {
+		/* kick background reclaimer and push the AIL */
+		xfs_syncd_queue_reclaim(mp);
+		xfs_ail_push_all(mp->m_ail);
+
 		if (!(gfp_mask & __GFP_FS))
 			return -1;
 
-		xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK, &nr_to_scan);
+		xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT,
+					&nr_to_scan);
 		/* terminate if we don't exhaust the scan */
 		if (nr_to_scan > 0)
 			return -1;
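The thread-to-workqueue conversion follows one lifecycle throughout: INIT_DELAYED_WORK() binds the handler, each handler re-queues itself at the end of its pass, queueing is refused once the superblock loses MS_ACTIVE, and teardown uses the _sync cancel variants, which wait for a running instance to finish before returning. In outline:

	/* setup (xfs_syncd_init) */
	INIT_DELAYED_WORK(&mp->m_sync_work, xfs_sync_worker);
	queue_delayed_work(xfs_syncd_wq, &mp->m_sync_work,
			   msecs_to_jiffies(xfs_syncd_centisecs * 10));

	/* xfs_sync_worker() ends by calling xfs_syncd_queue_sync(mp),
	 * re-arming itself: a periodic pass without a dedicated kthread. */

	/* teardown (xfs_syncd_stop): waits out a running pass */
	cancel_delayed_work_sync(&mp->m_sync_work);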
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
index 32ba6628290c..e3a6ad27415f 100644
--- a/fs/xfs/linux-2.6/xfs_sync.h
+++ b/fs/xfs/linux-2.6/xfs_sync.h
@@ -32,6 +32,8 @@ typedef struct xfs_sync_work {
 #define SYNC_WAIT		0x0001	/* wait for i/o to complete */
 #define SYNC_TRYLOCK		0x0002	/* only try to lock inodes */
 
+extern struct workqueue_struct *xfs_syncd_wq;	/* sync workqueue */
+
 int xfs_syncd_init(struct xfs_mount *mp);
 void xfs_syncd_stop(struct xfs_mount *mp);
 
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 254ee062bd7d..69228aa8605a 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -461,12 +461,10 @@ xfs_qm_dqflush_all(
 	struct xfs_quotainfo	*q = mp->m_quotainfo;
 	int			recl;
 	struct xfs_dquot	*dqp;
-	int			niters;
 	int			error;
 
 	if (!q)
 		return 0;
-	niters = 0;
 again:
 	mutex_lock(&q->qi_dqlist_lock);
 	list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
@@ -1314,14 +1312,9 @@ xfs_qm_dqiter_bufs(
 {
 	xfs_buf_t	*bp;
 	int		error;
-	int		notcommitted;
-	int		incr;
 	int		type;
 
 	ASSERT(blkcnt > 0);
-	notcommitted = 0;
-	incr = (blkcnt > XFS_QM_MAX_DQCLUSTER_LOGSZ) ?
-		XFS_QM_MAX_DQCLUSTER_LOGSZ : blkcnt;
 	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
 		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
 	error = 0;
diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h
index c9446f1c726d..567b29b9f1b3 100644
--- a/fs/xfs/quota/xfs_qm.h
+++ b/fs/xfs/quota/xfs_qm.h
@@ -65,11 +65,6 @@ extern kmem_zone_t *qm_dqtrxzone;
  * block in the dquot/xqm code.
  */
 #define XFS_DQUOT_CLUSTER_SIZE_FSB	(xfs_filblks_t)1
-/*
- * When doing a quotacheck, we log dquot clusters of this many FSBs at most
- * in a single transaction. We don't want to ask for too huge a log reservation.
- */
-#define XFS_QM_MAX_DQCLUSTER_LOGSZ	3
 
 typedef xfs_dqhash_t	xfs_dqlist_t;
 
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index 0d62a07b7fd8..2dadb15d5ca9 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -313,14 +313,12 @@ xfs_qm_scall_quotaon(
 {
 	int		error;
 	uint		qf;
-	uint		accflags;
 	__int64_t	sbflags;
 
 	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
 	/*
 	 * Switching on quota accounting must be done at mount time.
 	 */
-	accflags = flags & XFS_ALL_QUOTA_ACCT;
 	flags &= ~(XFS_ALL_QUOTA_ACCT);
 
 	sbflags = 0;
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 4bc3c649aee4..27d64d752eab 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -2395,17 +2395,33 @@ xfs_free_extent(
 	memset(&args, 0, sizeof(xfs_alloc_arg_t));
 	args.tp = tp;
 	args.mp = tp->t_mountp;
+
+	/*
+	 * validate that the block number is legal - this enables us to detect
+	 * and handle a silent filesystem corruption rather than crashing.
+	 */
 	args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
-	ASSERT(args.agno < args.mp->m_sb.sb_agcount);
+	if (args.agno >= args.mp->m_sb.sb_agcount)
+		return EFSCORRUPTED;
+
 	args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
+	if (args.agbno >= args.mp->m_sb.sb_agblocks)
+		return EFSCORRUPTED;
+
 	args.pag = xfs_perag_get(args.mp, args.agno);
-	if ((error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING)))
+	ASSERT(args.pag);
+
+	error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
+	if (error)
 		goto error0;
-#ifdef DEBUG
-	ASSERT(args.agbp != NULL);
-	ASSERT((args.agbno + len) <=
-		be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length));
-#endif
+
+	/* validate the extent size is legal now we have the agf locked */
+	if (args.agbno + len >
+	    be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length)) {
+		error = EFSCORRUPTED;
+		goto error0;
+	}
+
 	error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
 error0:
 	xfs_perag_put(args.pag);
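Swapping the ASSERTs for explicit checks changes the failure semantics: ASSERT() compiles away on non-DEBUG kernels, so a corrupt block number previously marched straight into xfs_free_ag_extent() with out-of-range AG indices, whereas now every build reports the problem (XFS returns errors as positive values internally, hence the bare EFSCORRUPTED). The idiom:

	args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
	if (args.agno >= args.mp->m_sb.sb_agcount)
		return EFSCORRUPTED;	/* handle corruption, don't crash */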
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 46cc40131d4a..576fdfe81d60 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -198,6 +198,41 @@ xfs_inode_item_size(
 }
 
 /*
+ * xfs_inode_item_format_extents - convert in-core extents to on-disk form
+ *
+ * For either the data or attr fork in extent format, we need to endian convert
+ * the in-core extents as we place them into the on-disk inode. In this case,
+ * we need to do this conversion before we write the extents into the log.
+ * Because we don't have the disk inode to write into here, we allocate a
+ * buffer and format the extents into it via xfs_iextents_copy(). We free the
+ * buffer in the unlock routine after the copy for the log has been made.
+ *
+ * In the case of the data fork, the in-core and on-disk fork sizes can be
+ * different due to delayed allocation extents. We only log on-disk extents
+ * here, so always use the physical fork size to determine the size of the
+ * buffer we need to allocate.
+ */
+STATIC void
+xfs_inode_item_format_extents(
+	struct xfs_inode	*ip,
+	struct xfs_log_iovec	*vecp,
+	int			whichfork,
+	int			type)
+{
+	xfs_bmbt_rec_t		*ext_buffer;
+
+	ext_buffer = kmem_alloc(XFS_IFORK_SIZE(ip, whichfork), KM_SLEEP);
+	if (whichfork == XFS_DATA_FORK)
+		ip->i_itemp->ili_extents_buf = ext_buffer;
+	else
+		ip->i_itemp->ili_aextents_buf = ext_buffer;
+
+	vecp->i_addr = ext_buffer;
+	vecp->i_len = xfs_iextents_copy(ip, ext_buffer, whichfork);
+	vecp->i_type = type;
+}
+
+/*
  * This is called to fill in the vector of log iovecs for the
  * given inode log item.  It fills the first item with an inode
  * log format structure, the second with the on-disk inode structure,
@@ -213,7 +248,6 @@ xfs_inode_item_format(
 	struct xfs_inode	*ip = iip->ili_inode;
 	uint			nvecs;
 	size_t			data_bytes;
-	xfs_bmbt_rec_t		*ext_buffer;
 	xfs_mount_t		*mp;
 
 	vecp->i_addr = &iip->ili_format;
@@ -320,22 +354,8 @@ xfs_inode_item_format(
 			} else
 #endif
 			{
-				/*
-				 * There are delayed allocation extents
-				 * in the inode, or we need to convert
-				 * the extents to on disk format.
-				 * Use xfs_iextents_copy()
-				 * to copy only the real extents into
-				 * a separate buffer.  We'll free the
-				 * buffer in the unlock routine.
-				 */
-				ext_buffer = kmem_alloc(ip->i_df.if_bytes,
-					KM_SLEEP);
-				iip->ili_extents_buf = ext_buffer;
-				vecp->i_addr = ext_buffer;
-				vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
-					XFS_DATA_FORK);
-				vecp->i_type = XLOG_REG_TYPE_IEXT;
+				xfs_inode_item_format_extents(ip, vecp,
+					XFS_DATA_FORK, XLOG_REG_TYPE_IEXT);
 			}
 			ASSERT(vecp->i_len <= ip->i_df.if_bytes);
 			iip->ili_format.ilf_dsize = vecp->i_len;
@@ -445,19 +465,12 @@ xfs_inode_item_format(
 			 */
 			vecp->i_addr = ip->i_afp->if_u1.if_extents;
 			vecp->i_len = ip->i_afp->if_bytes;
+			vecp->i_type = XLOG_REG_TYPE_IATTR_EXT;
 #else
 			ASSERT(iip->ili_aextents_buf == NULL);
-			/*
-			 * Need to endian flip before logging
-			 */
-			ext_buffer = kmem_alloc(ip->i_afp->if_bytes,
-				KM_SLEEP);
-			iip->ili_aextents_buf = ext_buffer;
-			vecp->i_addr = ext_buffer;
-			vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
-				XFS_ATTR_FORK);
+			xfs_inode_item_format_extents(ip, vecp,
+				XFS_ATTR_FORK, XLOG_REG_TYPE_IATTR_EXT);
 #endif
-			vecp->i_type = XLOG_REG_TYPE_IATTR_EXT;
 			iip->ili_format.ilf_asize = vecp->i_len;
 			vecp++;
 			nvecs++;
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index dc1882adaf54..751e94fe1f77 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -204,7 +204,6 @@ xfs_bulkstat(
 	xfs_agi_t		*agi;	/* agi header data */
 	xfs_agino_t		agino;	/* inode # in allocation group */
 	xfs_agnumber_t		agno;	/* allocation group number */
-	xfs_daddr_t		bno;	/* inode cluster start daddr */
 	int			chunkidx; /* current index into inode chunk */
 	int			clustidx; /* current index into inode cluster */
 	xfs_btree_cur_t		*cur;	/* btree cursor for ialloc btree */
@@ -463,7 +462,6 @@ xfs_bulkstat(
 					mp->m_sb.sb_inopblog);
 			}
 			ino = XFS_AGINO_TO_INO(mp, agno, agino);
-			bno = XFS_AGB_TO_DADDR(mp, agno, agbno);
 			/*
 			 * Skip if this inode is free.
 			 */
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 25efa9b8a602..b612ce4520ae 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -761,7 +761,7 @@ xfs_log_need_covered(xfs_mount_t *mp)
 		break;
 	case XLOG_STATE_COVER_NEED:
 	case XLOG_STATE_COVER_NEED2:
-		if (!xfs_trans_ail_tail(log->l_ailp) &&
+		if (!xfs_ail_min_lsn(log->l_ailp) &&
 		    xlog_iclogs_empty(log)) {
 			if (log->l_covered_state == XLOG_STATE_COVER_NEED)
 				log->l_covered_state = XLOG_STATE_COVER_DONE;
@@ -801,7 +801,7 @@ xlog_assign_tail_lsn(
 	xfs_lsn_t		tail_lsn;
 	struct log		*log = mp->m_log;
 
-	tail_lsn = xfs_trans_ail_tail(mp->m_ail);
+	tail_lsn = xfs_ail_min_lsn(mp->m_ail);
 	if (!tail_lsn)
 		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
 
@@ -1239,7 +1239,7 @@ xlog_grant_push_ail(
 	 * the filesystem is shutting down.
 	 */
 	if (!XLOG_FORCED_SHUTDOWN(log))
-		xfs_trans_ail_push(log->l_ailp, threshold_lsn);
+		xfs_ail_push(log->l_ailp, threshold_lsn);
 }
 
 /*
@@ -3407,6 +3407,17 @@ xlog_verify_dest_ptr(
 		xfs_emerg(log->l_mp, "%s: invalid ptr", __func__);
 }
 
+/*
+ * Check to make sure the grant write head didn't just overlap the tail. If
+ * the cycles are the same, we can't be overlapping. Otherwise, make sure that
+ * the cycles differ by exactly one and check the byte count.
+ *
+ * This check is run unlocked, so can give false positives. Rather than assert
+ * on failures, use a warn-once flag and a panic tag to allow the admin to
+ * determine if they want to panic the machine when such an error occurs. For
+ * debug kernels this will have the same effect as using an assert but, unlike
+ * an assert, it can be turned off at runtime.
+ */
 STATIC void
 xlog_verify_grant_tail(
 	struct log	*log)
@@ -3414,17 +3425,22 @@ xlog_verify_grant_tail(
 	int		tail_cycle, tail_blocks;
 	int		cycle, space;
 
-	/*
-	 * Check to make sure the grant write head didn't just overlap the
-	 * tail. If the cycles are the same, we can't be overlapping.
-	 * Otherwise, make sure that the cycles differ by exactly one and
-	 * check the byte count.
-	 */
 	xlog_crack_grant_head(&log->l_grant_write_head, &cycle, &space);
 	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
 	if (tail_cycle != cycle) {
-		ASSERT(cycle - 1 == tail_cycle);
-		ASSERT(space <= BBTOB(tail_blocks));
+		if (cycle - 1 != tail_cycle &&
+		    !(log->l_flags & XLOG_TAIL_WARN)) {
+			xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
+				"%s: cycle - 1 != tail_cycle", __func__);
+			log->l_flags |= XLOG_TAIL_WARN;
+		}
+
+		if (space > BBTOB(tail_blocks) &&
+		    !(log->l_flags & XLOG_TAIL_WARN)) {
+			xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
+				"%s: space > BBTOB(tail_blocks)", __func__);
+			log->l_flags |= XLOG_TAIL_WARN;
+		}
 	}
 }
 
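Because xlog_verify_grant_tail() runs unlocked and can see transient inconsistencies, the ASSERTs become rate-limited alerts: the first hit logs through xfs_alert_tag() with XFS_PTAG_LOGRES (letting the administrator opt into a panic via xfs_panic_mask) and sets XLOG_TAIL_WARN so later hits stay quiet. The warn-once skeleton (condition name hypothetical):

	if (suspicious && !(log->l_flags & XLOG_TAIL_WARN)) {
		xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
			"%s: grant head overlaps tail", __func__);
		log->l_flags |= XLOG_TAIL_WARN;	/* warn only once */
	}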
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index ffae692c9832..5864850e9e34 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -144,6 +144,7 @@ static inline uint xlog_get_client_id(__be32 i)
 #define XLOG_RECOVERY_NEEDED	0x4	/* log was recovered */
 #define XLOG_IO_ERROR		0x8	/* log hit an I/O error, and being
 					   shutdown */
+#define XLOG_TAIL_WARN		0x10	/* log tail verify warning issued */
 
 #ifdef __KERNEL__
 /*
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index a62e8971539d..19af0ab0d0c6 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -203,12 +203,9 @@ typedef struct xfs_mount {
 	struct mutex		m_icsb_mutex;	/* balancer sync lock */
 #endif
 	struct xfs_mru_cache	*m_filestream;	/* per-mount filestream data */
-	struct task_struct	*m_sync_task;	/* generalised sync thread */
-	xfs_sync_work_t		m_sync_work;	/* work item for VFS_SYNC */
-	struct list_head	m_sync_list;	/* sync thread work item list */
-	spinlock_t		m_sync_lock;	/* work item list lock */
-	int			m_sync_seq;	/* sync thread generation no. */
-	wait_queue_head_t	m_wait_single_sync_task;
+	struct delayed_work	m_sync_work;	/* background sync work */
+	struct delayed_work	m_reclaim_work;	/* background inode reclaim */
+	struct work_struct	m_flush_work;	/* background inode flush */
 	__int64_t		m_update_flags;	/* sb flags we need to update
 						   on the next remount,rw */
 	struct shrinker		m_inode_shrink;	/* inode reclaim shrinker */
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 12aff9584e29..acdb92f14d51 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -28,74 +28,138 @@
28#include "xfs_trans_priv.h" 28#include "xfs_trans_priv.h"
29#include "xfs_error.h" 29#include "xfs_error.h"
30 30
31STATIC void xfs_ail_splice(struct xfs_ail *, struct list_head *, xfs_lsn_t); 31struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */
32STATIC void xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *);
33STATIC xfs_log_item_t * xfs_ail_min(struct xfs_ail *);
34STATIC xfs_log_item_t * xfs_ail_next(struct xfs_ail *, xfs_log_item_t *);
35 32
36#ifdef DEBUG 33#ifdef DEBUG
37STATIC void xfs_ail_check(struct xfs_ail *, xfs_log_item_t *); 34/*
38#else 35 * Check that the list is sorted as it should be.
36 */
37STATIC void
38xfs_ail_check(
39 struct xfs_ail *ailp,
40 xfs_log_item_t *lip)
41{
42 xfs_log_item_t *prev_lip;
43
44 if (list_empty(&ailp->xa_ail))
45 return;
46
47 /*
48 * Check the next and previous entries are valid.
49 */
50 ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
51 prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
52 if (&prev_lip->li_ail != &ailp->xa_ail)
53 ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
54
55 prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
56 if (&prev_lip->li_ail != &ailp->xa_ail)
57 ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);
58
59
60#ifdef XFS_TRANS_DEBUG
61 /*
62 * Walk the list checking lsn ordering, and that every entry has the
63 * XFS_LI_IN_AIL flag set. This is really expensive, so only do it
64 * when specifically debugging the transaction subsystem.
65 */
66 prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
67 list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
68 if (&prev_lip->li_ail != &ailp->xa_ail)
69 ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
70 ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
71 prev_lip = lip;
72 }
73#endif /* XFS_TRANS_DEBUG */
74}
75#else /* !DEBUG */
39#define xfs_ail_check(a,l) 76#define xfs_ail_check(a,l)
40#endif /* DEBUG */ 77#endif /* DEBUG */
41 78
79/*
80 * Return a pointer to the first item in the AIL. If the AIL is empty, then
81 * return NULL.
82 */
83static xfs_log_item_t *
84xfs_ail_min(
85 struct xfs_ail *ailp)
86{
87 if (list_empty(&ailp->xa_ail))
88 return NULL;
89
90 return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
91}
92
 93/*
94 * Return a pointer to the last item in the AIL. If the AIL is empty, then
95 * return NULL.
96 */
97static xfs_log_item_t *
98xfs_ail_max(
99 struct xfs_ail *ailp)
100{
101 if (list_empty(&ailp->xa_ail))
102 return NULL;
103
104 return list_entry(ailp->xa_ail.prev, xfs_log_item_t, li_ail);
105}
106
107/*
108 * Return a pointer to the item which follows the given item in the AIL. If
109 * the given item is the last item in the list, then return NULL.
110 */
111static xfs_log_item_t *
112xfs_ail_next(
113 struct xfs_ail *ailp,
114 xfs_log_item_t *lip)
115{
116 if (lip->li_ail.next == &ailp->xa_ail)
117 return NULL;
118
119 return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
120}
42 121
43/* 122/*
44 * This is called by the log manager code to determine the LSN 123 * This is called by the log manager code to determine the LSN of the tail of
45 * of the tail of the log. This is exactly the LSN of the first 124 * the log. This is exactly the LSN of the first item in the AIL. If the AIL
46 * item in the AIL. If the AIL is empty, then this function 125 * is empty, then this function returns 0.
47 * returns 0.
48 * 126 *
 49 * We need the AIL lock in order to get a coherent read of the 127 * We need the AIL lock in order to get a coherent read of the lsn of the first
50 * lsn of the last item in the AIL. 128 * item in the AIL.
51 */ 129 */
52xfs_lsn_t 130xfs_lsn_t
53xfs_trans_ail_tail( 131xfs_ail_min_lsn(
54 struct xfs_ail *ailp) 132 struct xfs_ail *ailp)
55{ 133{
56 xfs_lsn_t lsn; 134 xfs_lsn_t lsn = 0;
57 xfs_log_item_t *lip; 135 xfs_log_item_t *lip;
58 136
59 spin_lock(&ailp->xa_lock); 137 spin_lock(&ailp->xa_lock);
60 lip = xfs_ail_min(ailp); 138 lip = xfs_ail_min(ailp);
61 if (lip == NULL) { 139 if (lip)
62 lsn = (xfs_lsn_t)0;
63 } else {
64 lsn = lip->li_lsn; 140 lsn = lip->li_lsn;
65 }
66 spin_unlock(&ailp->xa_lock); 141 spin_unlock(&ailp->xa_lock);
67 142
68 return lsn; 143 return lsn;
69} 144}
70 145
71/* 146/*
72 * xfs_trans_push_ail 147 * Return the maximum lsn held in the AIL, or zero if the AIL is empty.
73 *
74 * This routine is called to move the tail of the AIL forward. It does this by
75 * trying to flush items in the AIL whose lsns are below the given
76 * threshold_lsn.
77 *
78 * the push is run asynchronously in a separate thread, so we return the tail
79 * of the log right now instead of the tail after the push. This means we will
80 * either continue right away, or we will sleep waiting on the async thread to
81 * do its work.
82 *
83 * We do this unlocked - we only need to know whether there is anything in the
84 * AIL at the time we are called. We don't need to access the contents of
85 * any of the objects, so the lock is not needed.
86 */ 148 */
87void 149static xfs_lsn_t
88xfs_trans_ail_push( 150xfs_ail_max_lsn(
89 struct xfs_ail *ailp, 151 struct xfs_ail *ailp)
90 xfs_lsn_t threshold_lsn)
91{ 152{
92 xfs_log_item_t *lip; 153 xfs_lsn_t lsn = 0;
154 xfs_log_item_t *lip;
93 155
94 lip = xfs_ail_min(ailp); 156 spin_lock(&ailp->xa_lock);
95 if (lip && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) { 157 lip = xfs_ail_max(ailp);
96 if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0) 158 if (lip)
97 xfsaild_wakeup(ailp, threshold_lsn); 159 lsn = lip->li_lsn;
98 } 160 spin_unlock(&ailp->xa_lock);
161
162 return lsn;
99} 163}
100 164
101/* 165/*
@@ -236,16 +300,57 @@ out:
236} 300}
237 301
238/* 302/*
239 * xfsaild_push does the work of pushing on the AIL. Returning a timeout of 303 * Splice the log item list into the AIL at the given LSN.
240 * zero indicates that the caller should sleep until woken.
241 */ 304 */
242long 305static void
243xfsaild_push( 306xfs_ail_splice(
244 struct xfs_ail *ailp, 307 struct xfs_ail *ailp,
245 xfs_lsn_t *last_lsn) 308 struct list_head *list,
309 xfs_lsn_t lsn)
246{ 310{
247 long tout = 0; 311 xfs_log_item_t *next_lip;
248 xfs_lsn_t last_pushed_lsn = *last_lsn; 312
313 /* If the list is empty, just insert the item. */
314 if (list_empty(&ailp->xa_ail)) {
315 list_splice(list, &ailp->xa_ail);
316 return;
317 }
318
319 list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
320 if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
321 break;
322 }
323
324 ASSERT(&next_lip->li_ail == &ailp->xa_ail ||
325 XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0);
326
327 list_splice_init(list, &next_lip->li_ail);
328}
329
330/*
331 * Delete the given item from the AIL.
332 */
333static void
334xfs_ail_delete(
335 struct xfs_ail *ailp,
336 xfs_log_item_t *lip)
337{
338 xfs_ail_check(ailp, lip);
339 list_del(&lip->li_ail);
340 xfs_trans_ail_cursor_clear(ailp, lip);
341}
342
343/*
344 * xfs_ail_worker does the work of pushing on the AIL. It will requeue itself
345 * to run at a later time if there is more work to do to complete the push.
346 */
347STATIC void
348xfs_ail_worker(
349 struct work_struct *work)
350{
351 struct xfs_ail *ailp = container_of(to_delayed_work(work),
352 struct xfs_ail, xa_work);
353 long tout;
249 xfs_lsn_t target = ailp->xa_target; 354 xfs_lsn_t target = ailp->xa_target;
250 xfs_lsn_t lsn; 355 xfs_lsn_t lsn;
251 xfs_log_item_t *lip; 356 xfs_log_item_t *lip;
@@ -256,15 +361,15 @@ xfsaild_push(
256 361
257 spin_lock(&ailp->xa_lock); 362 spin_lock(&ailp->xa_lock);
258 xfs_trans_ail_cursor_init(ailp, cur); 363 xfs_trans_ail_cursor_init(ailp, cur);
259 lip = xfs_trans_ail_cursor_first(ailp, cur, *last_lsn); 364 lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn);
260 if (!lip || XFS_FORCED_SHUTDOWN(mp)) { 365 if (!lip || XFS_FORCED_SHUTDOWN(mp)) {
261 /* 366 /*
262 * AIL is empty or our push has reached the end. 367 * AIL is empty or our push has reached the end.
263 */ 368 */
264 xfs_trans_ail_cursor_done(ailp, cur); 369 xfs_trans_ail_cursor_done(ailp, cur);
265 spin_unlock(&ailp->xa_lock); 370 spin_unlock(&ailp->xa_lock);
266 *last_lsn = 0; 371 ailp->xa_last_pushed_lsn = 0;
267 return tout; 372 return;
268 } 373 }
269 374
270 XFS_STATS_INC(xs_push_ail); 375 XFS_STATS_INC(xs_push_ail);
@@ -301,13 +406,13 @@ xfsaild_push(
301 case XFS_ITEM_SUCCESS: 406 case XFS_ITEM_SUCCESS:
302 XFS_STATS_INC(xs_push_ail_success); 407 XFS_STATS_INC(xs_push_ail_success);
303 IOP_PUSH(lip); 408 IOP_PUSH(lip);
304 last_pushed_lsn = lsn; 409 ailp->xa_last_pushed_lsn = lsn;
305 break; 410 break;
306 411
307 case XFS_ITEM_PUSHBUF: 412 case XFS_ITEM_PUSHBUF:
308 XFS_STATS_INC(xs_push_ail_pushbuf); 413 XFS_STATS_INC(xs_push_ail_pushbuf);
309 IOP_PUSHBUF(lip); 414 IOP_PUSHBUF(lip);
310 last_pushed_lsn = lsn; 415 ailp->xa_last_pushed_lsn = lsn;
311 push_xfsbufd = 1; 416 push_xfsbufd = 1;
312 break; 417 break;
313 418
@@ -319,7 +424,7 @@ xfsaild_push(
319 424
320 case XFS_ITEM_LOCKED: 425 case XFS_ITEM_LOCKED:
321 XFS_STATS_INC(xs_push_ail_locked); 426 XFS_STATS_INC(xs_push_ail_locked);
322 last_pushed_lsn = lsn; 427 ailp->xa_last_pushed_lsn = lsn;
323 stuck++; 428 stuck++;
324 break; 429 break;
325 430
@@ -374,9 +479,23 @@ xfsaild_push(
374 wake_up_process(mp->m_ddev_targp->bt_task); 479 wake_up_process(mp->m_ddev_targp->bt_task);
375 } 480 }
376 481
482 /* assume we have more work to do in a short while */
483 tout = 10;
377 if (!count) { 484 if (!count) {
378 /* We're past our target or empty, so idle */ 485 /* We're past our target or empty, so idle */
379 last_pushed_lsn = 0; 486 ailp->xa_last_pushed_lsn = 0;
487
488 /*
489 * Check for an updated push target before clearing the
490 * XFS_AIL_PUSHING_BIT. If the target changed, we've got more
491 * work to do. Wait a bit longer before starting that work.
492 */
493 smp_rmb();
494 if (ailp->xa_target == target) {
495 clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
496 return;
497 }
498 tout = 50;
380 } else if (XFS_LSN_CMP(lsn, target) >= 0) { 499 } else if (XFS_LSN_CMP(lsn, target) >= 0) {
381 /* 500 /*
382 * We reached the target so wait a bit longer for I/O to 501 * We reached the target so wait a bit longer for I/O to
@@ -384,7 +503,7 @@ xfsaild_push(
384 * start the next scan from the start of the AIL. 503 * start the next scan from the start of the AIL.
385 */ 504 */
386 tout = 50; 505 tout = 50;
387 last_pushed_lsn = 0; 506 ailp->xa_last_pushed_lsn = 0;
388 } else if ((stuck * 100) / count > 90) { 507 } else if ((stuck * 100) / count > 90) {
389 /* 508 /*
390 * Either there is a lot of contention on the AIL or we 509 * Either there is a lot of contention on the AIL or we
@@ -396,14 +515,61 @@ xfsaild_push(
396 * continuing from where we were. 515 * continuing from where we were.
397 */ 516 */
398 tout = 20; 517 tout = 20;
399 } else {
400 /* more to do, but wait a short while before continuing */
401 tout = 10;
402 } 518 }
403 *last_lsn = last_pushed_lsn; 519
404 return tout; 520 /* There is more to do, requeue us. */
521 queue_delayed_work(xfs_syncd_wq, &ailp->xa_work,
522 msecs_to_jiffies(tout));
523}
524
525/*
526 * This routine is called to move the tail of the AIL forward. It does this by
527 * trying to flush items in the AIL whose lsns are below the given
528 * threshold_lsn.
529 *
530 * The push is run asynchronously in a workqueue, which means the caller needs
531 * to handle waiting on the async flush for space to become available.
532 * We don't want to interrupt any push that is in progress, hence we only queue
533 * work if we set the pushing bit appropriately.
534 *
535 * We do this unlocked - we only need to know whether there is anything in the
536 * AIL at the time we are called. We don't need to access the contents of
537 * any of the objects, so the lock is not needed.
538 */
539void
540xfs_ail_push(
541 struct xfs_ail *ailp,
542 xfs_lsn_t threshold_lsn)
543{
544 xfs_log_item_t *lip;
545
546 lip = xfs_ail_min(ailp);
547 if (!lip || XFS_FORCED_SHUTDOWN(ailp->xa_mount) ||
548 XFS_LSN_CMP(threshold_lsn, ailp->xa_target) <= 0)
549 return;
550
551 /*
552 * Ensure that the new target is noticed in push code before it clears
553 * the XFS_AIL_PUSHING_BIT.
554 */
555 smp_wmb();
556 ailp->xa_target = threshold_lsn;
557 if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
558 queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
405} 559}
406 560
561/*
562 * Push out all items in the AIL immediately
563 */
564void
565xfs_ail_push_all(
566 struct xfs_ail *ailp)
567{
568 xfs_lsn_t threshold_lsn = xfs_ail_max_lsn(ailp);
569
570 if (threshold_lsn)
571 xfs_ail_push(ailp, threshold_lsn);
572}
407 573
408/* 574/*
409 * This is to be called when an item is unlocked that may have 575 * This is to be called when an item is unlocked that may have
@@ -615,7 +781,6 @@ xfs_trans_ail_init(
615 xfs_mount_t *mp) 781 xfs_mount_t *mp)
616{ 782{
617 struct xfs_ail *ailp; 783 struct xfs_ail *ailp;
618 int error;
619 784
620 ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL); 785 ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL);
621 if (!ailp) 786 if (!ailp)
@@ -624,15 +789,9 @@ xfs_trans_ail_init(
624 ailp->xa_mount = mp; 789 ailp->xa_mount = mp;
625 INIT_LIST_HEAD(&ailp->xa_ail); 790 INIT_LIST_HEAD(&ailp->xa_ail);
626 spin_lock_init(&ailp->xa_lock); 791 spin_lock_init(&ailp->xa_lock);
627 error = xfsaild_start(ailp); 792 INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker);
628 if (error)
629 goto out_free_ailp;
630 mp->m_ail = ailp; 793 mp->m_ail = ailp;
631 return 0; 794 return 0;
632
633out_free_ailp:
634 kmem_free(ailp);
635 return error;
636} 795}
637 796
638void 797void
@@ -641,124 +800,6 @@ xfs_trans_ail_destroy(
641{ 800{
642 struct xfs_ail *ailp = mp->m_ail; 801 struct xfs_ail *ailp = mp->m_ail;
643 802
644 xfsaild_stop(ailp); 803 cancel_delayed_work_sync(&ailp->xa_work);
645 kmem_free(ailp); 804 kmem_free(ailp);
646} 805}
647
648/*
649 * splice the log item list into the AIL at the given LSN.
650 */
651STATIC void
652xfs_ail_splice(
653 struct xfs_ail *ailp,
654 struct list_head *list,
655 xfs_lsn_t lsn)
656{
657 xfs_log_item_t *next_lip;
658
659 /*
660 * If the list is empty, just insert the item.
661 */
662 if (list_empty(&ailp->xa_ail)) {
663 list_splice(list, &ailp->xa_ail);
664 return;
665 }
666
667 list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
668 if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
669 break;
670 }
671
672 ASSERT((&next_lip->li_ail == &ailp->xa_ail) ||
673 (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0));
674
675 list_splice_init(list, &next_lip->li_ail);
676 return;
677}
678
679/*
680 * Delete the given item from the AIL. Return a pointer to the item.
681 */
682STATIC void
683xfs_ail_delete(
684 struct xfs_ail *ailp,
685 xfs_log_item_t *lip)
686{
687 xfs_ail_check(ailp, lip);
688 list_del(&lip->li_ail);
689 xfs_trans_ail_cursor_clear(ailp, lip);
690}
691
692/*
693 * Return a pointer to the first item in the AIL.
694 * If the AIL is empty, then return NULL.
695 */
696STATIC xfs_log_item_t *
697xfs_ail_min(
698 struct xfs_ail *ailp)
699{
700 if (list_empty(&ailp->xa_ail))
701 return NULL;
702
703 return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
704}
705
706/*
707 * Return a pointer to the item which follows
708 * the given item in the AIL. If the given item
709 * is the last item in the list, then return NULL.
710 */
711STATIC xfs_log_item_t *
712xfs_ail_next(
713 struct xfs_ail *ailp,
714 xfs_log_item_t *lip)
715{
716 if (lip->li_ail.next == &ailp->xa_ail)
717 return NULL;
718
719 return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
720}
721
722#ifdef DEBUG
723/*
724 * Check that the list is sorted as it should be.
725 */
726STATIC void
727xfs_ail_check(
728 struct xfs_ail *ailp,
729 xfs_log_item_t *lip)
730{
731 xfs_log_item_t *prev_lip;
732
733 if (list_empty(&ailp->xa_ail))
734 return;
735
736 /*
737 * Check the next and previous entries are valid.
738 */
739 ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
740 prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
741 if (&prev_lip->li_ail != &ailp->xa_ail)
742 ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
743
744 prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
745 if (&prev_lip->li_ail != &ailp->xa_ail)
746 ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);
747
748
749#ifdef XFS_TRANS_DEBUG
750 /*
751 * Walk the list checking lsn ordering, and that every entry has the
752 * XFS_LI_IN_AIL flag set. This is really expensive, so only do it
753 * when specifically debugging the transaction subsystem.
754 */
755 prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
756 list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
757 if (&prev_lip->li_ail != &ailp->xa_ail)
758 ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
759 ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
760 prev_lip = lip;
761 }
762#endif /* XFS_TRANS_DEBUG */
763}
764#endif /* DEBUG */
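
The xfs_ail_worker() conversion above replaces the dedicated xfsaild thread with a delayed work item that requeues itself while work remains. A minimal kernel-style sketch of that self-requeue pattern, with hypothetical names (struct pusher, pusher_worker) standing in for the XFS structures:

        #include <linux/workqueue.h>
        #include <linux/jiffies.h>

        struct pusher {
                struct delayed_work     work;
                bool                    more_work;      /* producer sets this */
        };

        static void pusher_worker(struct work_struct *work)
        {
                struct pusher *p = container_of(to_delayed_work(work),
                                                struct pusher, work);

                /* ... push one batch ... */

                /* Requeue ourselves if the batch didn't drain everything. */
                if (p->more_work)
                        schedule_delayed_work(&p->work, msecs_to_jiffies(10));
        }

        static void pusher_init(struct pusher *p)
        {
                INIT_DELAYED_WORK(&p->work, pusher_worker);
        }
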
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index 35162c238fa3..6b164e9e9a1f 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -65,16 +65,22 @@ struct xfs_ail_cursor {
65struct xfs_ail { 65struct xfs_ail {
66 struct xfs_mount *xa_mount; 66 struct xfs_mount *xa_mount;
67 struct list_head xa_ail; 67 struct list_head xa_ail;
68 uint xa_gen;
69 struct task_struct *xa_task;
70 xfs_lsn_t xa_target; 68 xfs_lsn_t xa_target;
71 struct xfs_ail_cursor xa_cursors; 69 struct xfs_ail_cursor xa_cursors;
72 spinlock_t xa_lock; 70 spinlock_t xa_lock;
71 struct delayed_work xa_work;
72 xfs_lsn_t xa_last_pushed_lsn;
73 unsigned long xa_flags;
73}; 74};
74 75
76#define XFS_AIL_PUSHING_BIT 0
77
75/* 78/*
76 * From xfs_trans_ail.c 79 * From xfs_trans_ail.c
77 */ 80 */
81
82extern struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */
83
78void xfs_trans_ail_update_bulk(struct xfs_ail *ailp, 84void xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
79 struct xfs_log_item **log_items, int nr_items, 85 struct xfs_log_item **log_items, int nr_items,
80 xfs_lsn_t lsn) __releases(ailp->xa_lock); 86 xfs_lsn_t lsn) __releases(ailp->xa_lock);
@@ -98,12 +104,13 @@ xfs_trans_ail_delete(
98 xfs_trans_ail_delete_bulk(ailp, &lip, 1); 104 xfs_trans_ail_delete_bulk(ailp, &lip, 1);
99} 105}
100 106
101void xfs_trans_ail_push(struct xfs_ail *, xfs_lsn_t); 107void xfs_ail_push(struct xfs_ail *, xfs_lsn_t);
108void xfs_ail_push_all(struct xfs_ail *);
109xfs_lsn_t xfs_ail_min_lsn(struct xfs_ail *ailp);
110
102void xfs_trans_unlocked_item(struct xfs_ail *, 111void xfs_trans_unlocked_item(struct xfs_ail *,
103 xfs_log_item_t *); 112 xfs_log_item_t *);
104 113
105xfs_lsn_t xfs_trans_ail_tail(struct xfs_ail *ailp);
106
107struct xfs_log_item *xfs_trans_ail_cursor_first(struct xfs_ail *ailp, 114struct xfs_log_item *xfs_trans_ail_cursor_first(struct xfs_ail *ailp,
108 struct xfs_ail_cursor *cur, 115 struct xfs_ail_cursor *cur,
109 xfs_lsn_t lsn); 116 xfs_lsn_t lsn);
@@ -112,11 +119,6 @@ struct xfs_log_item *xfs_trans_ail_cursor_next(struct xfs_ail *ailp,
112void xfs_trans_ail_cursor_done(struct xfs_ail *ailp, 119void xfs_trans_ail_cursor_done(struct xfs_ail *ailp,
113 struct xfs_ail_cursor *cur); 120 struct xfs_ail_cursor *cur);
114 121
115long xfsaild_push(struct xfs_ail *, xfs_lsn_t *);
116void xfsaild_wakeup(struct xfs_ail *, xfs_lsn_t);
117int xfsaild_start(struct xfs_ail *);
118void xfsaild_stop(struct xfs_ail *);
119
120#if BITS_PER_LONG != 64 122#if BITS_PER_LONG != 64
121static inline void 123static inline void
122xfs_trans_ail_copy_lsn( 124xfs_trans_ail_copy_lsn(
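
XFS_AIL_PUSHING_BIT above is a single-flight gate: any number of callers may request a push, but only the one that wins test_and_set_bit() queues the work, and the worker clears the bit when it goes idle (after re-checking the target, as the xfs_trans_ail.c hunk shows). A generic sketch of the handshake, with hypothetical names:

        #include <linux/bitops.h>
        #include <linux/workqueue.h>

        #define GATE_PUSHING_BIT        0

        struct gate {
                unsigned long           flags;
                struct delayed_work     work;
        };

        /* Caller side: at most one push is ever queued at a time. */
        static void gate_kick(struct gate *g)
        {
                if (!test_and_set_bit(GATE_PUSHING_BIT, &g->flags))
                        schedule_delayed_work(&g->work, 0);
        }

        /* Worker side: drop the gate only once there is nothing left,
         * after re-checking for requests that raced in just before. */
        static void gate_idle(struct gate *g)
        {
                clear_bit(GATE_PUSHING_BIT, &g->flags);
        }
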
diff --git a/include/linux/can/platform/mcp251x.h b/include/linux/can/platform/mcp251x.h
index 8e20540043f5..089fe43211a4 100644
--- a/include/linux/can/platform/mcp251x.h
+++ b/include/linux/can/platform/mcp251x.h
@@ -12,6 +12,7 @@
12/** 12/**
13 * struct mcp251x_platform_data - MCP251X SPI CAN controller platform data 13 * struct mcp251x_platform_data - MCP251X SPI CAN controller platform data
14 * @oscillator_frequency: - oscillator frequency in Hz 14 * @oscillator_frequency: - oscillator frequency in Hz
15 * @irq_flags: - IRQF configuration flags
15 * @board_specific_setup: - called before probing the chip (power,reset) 16 * @board_specific_setup: - called before probing the chip (power,reset)
16 * @transceiver_enable: - called to power on/off the transceiver 17 * @transceiver_enable: - called to power on/off the transceiver
17 * @power_enable: - called to power on/off the mcp *and* the 18 * @power_enable: - called to power on/off the mcp *and* the
@@ -24,6 +25,7 @@
24 25
25struct mcp251x_platform_data { 26struct mcp251x_platform_data {
26 unsigned long oscillator_frequency; 27 unsigned long oscillator_frequency;
28 unsigned long irq_flags;
27 int (*board_specific_setup)(struct spi_device *spi); 29 int (*board_specific_setup)(struct spi_device *spi);
28 int (*transceiver_enable)(int enable); 30 int (*transceiver_enable)(int enable);
29 int (*power_enable) (int enable); 31 int (*power_enable) (int enable);
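
The new irq_flags field lets board code hand IRQF trigger flags to the driver instead of the driver hard-coding them. A hypothetical board file might fill the structure like this (frequency and trigger are illustrative values only):

        #include <linux/can/platform/mcp251x.h>
        #include <linux/interrupt.h>

        static struct mcp251x_platform_data mcp251x_info = {
                .oscillator_frequency   = 16000000,             /* 16 MHz, example */
                .irq_flags              = IRQF_TRIGGER_FALLING, /* board wiring */
        };
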
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index ad1b19aa6508..aef23309a742 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -86,16 +86,25 @@ extern int mfd_clone_cell(const char *cell, const char **clones,
86 */ 86 */
87static inline const struct mfd_cell *mfd_get_cell(struct platform_device *pdev) 87static inline const struct mfd_cell *mfd_get_cell(struct platform_device *pdev)
88{ 88{
89 return pdev->dev.platform_data; 89 return pdev->mfd_cell;
90} 90}
91 91
92/* 92/*
93 * Given a platform device that's been created by mfd_add_devices(), fetch 93 * Given a platform device that's been created by mfd_add_devices(), fetch
94 * the .mfd_data entry from the mfd_cell that created it. 94 * the .mfd_data entry from the mfd_cell that created it.
95 * Otherwise just return the platform_data pointer.
96 * This maintains compatibility with platform drivers whose devices aren't
 97 * created by the mfd layer and which expect platform_data to contain what would've
98 * otherwise been in mfd_data.
95 */ 99 */
96static inline void *mfd_get_data(struct platform_device *pdev) 100static inline void *mfd_get_data(struct platform_device *pdev)
97{ 101{
98 return mfd_get_cell(pdev)->mfd_data; 102 const struct mfd_cell *cell = mfd_get_cell(pdev);
103
104 if (cell)
105 return cell->mfd_data;
106 else
107 return pdev->dev.platform_data;
99} 108}
100 109
101extern int mfd_add_devices(struct device *parent, int id, 110extern int mfd_add_devices(struct device *parent, int id,
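
With the fallback above, a driver shared between MFD-created and plain platform devices can call mfd_get_data() unconditionally: cells created by mfd_add_devices() resolve through pdev->mfd_cell, everything else falls back to platform_data. A sketch of a probe using it (struct example_pdata is made up):

        #include <linux/platform_device.h>
        #include <linux/mfd/core.h>

        struct example_pdata {
                int gpio;       /* whatever the driver needs */
        };

        static int example_probe(struct platform_device *pdev)
        {
                /* Works whether or not the MFD core created this device. */
                struct example_pdata *pdata = mfd_get_data(pdev);

                if (!pdata)
                        return -EINVAL;
                /* ... */
                return 0;
        }
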
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 94b48bd40dd7..c75471db576e 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -51,7 +51,7 @@ struct mutex {
51 spinlock_t wait_lock; 51 spinlock_t wait_lock;
52 struct list_head wait_list; 52 struct list_head wait_list;
53#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP) 53#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
54 struct thread_info *owner; 54 struct task_struct *owner;
55#endif 55#endif
56#ifdef CONFIG_DEBUG_MUTEXES 56#ifdef CONFIG_DEBUG_MUTEXES
57 const char *name; 57 const char *name;
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index eeec00abb664..7fa95df60146 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -270,7 +270,8 @@ struct nf_afinfo {
270 unsigned int dataoff, 270 unsigned int dataoff,
271 unsigned int len, 271 unsigned int len,
272 u_int8_t protocol); 272 u_int8_t protocol);
273 int (*route)(struct dst_entry **dst, struct flowi *fl); 273 int (*route)(struct net *net, struct dst_entry **dst,
274 struct flowi *fl, bool strict);
274 void (*saveroute)(const struct sk_buff *skb, 275 void (*saveroute)(const struct sk_buff *skb,
275 struct nf_queue_entry *entry); 276 struct nf_queue_entry *entry);
276 int (*reroute)(struct sk_buff *skb, 277 int (*reroute)(struct sk_buff *skb,
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index ec333d83f3b4..5a262e3ae715 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -293,7 +293,7 @@ struct ip_set {
293 /* Lock protecting the set data */ 293 /* Lock protecting the set data */
294 rwlock_t lock; 294 rwlock_t lock;
295 /* References to the set */ 295 /* References to the set */
296 atomic_t ref; 296 u32 ref;
297 /* The core set type */ 297 /* The core set type */
298 struct ip_set_type *type; 298 struct ip_set_type *type;
299 /* The type variant doing the real job */ 299 /* The type variant doing the real job */
diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h
index ec9d9bea1e37..a0196ac79051 100644
--- a/include/linux/netfilter/ipset/ip_set_ahash.h
+++ b/include/linux/netfilter/ipset/ip_set_ahash.h
@@ -515,8 +515,7 @@ type_pf_head(struct ip_set *set, struct sk_buff *skb)
515 if (h->netmask != HOST_MASK) 515 if (h->netmask != HOST_MASK)
516 NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, h->netmask); 516 NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, h->netmask);
517#endif 517#endif
518 NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, 518 NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
519 htonl(atomic_read(&set->ref) - 1));
520 NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)); 519 NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize));
521 if (with_timeout(h->timeout)) 520 if (with_timeout(h->timeout))
522 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout)); 521 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout));
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index d96db9825708..744942c95fec 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -14,6 +14,8 @@
14#include <linux/device.h> 14#include <linux/device.h>
15#include <linux/mod_devicetable.h> 15#include <linux/mod_devicetable.h>
16 16
17struct mfd_cell;
18
17struct platform_device { 19struct platform_device {
18 const char * name; 20 const char * name;
19 int id; 21 int id;
@@ -23,6 +25,9 @@ struct platform_device {
23 25
24 const struct platform_device_id *id_entry; 26 const struct platform_device_id *id_entry;
25 27
28 /* MFD cell pointer */
29 struct mfd_cell *mfd_cell;
30
26 /* arch specific additions */ 31 /* arch specific additions */
27 struct pdev_archdata archdata; 32 struct pdev_archdata archdata;
28}; 33};
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e43e5b0ab0b5..d9ca3aa511ff 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -360,7 +360,7 @@ extern signed long schedule_timeout_interruptible(signed long timeout);
360extern signed long schedule_timeout_killable(signed long timeout); 360extern signed long schedule_timeout_killable(signed long timeout);
361extern signed long schedule_timeout_uninterruptible(signed long timeout); 361extern signed long schedule_timeout_uninterruptible(signed long timeout);
362asmlinkage void schedule(void); 362asmlinkage void schedule(void);
363extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner); 363extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
364 364
365struct nsproxy; 365struct nsproxy;
366struct user_namespace; 366struct user_namespace;
@@ -1038,8 +1038,12 @@ struct sched_domain;
1038#define WF_FORK 0x02 /* child wakeup after fork */ 1038#define WF_FORK 0x02 /* child wakeup after fork */
1039 1039
1040#define ENQUEUE_WAKEUP 1 1040#define ENQUEUE_WAKEUP 1
1041#define ENQUEUE_WAKING 2 1041#define ENQUEUE_HEAD 2
1042#define ENQUEUE_HEAD 4 1042#ifdef CONFIG_SMP
1043#define ENQUEUE_WAKING 4 /* sched_class::task_waking was called */
1044#else
1045#define ENQUEUE_WAKING 0
1046#endif
1043 1047
1044#define DEQUEUE_SLEEP 1 1048#define DEQUEUE_SLEEP 1
1045 1049
@@ -1057,12 +1061,11 @@ struct sched_class {
1057 void (*put_prev_task) (struct rq *rq, struct task_struct *p); 1061 void (*put_prev_task) (struct rq *rq, struct task_struct *p);
1058 1062
1059#ifdef CONFIG_SMP 1063#ifdef CONFIG_SMP
1060 int (*select_task_rq)(struct rq *rq, struct task_struct *p, 1064 int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
1061 int sd_flag, int flags);
1062 1065
1063 void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); 1066 void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
1064 void (*post_schedule) (struct rq *this_rq); 1067 void (*post_schedule) (struct rq *this_rq);
1065 void (*task_waking) (struct rq *this_rq, struct task_struct *task); 1068 void (*task_waking) (struct task_struct *task);
1066 void (*task_woken) (struct rq *this_rq, struct task_struct *task); 1069 void (*task_woken) (struct rq *this_rq, struct task_struct *task);
1067 1070
1068 void (*set_cpus_allowed)(struct task_struct *p, 1071 void (*set_cpus_allowed)(struct task_struct *p,
@@ -1190,10 +1193,10 @@ struct task_struct {
1190 int lock_depth; /* BKL lock depth */ 1193 int lock_depth; /* BKL lock depth */
1191 1194
1192#ifdef CONFIG_SMP 1195#ifdef CONFIG_SMP
1193#ifdef __ARCH_WANT_UNLOCKED_CTXSW 1196 struct task_struct *wake_entry;
1194 int oncpu; 1197 int on_cpu;
1195#endif
1196#endif 1198#endif
1199 int on_rq;
1197 1200
1198 int prio, static_prio, normal_prio; 1201 int prio, static_prio, normal_prio;
1199 unsigned int rt_priority; 1202 unsigned int rt_priority;
@@ -1261,6 +1264,7 @@ struct task_struct {
1261 1264
1262 /* Revert to default priority/policy when forking */ 1265 /* Revert to default priority/policy when forking */
1263 unsigned sched_reset_on_fork:1; 1266 unsigned sched_reset_on_fork:1;
1267 unsigned sched_contributes_to_load:1;
1264 1268
1265 pid_t pid; 1269 pid_t pid;
1266 pid_t tgid; 1270 pid_t tgid;
@@ -2179,8 +2183,10 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
2179extern char *get_task_comm(char *to, struct task_struct *tsk); 2183extern char *get_task_comm(char *to, struct task_struct *tsk);
2180 2184
2181#ifdef CONFIG_SMP 2185#ifdef CONFIG_SMP
2186void scheduler_ipi(void);
2182extern unsigned long wait_task_inactive(struct task_struct *, long match_state); 2187extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
2183#else 2188#else
2189static inline void scheduler_ipi(void) { }
2184static inline unsigned long wait_task_inactive(struct task_struct *p, 2190static inline unsigned long wait_task_inactive(struct task_struct *p,
2185 long match_state) 2191 long match_state)
2186{ 2192{
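
Defining ENQUEUE_WAKING as 0 on !SMP (above) means any `flags & ENQUEUE_WAKING` test compiles to a constant false and the guarded code is discarded, so call sites need no #ifdef. Roughly:

        /* Sketch: on !SMP the condition is constant-false and the body is
         * dead code, with no #ifdef needed where the flag is tested. */
        static void enqueue_sketch(struct rq *rq, struct task_struct *p, int flags)
        {
                if (flags & ENQUEUE_WAKING) {
                        /* SMP-only wakeup adjustments happen here */
                }
        }
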
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 5a89e3612875..083ffea7ba18 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -249,6 +249,8 @@ extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
249extern int hibernate(void); 249extern int hibernate(void);
250extern bool system_entering_hibernation(void); 250extern bool system_entering_hibernation(void);
251#else /* CONFIG_HIBERNATION */ 251#else /* CONFIG_HIBERNATION */
252static inline void register_nosave_region(unsigned long b, unsigned long e) {}
253static inline void register_nosave_region_late(unsigned long b, unsigned long e) {}
252static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } 254static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
253static inline void swsusp_set_page_free(struct page *p) {} 255static inline void swsusp_set_page_free(struct page *p) {}
254static inline void swsusp_unset_page_free(struct page *p) {} 256static inline void swsusp_unset_page_free(struct page *p) {}
@@ -297,14 +299,7 @@ static inline bool pm_wakeup_pending(void) { return false; }
297 299
298extern struct mutex pm_mutex; 300extern struct mutex pm_mutex;
299 301
300#ifndef CONFIG_HIBERNATION 302#ifndef CONFIG_HIBERNATE_CALLBACKS
301static inline void register_nosave_region(unsigned long b, unsigned long e)
302{
303}
304static inline void register_nosave_region_late(unsigned long b, unsigned long e)
305{
306}
307
308static inline void lock_system_sleep(void) {} 303static inline void lock_system_sleep(void) {}
309static inline void unlock_system_sleep(void) {} 304static inline void unlock_system_sleep(void) {}
310 305
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 814b434db749..d516f00c8e0f 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -52,7 +52,7 @@ static inline struct net *skb_net(const struct sk_buff *skb)
52 */ 52 */
53 if (likely(skb->dev && skb->dev->nd_net)) 53 if (likely(skb->dev && skb->dev->nd_net))
54 return dev_net(skb->dev); 54 return dev_net(skb->dev);
55 if (skb_dst(skb)->dev) 55 if (skb_dst(skb) && skb_dst(skb)->dev)
56 return dev_net(skb_dst(skb)->dev); 56 return dev_net(skb_dst(skb)->dev);
57 WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n", 57 WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n",
58 __func__, __LINE__); 58 __func__, __LINE__);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index cb13239fe8e3..025d4cc7bbf8 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1753,8 +1753,19 @@ enum ieee80211_ampdu_mlme_action {
1753 * that TX/RX_STOP can pass NULL for this parameter. 1753 * that TX/RX_STOP can pass NULL for this parameter.
1754 * The @buf_size parameter is only valid when the action is set to 1754 * The @buf_size parameter is only valid when the action is set to
1755 * %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's reorder 1755 * %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's reorder
1756 * buffer size (number of subframes) for this session -- aggregates 1756 * buffer size (number of subframes) for this session -- the driver
1757 * containing more subframes than this may not be transmitted to the peer. 1757 * may neither send aggregates containing more subframes than this
1758 * nor send aggregates in a way that lost frames would exceed the
1759 * buffer size. If just limiting the aggregate size, this would be
1760 * possible with a buf_size of 8:
1761 * - TX: 1.....7
1762 * - RX: 2....7 (lost frame #1)
1763 * - TX: 8..1...
1764 * which is invalid since #1 was now re-transmitted well past the
1765 * buffer size of 8. Correct ways to retransmit #1 would be:
1766 * - TX: 1 or 18 or 81
1767 * Even "189" would be wrong since 1 could be lost again.
1768 *
1758 * Returns a negative error code on failure. 1769 * Returns a negative error code on failure.
1759 * The callback can sleep. 1770 * The callback can sleep.
1760 * 1771 *
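
The buf_size rule above is a sliding-window constraint: every frame that may still need retransmission must stay within buf_size of the newest frame sent. A small self-contained check of the comment's examples (plain integers, ignoring the sequence-number wraparound real BlockAck sessions have):

        #include <stdbool.h>
        #include <stdio.h>

        /* True if sending sequence number 'next' keeps the oldest frame that
         * may still be retransmitted ('oldest') inside a buf_size window. */
        static bool tx_allowed(int oldest, int next, int buf_size)
        {
                return next - oldest < buf_size;
        }

        int main(void)
        {
                /* 1..7 sent, frame 1 lost: sending 8 still covers 1..8. */
                printf("send 8 with 1 unacked: %s\n",
                       tx_allowed(1, 8, 8) ? "ok" : "invalid");
                /* ...but sending 9 pushes lost frame 1 out of the window,
                 * which is why "189" in the comment above is wrong. */
                printf("send 9 with 1 unacked: %s\n",
                       tx_allowed(1, 9, 8) ? "ok" : "invalid");
                return 0;
        }
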
diff --git a/include/net/route.h b/include/net/route.h
index f88429cad52a..8fce0621cad1 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -64,6 +64,7 @@ struct rtable {
64 64
65 __be32 rt_dst; /* Path destination */ 65 __be32 rt_dst; /* Path destination */
66 __be32 rt_src; /* Path source */ 66 __be32 rt_src; /* Path source */
67 int rt_route_iif;
67 int rt_iif; 68 int rt_iif;
68 int rt_oif; 69 int rt_oif;
69 __u32 rt_mark; 70 __u32 rt_mark;
@@ -80,12 +81,12 @@ struct rtable {
80 81
81static inline bool rt_is_input_route(struct rtable *rt) 82static inline bool rt_is_input_route(struct rtable *rt)
82{ 83{
83 return rt->rt_iif != 0; 84 return rt->rt_route_iif != 0;
84} 85}
85 86
86static inline bool rt_is_output_route(struct rtable *rt) 87static inline bool rt_is_output_route(struct rtable *rt)
87{ 88{
88 return rt->rt_iif == 0; 89 return rt->rt_route_iif == 0;
89} 90}
90 91
91struct ip_rt_acct { 92struct ip_rt_acct {
diff --git a/init/Kconfig b/init/Kconfig
index 56240e724d9a..32745bfe059e 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -827,6 +827,11 @@ config SCHED_AUTOGROUP
827 desktop applications. Task group autogeneration is currently based 827 desktop applications. Task group autogeneration is currently based
828 upon task session. 828 upon task session.
829 829
830config SCHED_TTWU_QUEUE
831 bool
832 depends on !SPARC32
833 default y
834
830config MM_OWNER 835config MM_OWNER
831 bool 836 bool
832 837
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index ec815a960b5d..73da83aff418 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lock)
75 return; 75 return;
76 76
77 DEBUG_LOCKS_WARN_ON(lock->magic != lock); 77 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
78 DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info()); 78 DEBUG_LOCKS_WARN_ON(lock->owner != current);
79 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next); 79 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
80 mutex_clear_owner(lock); 80 mutex_clear_owner(lock);
81} 81}
diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
index 57d527a16f9d..0799fd3e4cfa 100644
--- a/kernel/mutex-debug.h
+++ b/kernel/mutex-debug.h
@@ -29,7 +29,7 @@ extern void debug_mutex_init(struct mutex *lock, const char *name,
29 29
30static inline void mutex_set_owner(struct mutex *lock) 30static inline void mutex_set_owner(struct mutex *lock)
31{ 31{
32 lock->owner = current_thread_info(); 32 lock->owner = current;
33} 33}
34 34
35static inline void mutex_clear_owner(struct mutex *lock) 35static inline void mutex_clear_owner(struct mutex *lock)
diff --git a/kernel/mutex.c b/kernel/mutex.c
index c4195fa98900..fe4706cb0c5b 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -160,7 +160,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
160 */ 160 */
161 161
162 for (;;) { 162 for (;;) {
163 struct thread_info *owner; 163 struct task_struct *owner;
164 164
165 /* 165 /*
166 * If we own the BKL, then don't spin. The owner of 166 * If we own the BKL, then don't spin. The owner of
diff --git a/kernel/mutex.h b/kernel/mutex.h
index 67578ca48f94..4115fbf83b12 100644
--- a/kernel/mutex.h
+++ b/kernel/mutex.h
@@ -19,7 +19,7 @@
19#ifdef CONFIG_SMP 19#ifdef CONFIG_SMP
20static inline void mutex_set_owner(struct mutex *lock) 20static inline void mutex_set_owner(struct mutex *lock)
21{ 21{
22 lock->owner = current_thread_info(); 22 lock->owner = current;
23} 23}
24 24
25static inline void mutex_clear_owner(struct mutex *lock) 25static inline void mutex_clear_owner(struct mutex *lock)
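
Storing the owner as a task_struct rather than a thread_info lets the adaptive-spin path ask the scheduler directly whether the owner is still on a CPU, via the p->on_cpu field introduced later in this commit. A simplified model of the check mutex_spin_on_owner() relies on (the real code also re-validates lock->owner and need_resched()):

        /* Spin only while the recorded owner is still running somewhere;
         * once it is scheduled out, blocking beats burning cycles. */
        static bool owner_running(struct mutex *lock, struct task_struct *owner)
        {
                if (lock->owner != owner)
                        return false;   /* released or handed to someone else */

                barrier();              /* force a fresh read of ->on_cpu */
                return owner->on_cpu;
        }
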
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 4603f08dc47b..6de9a8fc3417 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -18,9 +18,13 @@ config SUSPEND_FREEZER
18 18
19 Turning OFF this setting is NOT recommended! If in doubt, say Y. 19 Turning OFF this setting is NOT recommended! If in doubt, say Y.
20 20
21config HIBERNATE_CALLBACKS
22 bool
23
21config HIBERNATION 24config HIBERNATION
22 bool "Hibernation (aka 'suspend to disk')" 25 bool "Hibernation (aka 'suspend to disk')"
23 depends on SWAP && ARCH_HIBERNATION_POSSIBLE 26 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
27 select HIBERNATE_CALLBACKS
24 select LZO_COMPRESS 28 select LZO_COMPRESS
25 select LZO_DECOMPRESS 29 select LZO_DECOMPRESS
26 ---help--- 30 ---help---
@@ -85,7 +89,7 @@ config PM_STD_PARTITION
85 89
86config PM_SLEEP 90config PM_SLEEP
87 def_bool y 91 def_bool y
88 depends on SUSPEND || HIBERNATION || XEN_SAVE_RESTORE 92 depends on SUSPEND || HIBERNATE_CALLBACKS
89 93
90config PM_SLEEP_SMP 94config PM_SLEEP_SMP
91 def_bool y 95 def_bool y
diff --git a/kernel/sched.c b/kernel/sched.c
index 506cb8147c70..0cfe0310ed5d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -312,6 +312,9 @@ struct cfs_rq {
312 312
313 u64 exec_clock; 313 u64 exec_clock;
314 u64 min_vruntime; 314 u64 min_vruntime;
315#ifndef CONFIG_64BIT
316 u64 min_vruntime_copy;
317#endif
315 318
316 struct rb_root tasks_timeline; 319 struct rb_root tasks_timeline;
317 struct rb_node *rb_leftmost; 320 struct rb_node *rb_leftmost;
@@ -554,6 +557,10 @@ struct rq {
554 unsigned int ttwu_count; 557 unsigned int ttwu_count;
555 unsigned int ttwu_local; 558 unsigned int ttwu_local;
556#endif 559#endif
560
561#ifdef CONFIG_SMP
562 struct task_struct *wake_list;
563#endif
557}; 564};
558 565
559static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); 566static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
@@ -597,7 +604,7 @@ static inline int cpu_of(struct rq *rq)
597 * Return the group to which this tasks belongs. 604 * Return the group to which this tasks belongs.
598 * 605 *
599 * We use task_subsys_state_check() and extend the RCU verification 606 * We use task_subsys_state_check() and extend the RCU verification
600 * with lockdep_is_held(&task_rq(p)->lock) because cpu_cgroup_attach() 607 * with lockdep_is_held(&p->pi_lock) because cpu_cgroup_attach()
601 * holds that lock for each task it moves into the cgroup. Therefore 608 * holds that lock for each task it moves into the cgroup. Therefore
602 * by holding that lock, we pin the task to the current cgroup. 609 * by holding that lock, we pin the task to the current cgroup.
603 */ 610 */
@@ -607,7 +614,7 @@ static inline struct task_group *task_group(struct task_struct *p)
607 struct cgroup_subsys_state *css; 614 struct cgroup_subsys_state *css;
608 615
609 css = task_subsys_state_check(p, cpu_cgroup_subsys_id, 616 css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
610 lockdep_is_held(&task_rq(p)->lock)); 617 lockdep_is_held(&p->pi_lock));
611 tg = container_of(css, struct task_group, css); 618 tg = container_of(css, struct task_group, css);
612 619
613 return autogroup_task_group(p, tg); 620 return autogroup_task_group(p, tg);
@@ -839,18 +846,39 @@ static inline int task_current(struct rq *rq, struct task_struct *p)
839 return rq->curr == p; 846 return rq->curr == p;
840} 847}
841 848
842#ifndef __ARCH_WANT_UNLOCKED_CTXSW
843static inline int task_running(struct rq *rq, struct task_struct *p) 849static inline int task_running(struct rq *rq, struct task_struct *p)
844{ 850{
851#ifdef CONFIG_SMP
852 return p->on_cpu;
853#else
845 return task_current(rq, p); 854 return task_current(rq, p);
855#endif
846} 856}
847 857
858#ifndef __ARCH_WANT_UNLOCKED_CTXSW
848static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) 859static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
849{ 860{
861#ifdef CONFIG_SMP
862 /*
863 * We can optimise this out completely for !SMP, because the
864 * SMP rebalancing from interrupt is the only thing that cares
865 * here.
866 */
867 next->on_cpu = 1;
868#endif
850} 869}
851 870
852static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) 871static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
853{ 872{
873#ifdef CONFIG_SMP
874 /*
875 * After ->on_cpu is cleared, the task can be moved to a different CPU.
876 * We must ensure this doesn't happen until the switch is completely
877 * finished.
878 */
879 smp_wmb();
880 prev->on_cpu = 0;
881#endif
854#ifdef CONFIG_DEBUG_SPINLOCK 882#ifdef CONFIG_DEBUG_SPINLOCK
855 /* this is a valid case when another task releases the spinlock */ 883 /* this is a valid case when another task releases the spinlock */
856 rq->lock.owner = current; 884 rq->lock.owner = current;
@@ -866,15 +894,6 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
866} 894}
867 895
868#else /* __ARCH_WANT_UNLOCKED_CTXSW */ 896#else /* __ARCH_WANT_UNLOCKED_CTXSW */
869static inline int task_running(struct rq *rq, struct task_struct *p)
870{
871#ifdef CONFIG_SMP
872 return p->oncpu;
873#else
874 return task_current(rq, p);
875#endif
876}
877
878static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) 897static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
879{ 898{
880#ifdef CONFIG_SMP 899#ifdef CONFIG_SMP
@@ -883,7 +902,7 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
883 * SMP rebalancing from interrupt is the only thing that cares 902 * SMP rebalancing from interrupt is the only thing that cares
884 * here. 903 * here.
885 */ 904 */
886 next->oncpu = 1; 905 next->on_cpu = 1;
887#endif 906#endif
888#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW 907#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
889 raw_spin_unlock_irq(&rq->lock); 908 raw_spin_unlock_irq(&rq->lock);
@@ -896,12 +915,12 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
896{ 915{
897#ifdef CONFIG_SMP 916#ifdef CONFIG_SMP
898 /* 917 /*
899 * After ->oncpu is cleared, the task can be moved to a different CPU. 918 * After ->on_cpu is cleared, the task can be moved to a different CPU.
900 * We must ensure this doesn't happen until the switch is completely 919 * We must ensure this doesn't happen until the switch is completely
901 * finished. 920 * finished.
902 */ 921 */
903 smp_wmb(); 922 smp_wmb();
904 prev->oncpu = 0; 923 prev->on_cpu = 0;
905#endif 924#endif
906#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW 925#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
907 local_irq_enable(); 926 local_irq_enable();
@@ -910,23 +929,15 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
910#endif /* __ARCH_WANT_UNLOCKED_CTXSW */ 929#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
911 930
912/* 931/*
913 * Check whether the task is waking, we use this to synchronize ->cpus_allowed 932 * __task_rq_lock - lock the rq @p resides on.
914 * against ttwu().
915 */
916static inline int task_is_waking(struct task_struct *p)
917{
918 return unlikely(p->state == TASK_WAKING);
919}
920
921/*
922 * __task_rq_lock - lock the runqueue a given task resides on.
923 * Must be called interrupts disabled.
924 */ 933 */
925static inline struct rq *__task_rq_lock(struct task_struct *p) 934static inline struct rq *__task_rq_lock(struct task_struct *p)
926 __acquires(rq->lock) 935 __acquires(rq->lock)
927{ 936{
928 struct rq *rq; 937 struct rq *rq;
929 938
939 lockdep_assert_held(&p->pi_lock);
940
930 for (;;) { 941 for (;;) {
931 rq = task_rq(p); 942 rq = task_rq(p);
932 raw_spin_lock(&rq->lock); 943 raw_spin_lock(&rq->lock);
@@ -937,22 +948,22 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
937} 948}
938 949
939/* 950/*
940 * task_rq_lock - lock the runqueue a given task resides on and disable 951 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
941 * interrupts. Note the ordering: we can safely lookup the task_rq without
942 * explicitly disabling preemption.
943 */ 952 */
944static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) 953static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
954 __acquires(p->pi_lock)
945 __acquires(rq->lock) 955 __acquires(rq->lock)
946{ 956{
947 struct rq *rq; 957 struct rq *rq;
948 958
949 for (;;) { 959 for (;;) {
950 local_irq_save(*flags); 960 raw_spin_lock_irqsave(&p->pi_lock, *flags);
951 rq = task_rq(p); 961 rq = task_rq(p);
952 raw_spin_lock(&rq->lock); 962 raw_spin_lock(&rq->lock);
953 if (likely(rq == task_rq(p))) 963 if (likely(rq == task_rq(p)))
954 return rq; 964 return rq;
955 raw_spin_unlock_irqrestore(&rq->lock, *flags); 965 raw_spin_unlock(&rq->lock);
966 raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
956 } 967 }
957} 968}
958 969
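
task_rq_lock() above now nests p->pi_lock outside rq->lock and retries when the task migrated between the two acquisitions. Stripped of the scheduler specifics, the shape is the classic "lock the object's current home" loop (hypothetical types):

        struct home { spinlock_t lock; };
        struct obj  { spinlock_t outer; struct home *home; };

        static struct home *lock_home(struct obj *o)
        {
                struct home *h;

                for (;;) {
                        spin_lock(&o->outer);
                        h = o->home;
                        spin_lock(&h->lock);
                        if (likely(h == o->home))
                                return h;       /* both locks held */
                        /* moved under us: drop both and retry */
                        spin_unlock(&h->lock);
                        spin_unlock(&o->outer);
                }
        }
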
@@ -962,10 +973,13 @@ static void __task_rq_unlock(struct rq *rq)
962 raw_spin_unlock(&rq->lock); 973 raw_spin_unlock(&rq->lock);
963} 974}
964 975
965static inline void task_rq_unlock(struct rq *rq, unsigned long *flags) 976static inline void
977task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
966 __releases(rq->lock) 978 __releases(rq->lock)
979 __releases(p->pi_lock)
967{ 980{
968 raw_spin_unlock_irqrestore(&rq->lock, *flags); 981 raw_spin_unlock(&rq->lock);
982 raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
969} 983}
970 984
971/* 985/*
@@ -1774,7 +1788,6 @@ static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
1774 update_rq_clock(rq); 1788 update_rq_clock(rq);
1775 sched_info_queued(p); 1789 sched_info_queued(p);
1776 p->sched_class->enqueue_task(rq, p, flags); 1790 p->sched_class->enqueue_task(rq, p, flags);
1777 p->se.on_rq = 1;
1778} 1791}
1779 1792
1780static void dequeue_task(struct rq *rq, struct task_struct *p, int flags) 1793static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
@@ -1782,7 +1795,6 @@ static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
1782 update_rq_clock(rq); 1795 update_rq_clock(rq);
1783 sched_info_dequeued(p); 1796 sched_info_dequeued(p);
1784 p->sched_class->dequeue_task(rq, p, flags); 1797 p->sched_class->dequeue_task(rq, p, flags);
1785 p->se.on_rq = 0;
1786} 1798}
1787 1799
1788/* 1800/*
@@ -2117,7 +2129,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
2117 * A queue event has occurred, and we're going to schedule. In 2129 * A queue event has occurred, and we're going to schedule. In
2118 * this case, we can save a useless back to back clock update. 2130 * this case, we can save a useless back to back clock update.
2119 */ 2131 */
2120 if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) 2132 if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
2121 rq->skip_clock_update = 1; 2133 rq->skip_clock_update = 1;
2122} 2134}
2123 2135
@@ -2163,6 +2175,11 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
2163 */ 2175 */
2164 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING && 2176 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
2165 !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)); 2177 !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
2178
2179#ifdef CONFIG_LOCKDEP
2180 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
2181 lockdep_is_held(&task_rq(p)->lock)));
2182#endif
2166#endif 2183#endif
2167 2184
2168 trace_sched_migrate_task(p, new_cpu); 2185 trace_sched_migrate_task(p, new_cpu);
@@ -2183,19 +2200,6 @@ struct migration_arg {
2183static int migration_cpu_stop(void *data); 2200static int migration_cpu_stop(void *data);
2184 2201
2185/* 2202/*
2186 * The task's runqueue lock must be held.
2187 * Returns true if you have to wait for migration thread.
2188 */
2189static bool migrate_task(struct task_struct *p, struct rq *rq)
2190{
2191 /*
2192 * If the task is not on a runqueue (and not running), then
2193 * the next wake-up will properly place the task.
2194 */
2195 return p->se.on_rq || task_running(rq, p);
2196}
2197
2198/*
2199 * wait_task_inactive - wait for a thread to unschedule. 2203 * wait_task_inactive - wait for a thread to unschedule.
2200 * 2204 *
2201 * If @match_state is nonzero, it's the @p->state value just checked and 2205 * If @match_state is nonzero, it's the @p->state value just checked and
@@ -2252,11 +2256,11 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
2252 rq = task_rq_lock(p, &flags); 2256 rq = task_rq_lock(p, &flags);
2253 trace_sched_wait_task(p); 2257 trace_sched_wait_task(p);
2254 running = task_running(rq, p); 2258 running = task_running(rq, p);
2255 on_rq = p->se.on_rq; 2259 on_rq = p->on_rq;
2256 ncsw = 0; 2260 ncsw = 0;
2257 if (!match_state || p->state == match_state) 2261 if (!match_state || p->state == match_state)
2258 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ 2262 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
2259 task_rq_unlock(rq, &flags); 2263 task_rq_unlock(rq, p, &flags);
2260 2264
2261 /* 2265 /*
2262 * If it changed from the expected state, bail out now. 2266 * If it changed from the expected state, bail out now.
@@ -2331,7 +2335,7 @@ EXPORT_SYMBOL_GPL(kick_process);
2331 2335
2332#ifdef CONFIG_SMP 2336#ifdef CONFIG_SMP
2333/* 2337/*
2334 * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held. 2338 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
2335 */ 2339 */
2336static int select_fallback_rq(int cpu, struct task_struct *p) 2340static int select_fallback_rq(int cpu, struct task_struct *p)
2337{ 2341{
@@ -2364,12 +2368,12 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
2364} 2368}
2365 2369
2366/* 2370/*
2367 * The caller (fork, wakeup) owns TASK_WAKING, ->cpus_allowed is stable. 2371 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
2368 */ 2372 */
2369static inline 2373static inline
2370int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags) 2374int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
2371{ 2375{
2372 int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags); 2376 int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
2373 2377
2374 /* 2378 /*
2375 * In order not to call set_task_cpu() on a blocking task we need 2379 * In order not to call set_task_cpu() on a blocking task we need
@@ -2395,27 +2399,60 @@ static void update_avg(u64 *avg, u64 sample)
2395} 2399}
2396#endif 2400#endif
2397 2401
2398static inline void ttwu_activate(struct task_struct *p, struct rq *rq, 2402static void
2399 bool is_sync, bool is_migrate, bool is_local, 2403ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
2400 unsigned long en_flags)
2401{ 2404{
2405#ifdef CONFIG_SCHEDSTATS
2406 struct rq *rq = this_rq();
2407
2408#ifdef CONFIG_SMP
2409 int this_cpu = smp_processor_id();
2410
2411 if (cpu == this_cpu) {
2412 schedstat_inc(rq, ttwu_local);
2413 schedstat_inc(p, se.statistics.nr_wakeups_local);
2414 } else {
2415 struct sched_domain *sd;
2416
2417 schedstat_inc(p, se.statistics.nr_wakeups_remote);
2418 for_each_domain(this_cpu, sd) {
2419 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
2420 schedstat_inc(sd, ttwu_wake_remote);
2421 break;
2422 }
2423 }
2424 }
2425#endif /* CONFIG_SMP */
2426
2427 schedstat_inc(rq, ttwu_count);
2402 schedstat_inc(p, se.statistics.nr_wakeups); 2428 schedstat_inc(p, se.statistics.nr_wakeups);
2403 if (is_sync) 2429
2430 if (wake_flags & WF_SYNC)
2404 schedstat_inc(p, se.statistics.nr_wakeups_sync); 2431 schedstat_inc(p, se.statistics.nr_wakeups_sync);
2405 if (is_migrate) 2432
2433 if (cpu != task_cpu(p))
2406 schedstat_inc(p, se.statistics.nr_wakeups_migrate); 2434 schedstat_inc(p, se.statistics.nr_wakeups_migrate);
2407 if (is_local)
2408 schedstat_inc(p, se.statistics.nr_wakeups_local);
2409 else
2410 schedstat_inc(p, se.statistics.nr_wakeups_remote);
2411 2435
2436#endif /* CONFIG_SCHEDSTATS */
2437}
2438
2439static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
2440{
2412 activate_task(rq, p, en_flags); 2441 activate_task(rq, p, en_flags);
2442 p->on_rq = 1;
2443
2444 /* if a worker is waking up, notify workqueue */
2445 if (p->flags & PF_WQ_WORKER)
2446 wq_worker_waking_up(p, cpu_of(rq));
2413} 2447}
2414 2448
2415static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq, 2449/*
2416 int wake_flags, bool success) 2450 * Mark the task runnable and perform wakeup-preemption.
2451 */
2452static void
2453ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
2417{ 2454{
2418 trace_sched_wakeup(p, success); 2455 trace_sched_wakeup(p, true);
2419 check_preempt_curr(rq, p, wake_flags); 2456 check_preempt_curr(rq, p, wake_flags);
2420 2457
2421 p->state = TASK_RUNNING; 2458 p->state = TASK_RUNNING;
@@ -2434,9 +2471,99 @@ static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq,
2434 rq->idle_stamp = 0; 2471 rq->idle_stamp = 0;
2435 } 2472 }
2436#endif 2473#endif
2437 /* if a worker is waking up, notify workqueue */ 2474}
2438 if ((p->flags & PF_WQ_WORKER) && success) 2475
2439 wq_worker_waking_up(p, cpu_of(rq)); 2476static void
2477ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
2478{
2479#ifdef CONFIG_SMP
2480 if (p->sched_contributes_to_load)
2481 rq->nr_uninterruptible--;
2482#endif
2483
2484 ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
2485 ttwu_do_wakeup(rq, p, wake_flags);
2486}
2487
2488/*
2489 * Called in case the task @p isn't fully descheduled from its runqueue,
2490 * in this case we must do a remote wakeup. It's a 'light' wakeup though,
2491 * since all we need to do is flip p->state to TASK_RUNNING, as
2492 * the task is still ->on_rq.
2493 */
2494static int ttwu_remote(struct task_struct *p, int wake_flags)
2495{
2496 struct rq *rq;
2497 int ret = 0;
2498
2499 rq = __task_rq_lock(p);
2500 if (p->on_rq) {
2501 ttwu_do_wakeup(rq, p, wake_flags);
2502 ret = 1;
2503 }
2504 __task_rq_unlock(rq);
2505
2506 return ret;
2507}
2508
2509#ifdef CONFIG_SMP
2510static void sched_ttwu_pending(void)
2511{
2512 struct rq *rq = this_rq();
2513 struct task_struct *list = xchg(&rq->wake_list, NULL);
2514
2515 if (!list)
2516 return;
2517
2518 raw_spin_lock(&rq->lock);
2519
2520 while (list) {
2521 struct task_struct *p = list;
2522 list = list->wake_entry;
2523 ttwu_do_activate(rq, p, 0);
2524 }
2525
2526 raw_spin_unlock(&rq->lock);
2527}
2528
2529void scheduler_ipi(void)
2530{
2531 sched_ttwu_pending();
2532}
2533
2534static void ttwu_queue_remote(struct task_struct *p, int cpu)
2535{
2536 struct rq *rq = cpu_rq(cpu);
2537 struct task_struct *next = rq->wake_list;
2538
2539 for (;;) {
2540 struct task_struct *old = next;
2541
2542 p->wake_entry = next;
2543 next = cmpxchg(&rq->wake_list, old, p);
2544 if (next == old)
2545 break;
2546 }
2547
2548 if (!next)
2549 smp_send_reschedule(cpu);
2550}
2551#endif
2552
2553static void ttwu_queue(struct task_struct *p, int cpu)
2554{
2555 struct rq *rq = cpu_rq(cpu);
2556
2557#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_TTWU_QUEUE)
2558 if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) {
2559 ttwu_queue_remote(p, cpu);
2560 return;
2561 }
2562#endif
2563
2564 raw_spin_lock(&rq->lock);
2565 ttwu_do_activate(rq, p, 0);
2566 raw_spin_unlock(&rq->lock);
2440} 2567}
2441 2568
2442/** 2569/**
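
ttwu_queue_remote() above pushes the task onto the target CPU's wake_list with a lock-free cmpxchg loop and sends the reschedule IPI only when the list was previously empty, so a burst of remote wakeups costs a single IPI. A userspace C11 model of that push:

        #include <stdatomic.h>
        #include <stddef.h>

        struct node { struct node *next; };
        static _Atomic(struct node *) wake_list;

        /* Returns nonzero when the caller must send the IPI. */
        static int push_wakeup(struct node *n)
        {
                struct node *old = atomic_load(&wake_list);

                do {
                        n->next = old;
                } while (!atomic_compare_exchange_weak(&wake_list, &old, n));

                return old == NULL;     /* list was empty before our push */
        }
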
@@ -2454,92 +2581,64 @@ static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq,
2454 * Returns %true if @p was woken up, %false if it was already running 2581 * Returns %true if @p was woken up, %false if it was already running
2455 * or @state didn't match @p's state. 2582 * or @state didn't match @p's state.
2456 */ 2583 */
2457static int try_to_wake_up(struct task_struct *p, unsigned int state, 2584static int
2458 int wake_flags) 2585try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
2459{ 2586{
2460 int cpu, orig_cpu, this_cpu, success = 0;
2461 unsigned long flags; 2587 unsigned long flags;
2462 unsigned long en_flags = ENQUEUE_WAKEUP; 2588 int cpu, success = 0;
2463 struct rq *rq;
2464
2465 this_cpu = get_cpu();
2466 2589
2467 smp_wmb(); 2590 smp_wmb();
2468 rq = task_rq_lock(p, &flags); 2591 raw_spin_lock_irqsave(&p->pi_lock, flags);
2469 if (!(p->state & state)) 2592 if (!(p->state & state))
2470 goto out; 2593 goto out;
2471 2594
2472 if (p->se.on_rq) 2595 success = 1; /* we're going to change ->state */
2473 goto out_running;
2474
2475 cpu = task_cpu(p); 2596 cpu = task_cpu(p);
2476 orig_cpu = cpu;
2477 2597
2478#ifdef CONFIG_SMP 2598 if (p->on_rq && ttwu_remote(p, wake_flags))
2479 if (unlikely(task_running(rq, p))) 2599 goto stat;
2480 goto out_activate;
2481 2600
2601#ifdef CONFIG_SMP
2482 /* 2602 /*
2483 * In order to handle concurrent wakeups and release the rq->lock 2603 * If the owning (remote) cpu is still in the middle of schedule() with
2484 * we put the task in TASK_WAKING state. 2604 * this task as prev, wait until it's done referencing the task.
2485 *
2486 * First fix up the nr_uninterruptible count:
2487 */ 2605 */
2488 if (task_contributes_to_load(p)) { 2606 while (p->on_cpu) {
2489 if (likely(cpu_online(orig_cpu))) 2607#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2490 rq->nr_uninterruptible--; 2608 /*
2491 else 2609 * If called from interrupt context we could have landed in the
2492 this_rq()->nr_uninterruptible--; 2610 * middle of schedule(); in this case we should take care not
2493 } 2611 * to spin on ->on_cpu if p is current, since that would
2494 p->state = TASK_WAKING; 2612 * deadlock.
2495 2613 */
2496 if (p->sched_class->task_waking) { 2614 if (p == current) {
2497 p->sched_class->task_waking(rq, p); 2615 ttwu_queue(p, cpu);
2498 en_flags |= ENQUEUE_WAKING; 2616 goto stat;
2617 }
2618#endif
2619 cpu_relax();
2499 } 2620 }
2500
2501 cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
2502 if (cpu != orig_cpu)
2503 set_task_cpu(p, cpu);
2504 __task_rq_unlock(rq);
2505
2506 rq = cpu_rq(cpu);
2507 raw_spin_lock(&rq->lock);
2508
2509 /* 2621 /*
2510 * We migrated the task without holding either rq->lock, however 2622 * Pairs with the smp_wmb() in finish_lock_switch().
2511 * since the task is not on the task list itself, nobody else
2512 * will try and migrate the task, hence the rq should match the
2513 * cpu we just moved it to.
2514 */ 2623 */
2515 WARN_ON(task_cpu(p) != cpu); 2624 smp_rmb();
2516 WARN_ON(p->state != TASK_WAKING);
2517 2625
2518#ifdef CONFIG_SCHEDSTATS 2626 p->sched_contributes_to_load = !!task_contributes_to_load(p);
2519 schedstat_inc(rq, ttwu_count); 2627 p->state = TASK_WAKING;
2520 if (cpu == this_cpu)
2521 schedstat_inc(rq, ttwu_local);
2522 else {
2523 struct sched_domain *sd;
2524 for_each_domain(this_cpu, sd) {
2525 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
2526 schedstat_inc(sd, ttwu_wake_remote);
2527 break;
2528 }
2529 }
2530 }
2531#endif /* CONFIG_SCHEDSTATS */
2532 2628
2533out_activate: 2629 if (p->sched_class->task_waking)
2630 p->sched_class->task_waking(p);
2631
2632 cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
2633 if (task_cpu(p) != cpu)
2634 set_task_cpu(p, cpu);
2534#endif /* CONFIG_SMP */ 2635#endif /* CONFIG_SMP */
2535 ttwu_activate(p, rq, wake_flags & WF_SYNC, orig_cpu != cpu, 2636
2536 cpu == this_cpu, en_flags); 2637 ttwu_queue(p, cpu);
2537 success = 1; 2638stat:
2538out_running: 2639 ttwu_stat(p, cpu, wake_flags);
2539 ttwu_post_activation(p, rq, wake_flags, success);
2540out: 2640out:
2541 task_rq_unlock(rq, &flags); 2641 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2542 put_cpu();
2543 2642
2544 return success; 2643 return success;
2545} 2644}
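
The wait loop added to try_to_wake_up() above ("while (p->on_cpu) cpu_relax();" then smp_rmb()) pairs with the smp_wmb() in finish_lock_switch(): the previous CPU clears ->on_cpu only after it has stopped referencing the task. The same publish/wait protocol, rendered as a hedged C11 release/acquire sketch (names are illustrative):

	#include <stdatomic.h>
	#include <sched.h>

	struct task {
		atomic_int on_cpu;	/* 1 while some CPU still references us */
	};

	/* Context-switch side: done with @prev, publish that with release
	 * semantics (plays the role of smp_wmb() + "prev->on_cpu = 0"). */
	static void finish_switch(struct task *prev)
	{
		atomic_store_explicit(&prev->on_cpu, 0, memory_order_release);
	}

	/* Wakeup side: spin until the owning CPU is done; the acquire load
	 * plays the role of the kernel's smp_rmb() after the spin. */
	static void wait_for_descheduled(struct task *p)
	{
		while (atomic_load_explicit(&p->on_cpu, memory_order_acquire))
			sched_yield();	/* stand-in for cpu_relax() */
	}
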
@@ -2548,31 +2647,34 @@ out:
2548 * try_to_wake_up_local - try to wake up a local task with rq lock held 2647 * try_to_wake_up_local - try to wake up a local task with rq lock held
2549 * @p: the thread to be awakened 2648 * @p: the thread to be awakened
2550 * 2649 *
2551 * Put @p on the run-queue if it's not already there. The caller must 2650 * Put @p on the run-queue if it's not already there. The caller must
2552 * ensure that this_rq() is locked, @p is bound to this_rq() and not 2651 * ensure that this_rq() is locked, @p is bound to this_rq() and not
2553 * the current task. this_rq() stays locked over invocation. 2652 * the current task.
2554 */ 2653 */
2555static void try_to_wake_up_local(struct task_struct *p) 2654static void try_to_wake_up_local(struct task_struct *p)
2556{ 2655{
2557 struct rq *rq = task_rq(p); 2656 struct rq *rq = task_rq(p);
2558 bool success = false;
2559 2657
2560 BUG_ON(rq != this_rq()); 2658 BUG_ON(rq != this_rq());
2561 BUG_ON(p == current); 2659 BUG_ON(p == current);
2562 lockdep_assert_held(&rq->lock); 2660 lockdep_assert_held(&rq->lock);
2563 2661
2662 if (!raw_spin_trylock(&p->pi_lock)) {
2663 raw_spin_unlock(&rq->lock);
2664 raw_spin_lock(&p->pi_lock);
2665 raw_spin_lock(&rq->lock);
2666 }
2667
2564 if (!(p->state & TASK_NORMAL)) 2668 if (!(p->state & TASK_NORMAL))
2565 return; 2669 goto out;
2566 2670
2567 if (!p->se.on_rq) { 2671 if (!p->on_rq)
2568 if (likely(!task_running(rq, p))) { 2672 ttwu_activate(rq, p, ENQUEUE_WAKEUP);
2569 schedstat_inc(rq, ttwu_count); 2673
2570 schedstat_inc(rq, ttwu_local); 2674 ttwu_do_wakeup(rq, p, 0);
2571 } 2675 ttwu_stat(p, smp_processor_id(), 0);
2572 ttwu_activate(p, rq, false, false, true, ENQUEUE_WAKEUP); 2676out:
2573 success = true; 2677 raw_spin_unlock(&p->pi_lock);
2574 }
2575 ttwu_post_activation(p, rq, 0, success);
2576} 2678}
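
try_to_wake_up_local() is entered with rq->lock held, but the new locking rules take p->pi_lock first. The trylock dance above resolves that ordering inversion: attempt the out-of-order acquisition, and on failure drop rq->lock and retake both locks in the documented order. A generic pthreads sketch of the idiom (purely illustrative):

	#include <pthread.h>

	/* Global lock order: *a (pi_lock) before *b (rq->lock). */
	static void lock_both_from_b(pthread_mutex_t *a, pthread_mutex_t *b)
	{
		/* Called with *b already held; we also need *a. */
		if (pthread_mutex_trylock(a) != 0) {
			/* Cannot take *a out of order: back off, retry in order. */
			pthread_mutex_unlock(b);
			pthread_mutex_lock(a);
			pthread_mutex_lock(b);
		}
		/* Both held. State guarded by *b may have changed while it
		 * was dropped, so callers must re-validate it. */
	}

This is why the function checks p->state only after the locks are settled: the runqueue may have changed while rq->lock was briefly released.
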
2577 2679
2578/** 2680/**
@@ -2605,19 +2707,21 @@ int wake_up_state(struct task_struct *p, unsigned int state)
2605 */ 2707 */
2606static void __sched_fork(struct task_struct *p) 2708static void __sched_fork(struct task_struct *p)
2607{ 2709{
2710 p->on_rq = 0;
2711
2712 p->se.on_rq = 0;
2608 p->se.exec_start = 0; 2713 p->se.exec_start = 0;
2609 p->se.sum_exec_runtime = 0; 2714 p->se.sum_exec_runtime = 0;
2610 p->se.prev_sum_exec_runtime = 0; 2715 p->se.prev_sum_exec_runtime = 0;
2611 p->se.nr_migrations = 0; 2716 p->se.nr_migrations = 0;
2612 p->se.vruntime = 0; 2717 p->se.vruntime = 0;
2718 INIT_LIST_HEAD(&p->se.group_node);
2613 2719
2614#ifdef CONFIG_SCHEDSTATS 2720#ifdef CONFIG_SCHEDSTATS
2615 memset(&p->se.statistics, 0, sizeof(p->se.statistics)); 2721 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
2616#endif 2722#endif
2617 2723
2618 INIT_LIST_HEAD(&p->rt.run_list); 2724 INIT_LIST_HEAD(&p->rt.run_list);
2619 p->se.on_rq = 0;
2620 INIT_LIST_HEAD(&p->se.group_node);
2621 2725
2622#ifdef CONFIG_PREEMPT_NOTIFIERS 2726#ifdef CONFIG_PREEMPT_NOTIFIERS
2623 INIT_HLIST_HEAD(&p->preempt_notifiers); 2727 INIT_HLIST_HEAD(&p->preempt_notifiers);
@@ -2629,6 +2733,7 @@ static void __sched_fork(struct task_struct *p)
2629 */ 2733 */
2630void sched_fork(struct task_struct *p, int clone_flags) 2734void sched_fork(struct task_struct *p, int clone_flags)
2631{ 2735{
2736 unsigned long flags;
2632 int cpu = get_cpu(); 2737 int cpu = get_cpu();
2633 2738
2634 __sched_fork(p); 2739 __sched_fork(p);
@@ -2679,16 +2784,16 @@ void sched_fork(struct task_struct *p, int clone_flags)
2679 * 2784 *
2680 * Silence PROVE_RCU. 2785 * Silence PROVE_RCU.
2681 */ 2786 */
2682 rcu_read_lock(); 2787 raw_spin_lock_irqsave(&p->pi_lock, flags);
2683 set_task_cpu(p, cpu); 2788 set_task_cpu(p, cpu);
2684 rcu_read_unlock(); 2789 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2685 2790
2686#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) 2791#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
2687 if (likely(sched_info_on())) 2792 if (likely(sched_info_on()))
2688 memset(&p->sched_info, 0, sizeof(p->sched_info)); 2793 memset(&p->sched_info, 0, sizeof(p->sched_info));
2689#endif 2794#endif
2690#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) 2795#if defined(CONFIG_SMP)
2691 p->oncpu = 0; 2796 p->on_cpu = 0;
2692#endif 2797#endif
2693#ifdef CONFIG_PREEMPT 2798#ifdef CONFIG_PREEMPT
2694 /* Want to start with kernel preemption disabled. */ 2799 /* Want to start with kernel preemption disabled. */
@@ -2712,37 +2817,27 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
2712{ 2817{
2713 unsigned long flags; 2818 unsigned long flags;
2714 struct rq *rq; 2819 struct rq *rq;
2715 int cpu __maybe_unused = get_cpu();
2716 2820
2821 raw_spin_lock_irqsave(&p->pi_lock, flags);
2717#ifdef CONFIG_SMP 2822#ifdef CONFIG_SMP
2718 rq = task_rq_lock(p, &flags);
2719 p->state = TASK_WAKING;
2720
2721 /* 2823 /*
2722 * Fork balancing, do it here and not earlier because: 2824 * Fork balancing, do it here and not earlier because:
2723 * - cpus_allowed can change in the fork path 2825 * - cpus_allowed can change in the fork path
2724 * - any previously selected cpu might disappear through hotplug 2826 * - any previously selected cpu might disappear through hotplug
2725 *
2726 * We set TASK_WAKING so that select_task_rq() can drop rq->lock
2727 * without people poking at ->cpus_allowed.
2728 */ 2827 */
2729 cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0); 2828 set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
2730 set_task_cpu(p, cpu);
2731
2732 p->state = TASK_RUNNING;
2733 task_rq_unlock(rq, &flags);
2734#endif 2829#endif
2735 2830
2736 rq = task_rq_lock(p, &flags); 2831 rq = __task_rq_lock(p);
2737 activate_task(rq, p, 0); 2832 activate_task(rq, p, 0);
2738 trace_sched_wakeup_new(p, 1); 2833 p->on_rq = 1;
2834 trace_sched_wakeup_new(p, true);
2739 check_preempt_curr(rq, p, WF_FORK); 2835 check_preempt_curr(rq, p, WF_FORK);
2740#ifdef CONFIG_SMP 2836#ifdef CONFIG_SMP
2741 if (p->sched_class->task_woken) 2837 if (p->sched_class->task_woken)
2742 p->sched_class->task_woken(rq, p); 2838 p->sched_class->task_woken(rq, p);
2743#endif 2839#endif
2744 task_rq_unlock(rq, &flags); 2840 task_rq_unlock(rq, p, &flags);
2745 put_cpu();
2746} 2841}
2747 2842
2748#ifdef CONFIG_PREEMPT_NOTIFIERS 2843#ifdef CONFIG_PREEMPT_NOTIFIERS
@@ -3451,27 +3546,22 @@ void sched_exec(void)
3451{ 3546{
3452 struct task_struct *p = current; 3547 struct task_struct *p = current;
3453 unsigned long flags; 3548 unsigned long flags;
3454 struct rq *rq;
3455 int dest_cpu; 3549 int dest_cpu;
3456 3550
3457 rq = task_rq_lock(p, &flags); 3551 raw_spin_lock_irqsave(&p->pi_lock, flags);
3458 dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0); 3552 dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
3459 if (dest_cpu == smp_processor_id()) 3553 if (dest_cpu == smp_processor_id())
3460 goto unlock; 3554 goto unlock;
3461 3555
3462 /* 3556 if (likely(cpu_active(dest_cpu))) {
3463 * select_task_rq() can race against ->cpus_allowed
3464 */
3465 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
3466 likely(cpu_active(dest_cpu)) && migrate_task(p, rq)) {
3467 struct migration_arg arg = { p, dest_cpu }; 3557 struct migration_arg arg = { p, dest_cpu };
3468 3558
3469 task_rq_unlock(rq, &flags); 3559 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
3470 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); 3560 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
3471 return; 3561 return;
3472 } 3562 }
3473unlock: 3563unlock:
3474 task_rq_unlock(rq, &flags); 3564 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
3475} 3565}
3476 3566
3477#endif 3567#endif
@@ -3508,7 +3598,7 @@ unsigned long long task_delta_exec(struct task_struct *p)
3508 3598
3509 rq = task_rq_lock(p, &flags); 3599 rq = task_rq_lock(p, &flags);
3510 ns = do_task_delta_exec(p, rq); 3600 ns = do_task_delta_exec(p, rq);
3511 task_rq_unlock(rq, &flags); 3601 task_rq_unlock(rq, p, &flags);
3512 3602
3513 return ns; 3603 return ns;
3514} 3604}
@@ -3526,7 +3616,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
3526 3616
3527 rq = task_rq_lock(p, &flags); 3617 rq = task_rq_lock(p, &flags);
3528 ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq); 3618 ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
3529 task_rq_unlock(rq, &flags); 3619 task_rq_unlock(rq, p, &flags);
3530 3620
3531 return ns; 3621 return ns;
3532} 3622}
@@ -3550,7 +3640,7 @@ unsigned long long thread_group_sched_runtime(struct task_struct *p)
3550 rq = task_rq_lock(p, &flags); 3640 rq = task_rq_lock(p, &flags);
3551 thread_group_cputime(p, &totals); 3641 thread_group_cputime(p, &totals);
3552 ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq); 3642 ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
3553 task_rq_unlock(rq, &flags); 3643 task_rq_unlock(rq, p, &flags);
3554 3644
3555 return ns; 3645 return ns;
3556} 3646}
@@ -4036,7 +4126,7 @@ static inline void schedule_debug(struct task_struct *prev)
4036 4126
4037static void put_prev_task(struct rq *rq, struct task_struct *prev) 4127static void put_prev_task(struct rq *rq, struct task_struct *prev)
4038{ 4128{
4039 if (prev->se.on_rq) 4129 if (prev->on_rq)
4040 update_rq_clock(rq); 4130 update_rq_clock(rq);
4041 prev->sched_class->put_prev_task(rq, prev); 4131 prev->sched_class->put_prev_task(rq, prev);
4042} 4132}
@@ -4098,11 +4188,13 @@ need_resched:
4098 if (unlikely(signal_pending_state(prev->state, prev))) { 4188 if (unlikely(signal_pending_state(prev->state, prev))) {
4099 prev->state = TASK_RUNNING; 4189 prev->state = TASK_RUNNING;
4100 } else { 4190 } else {
4191 deactivate_task(rq, prev, DEQUEUE_SLEEP);
4192 prev->on_rq = 0;
4193
4101 /* 4194 /*
4102 * If a worker is going to sleep, notify and 4195 * If a worker went to sleep, notify and ask workqueue
4103 * ask workqueue whether it wants to wake up a 4196 * whether it wants to wake up a task to maintain
4104 * task to maintain concurrency. If so, wake 4197 * concurrency.
4105 * up the task.
4106 */ 4198 */
4107 if (prev->flags & PF_WQ_WORKER) { 4199 if (prev->flags & PF_WQ_WORKER) {
4108 struct task_struct *to_wakeup; 4200 struct task_struct *to_wakeup;
@@ -4111,21 +4203,20 @@ need_resched:
4111 if (to_wakeup) 4203 if (to_wakeup)
4112 try_to_wake_up_local(to_wakeup); 4204 try_to_wake_up_local(to_wakeup);
4113 } 4205 }
4114 deactivate_task(rq, prev, DEQUEUE_SLEEP); 4206
4207 /*
4208 * If we are going to sleep and we have plugged IO
4209 * queued, make sure to submit it to avoid deadlocks.
4210 */
4211 if (blk_needs_flush_plug(prev)) {
4212 raw_spin_unlock(&rq->lock);
4213 blk_flush_plug(prev);
4214 raw_spin_lock(&rq->lock);
4215 }
4115 } 4216 }
4116 switch_count = &prev->nvcsw; 4217 switch_count = &prev->nvcsw;
4117 } 4218 }
4118 4219
4119 /*
4120 * If we are going to sleep and we have plugged IO queued, make
4121 * sure to submit it to avoid deadlocks.
4122 */
4123 if (prev->state != TASK_RUNNING && blk_needs_flush_plug(prev)) {
4124 raw_spin_unlock(&rq->lock);
4125 blk_flush_plug(prev);
4126 raw_spin_lock(&rq->lock);
4127 }
4128
4129 pre_schedule(rq, prev); 4220 pre_schedule(rq, prev);
4130 4221
4131 if (unlikely(!rq->nr_running)) 4222 if (unlikely(!rq->nr_running))
@@ -4162,70 +4253,53 @@ need_resched:
4162EXPORT_SYMBOL(schedule); 4253EXPORT_SYMBOL(schedule);
4163 4254
4164#ifdef CONFIG_MUTEX_SPIN_ON_OWNER 4255#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
4165/*
4166 * Look out! "owner" is an entirely speculative pointer
4167 * access and not reliable.
4168 */
4169int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
4170{
4171 unsigned int cpu;
4172 struct rq *rq;
4173 4256
4174 if (!sched_feat(OWNER_SPIN)) 4257static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
4175 return 0; 4258{
4259 bool ret = false;
4176 4260
4177#ifdef CONFIG_DEBUG_PAGEALLOC 4261 rcu_read_lock();
4178 /* 4262 if (lock->owner != owner)
4179 * Need to access the cpu field knowing that 4263 goto fail;
4180 * DEBUG_PAGEALLOC could have unmapped it if
4181 * the mutex owner just released it and exited.
4182 */
4183 if (probe_kernel_address(&owner->cpu, cpu))
4184 return 0;
4185#else
4186 cpu = owner->cpu;
4187#endif
4188 4264
4189 /* 4265 /*
4190 * Even if the access succeeded (likely case), 4266 * Ensure we emit the owner->on_cpu dereference _after_ checking
4191 * the cpu field may no longer be valid. 4267 * lock->owner still matches owner; if that fails, owner might
4268 * point to free()d memory; if it still matches, the rcu_read_lock()
4269 * ensures the memory stays valid.
4192 */ 4270 */
4193 if (cpu >= nr_cpumask_bits) 4271 barrier();
4194 return 0;
4195 4272
4196 /* 4273 ret = owner->on_cpu;
4197 * We need to validate that we can do a 4274fail:
4198 * get_cpu() and that we have the percpu area. 4275 rcu_read_unlock();
4199 */
4200 if (!cpu_online(cpu))
4201 return 0;
4202 4276
4203 rq = cpu_rq(cpu); 4277 return ret;
4278}
4204 4279
4205 for (;;) { 4280/*
4206 /* 4281 * Look out! "owner" is an entirely speculative pointer
4207 * Owner changed, break to re-assess state. 4282 * access and not reliable.
4208 */ 4283 */
4209 if (lock->owner != owner) { 4284int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
4210 /* 4285{
4211 * If the lock has switched to a different owner, 4286 if (!sched_feat(OWNER_SPIN))
4212 * we likely have heavy contention. Return 0 to quit 4287 return 0;
4213 * optimistic spinning and not contend further:
4214 */
4215 if (lock->owner)
4216 return 0;
4217 break;
4218 }
4219 4288
4220 /* 4289 while (owner_running(lock, owner)) {
4221 * Is that owner really running on that cpu? 4290 if (need_resched())
4222 */
4223 if (task_thread_info(rq->curr) != owner || need_resched())
4224 return 0; 4291 return 0;
4225 4292
4226 arch_mutex_cpu_relax(); 4293 arch_mutex_cpu_relax();
4227 } 4294 }
4228 4295
4296 /*
4297 * If the owner changed to another task there is likely
4298 * heavy contention, stop spinning.
4299 */
4300 if (lock->owner)
4301 return 0;
4302
4229 return 1; 4303 return 1;
4230} 4304}
4231#endif 4305#endif
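
The rewritten mutex_spin_on_owner() above splits the policy into owner_running(): keep spinning only while the lock is still held by the same owner and that owner is executing on a CPU, and fall back to sleeping once ownership has moved on. A rough userspace rendering of the control flow (the rcu_read_lock() that keeps a freed owner safely readable is elided; all names are illustrative):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <sched.h>

	struct task { atomic_int on_cpu; };
	struct mutex { _Atomic(struct task *) owner; };

	static bool owner_running(struct mutex *lock, struct task *owner)
	{
		if (atomic_load(&lock->owner) != owner)
			return false;	/* ownership changed: stop and re-assess */
		return atomic_load(&owner->on_cpu) != 0;
	}

	static int spin_on_owner(struct mutex *lock, struct task *owner)
	{
		while (owner_running(lock, owner))
			sched_yield();	/* stand-in for arch_mutex_cpu_relax() */

		/* Owner moved to another task: likely heavy contention, so
		 * stop spinning and block instead. */
		if (atomic_load(&lock->owner))
			return 0;

		return 1;		/* lock was released: worth another try */
	}
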
@@ -4685,19 +4759,18 @@ EXPORT_SYMBOL(sleep_on_timeout);
4685 */ 4759 */
4686void rt_mutex_setprio(struct task_struct *p, int prio) 4760void rt_mutex_setprio(struct task_struct *p, int prio)
4687{ 4761{
4688 unsigned long flags;
4689 int oldprio, on_rq, running; 4762 int oldprio, on_rq, running;
4690 struct rq *rq; 4763 struct rq *rq;
4691 const struct sched_class *prev_class; 4764 const struct sched_class *prev_class;
4692 4765
4693 BUG_ON(prio < 0 || prio > MAX_PRIO); 4766 BUG_ON(prio < 0 || prio > MAX_PRIO);
4694 4767
4695 rq = task_rq_lock(p, &flags); 4768 rq = __task_rq_lock(p);
4696 4769
4697 trace_sched_pi_setprio(p, prio); 4770 trace_sched_pi_setprio(p, prio);
4698 oldprio = p->prio; 4771 oldprio = p->prio;
4699 prev_class = p->sched_class; 4772 prev_class = p->sched_class;
4700 on_rq = p->se.on_rq; 4773 on_rq = p->on_rq;
4701 running = task_current(rq, p); 4774 running = task_current(rq, p);
4702 if (on_rq) 4775 if (on_rq)
4703 dequeue_task(rq, p, 0); 4776 dequeue_task(rq, p, 0);
@@ -4717,7 +4790,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
4717 enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0); 4790 enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
4718 4791
4719 check_class_changed(rq, p, prev_class, oldprio); 4792 check_class_changed(rq, p, prev_class, oldprio);
4720 task_rq_unlock(rq, &flags); 4793 __task_rq_unlock(rq);
4721} 4794}
4722 4795
4723#endif 4796#endif
@@ -4745,7 +4818,7 @@ void set_user_nice(struct task_struct *p, long nice)
4745 p->static_prio = NICE_TO_PRIO(nice); 4818 p->static_prio = NICE_TO_PRIO(nice);
4746 goto out_unlock; 4819 goto out_unlock;
4747 } 4820 }
4748 on_rq = p->se.on_rq; 4821 on_rq = p->on_rq;
4749 if (on_rq) 4822 if (on_rq)
4750 dequeue_task(rq, p, 0); 4823 dequeue_task(rq, p, 0);
4751 4824
@@ -4765,7 +4838,7 @@ void set_user_nice(struct task_struct *p, long nice)
4765 resched_task(rq->curr); 4838 resched_task(rq->curr);
4766 } 4839 }
4767out_unlock: 4840out_unlock:
4768 task_rq_unlock(rq, &flags); 4841 task_rq_unlock(rq, p, &flags);
4769} 4842}
4770EXPORT_SYMBOL(set_user_nice); 4843EXPORT_SYMBOL(set_user_nice);
4771 4844
@@ -4879,8 +4952,6 @@ static struct task_struct *find_process_by_pid(pid_t pid)
4879static void 4952static void
4880__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio) 4953__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
4881{ 4954{
4882 BUG_ON(p->se.on_rq);
4883
4884 p->policy = policy; 4955 p->policy = policy;
4885 p->rt_priority = prio; 4956 p->rt_priority = prio;
4886 p->normal_prio = normal_prio(p); 4957 p->normal_prio = normal_prio(p);
@@ -4995,20 +5066,17 @@ recheck:
4995 /* 5066 /*
4996 * make sure no PI-waiters arrive (or leave) while we are 5067 * make sure no PI-waiters arrive (or leave) while we are
4997 * changing the priority of the task: 5068 * changing the priority of the task:
4998 */ 5069 *
4999 raw_spin_lock_irqsave(&p->pi_lock, flags);
5000 /*
5001 * To be able to change p->policy safely, the appropriate 5070 * To be able to change p->policy safely, the appropriate
5002 * runqueue lock must be held. 5071 * runqueue lock must be held.
5003 */ 5072 */
5004 rq = __task_rq_lock(p); 5073 rq = task_rq_lock(p, &flags);
5005 5074
5006 /* 5075 /*
5007 * Changing the policy of the stop threads is a very bad idea 5076 * Changing the policy of the stop threads is a very bad idea
5008 */ 5077 */
5009 if (p == rq->stop) { 5078 if (p == rq->stop) {
5010 __task_rq_unlock(rq); 5079 task_rq_unlock(rq, p, &flags);
5011 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5012 return -EINVAL; 5080 return -EINVAL;
5013 } 5081 }
5014 5082
@@ -5032,8 +5100,7 @@ recheck:
5032 if (rt_bandwidth_enabled() && rt_policy(policy) && 5100 if (rt_bandwidth_enabled() && rt_policy(policy) &&
5033 task_group(p)->rt_bandwidth.rt_runtime == 0 && 5101 task_group(p)->rt_bandwidth.rt_runtime == 0 &&
5034 !task_group_is_autogroup(task_group(p))) { 5102 !task_group_is_autogroup(task_group(p))) {
5035 __task_rq_unlock(rq); 5103 task_rq_unlock(rq, p, &flags);
5036 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5037 return -EPERM; 5104 return -EPERM;
5038 } 5105 }
5039 } 5106 }
@@ -5042,11 +5109,10 @@ recheck:
5042 /* recheck policy now with rq lock held */ 5109 /* recheck policy now with rq lock held */
5043 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { 5110 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
5044 policy = oldpolicy = -1; 5111 policy = oldpolicy = -1;
5045 __task_rq_unlock(rq); 5112 task_rq_unlock(rq, p, &flags);
5046 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5047 goto recheck; 5113 goto recheck;
5048 } 5114 }
5049 on_rq = p->se.on_rq; 5115 on_rq = p->on_rq;
5050 running = task_current(rq, p); 5116 running = task_current(rq, p);
5051 if (on_rq) 5117 if (on_rq)
5052 deactivate_task(rq, p, 0); 5118 deactivate_task(rq, p, 0);
@@ -5065,8 +5131,7 @@ recheck:
5065 activate_task(rq, p, 0); 5131 activate_task(rq, p, 0);
5066 5132
5067 check_class_changed(rq, p, prev_class, oldprio); 5133 check_class_changed(rq, p, prev_class, oldprio);
5068 __task_rq_unlock(rq); 5134 task_rq_unlock(rq, p, &flags);
5069 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5070 5135
5071 rt_mutex_adjust_pi(p); 5136 rt_mutex_adjust_pi(p);
5072 5137
@@ -5317,7 +5382,6 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
5317{ 5382{
5318 struct task_struct *p; 5383 struct task_struct *p;
5319 unsigned long flags; 5384 unsigned long flags;
5320 struct rq *rq;
5321 int retval; 5385 int retval;
5322 5386
5323 get_online_cpus(); 5387 get_online_cpus();
@@ -5332,9 +5396,9 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
5332 if (retval) 5396 if (retval)
5333 goto out_unlock; 5397 goto out_unlock;
5334 5398
5335 rq = task_rq_lock(p, &flags); 5399 raw_spin_lock_irqsave(&p->pi_lock, flags);
5336 cpumask_and(mask, &p->cpus_allowed, cpu_online_mask); 5400 cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
5337 task_rq_unlock(rq, &flags); 5401 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5338 5402
5339out_unlock: 5403out_unlock:
5340 rcu_read_unlock(); 5404 rcu_read_unlock();
@@ -5659,7 +5723,7 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
5659 5723
5660 rq = task_rq_lock(p, &flags); 5724 rq = task_rq_lock(p, &flags);
5661 time_slice = p->sched_class->get_rr_interval(rq, p); 5725 time_slice = p->sched_class->get_rr_interval(rq, p);
5662 task_rq_unlock(rq, &flags); 5726 task_rq_unlock(rq, p, &flags);
5663 5727
5664 rcu_read_unlock(); 5728 rcu_read_unlock();
5665 jiffies_to_timespec(time_slice, &t); 5729 jiffies_to_timespec(time_slice, &t);
@@ -5777,8 +5841,8 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
5777 rcu_read_unlock(); 5841 rcu_read_unlock();
5778 5842
5779 rq->curr = rq->idle = idle; 5843 rq->curr = rq->idle = idle;
5780#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) 5844#if defined(CONFIG_SMP)
5781 idle->oncpu = 1; 5845 idle->on_cpu = 1;
5782#endif 5846#endif
5783 raw_spin_unlock_irqrestore(&rq->lock, flags); 5847 raw_spin_unlock_irqrestore(&rq->lock, flags);
5784 5848
@@ -5882,18 +5946,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
5882 unsigned int dest_cpu; 5946 unsigned int dest_cpu;
5883 int ret = 0; 5947 int ret = 0;
5884 5948
5885 /*
5886 * Serialize against TASK_WAKING so that ttwu() and wunt() can
5887 * drop the rq->lock and still rely on ->cpus_allowed.
5888 */
5889again:
5890 while (task_is_waking(p))
5891 cpu_relax();
5892 rq = task_rq_lock(p, &flags); 5949 rq = task_rq_lock(p, &flags);
5893 if (task_is_waking(p)) {
5894 task_rq_unlock(rq, &flags);
5895 goto again;
5896 }
5897 5950
5898 if (!cpumask_intersects(new_mask, cpu_active_mask)) { 5951 if (!cpumask_intersects(new_mask, cpu_active_mask)) {
5899 ret = -EINVAL; 5952 ret = -EINVAL;
@@ -5918,16 +5971,16 @@ again:
5918 goto out; 5971 goto out;
5919 5972
5920 dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); 5973 dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
5921 if (migrate_task(p, rq)) { 5974 if (p->on_rq) {
5922 struct migration_arg arg = { p, dest_cpu }; 5975 struct migration_arg arg = { p, dest_cpu };
5923 /* Need help from migration thread: drop lock and wait. */ 5976 /* Need help from migration thread: drop lock and wait. */
5924 task_rq_unlock(rq, &flags); 5977 task_rq_unlock(rq, p, &flags);
5925 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); 5978 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
5926 tlb_migrate_finish(p->mm); 5979 tlb_migrate_finish(p->mm);
5927 return 0; 5980 return 0;
5928 } 5981 }
5929out: 5982out:
5930 task_rq_unlock(rq, &flags); 5983 task_rq_unlock(rq, p, &flags);
5931 5984
5932 return ret; 5985 return ret;
5933} 5986}
@@ -5955,6 +6008,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
5955 rq_src = cpu_rq(src_cpu); 6008 rq_src = cpu_rq(src_cpu);
5956 rq_dest = cpu_rq(dest_cpu); 6009 rq_dest = cpu_rq(dest_cpu);
5957 6010
6011 raw_spin_lock(&p->pi_lock);
5958 double_rq_lock(rq_src, rq_dest); 6012 double_rq_lock(rq_src, rq_dest);
5959 /* Already moved. */ 6013 /* Already moved. */
5960 if (task_cpu(p) != src_cpu) 6014 if (task_cpu(p) != src_cpu)
@@ -5967,7 +6021,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
5967 * If we're not on a rq, the next wake-up will ensure we're 6021 * If we're not on a rq, the next wake-up will ensure we're
5968 * placed properly. 6022 * placed properly.
5969 */ 6023 */
5970 if (p->se.on_rq) { 6024 if (p->on_rq) {
5971 deactivate_task(rq_src, p, 0); 6025 deactivate_task(rq_src, p, 0);
5972 set_task_cpu(p, dest_cpu); 6026 set_task_cpu(p, dest_cpu);
5973 activate_task(rq_dest, p, 0); 6027 activate_task(rq_dest, p, 0);
@@ -5977,6 +6031,7 @@ done:
5977 ret = 1; 6031 ret = 1;
5978fail: 6032fail:
5979 double_rq_unlock(rq_src, rq_dest); 6033 double_rq_unlock(rq_src, rq_dest);
6034 raw_spin_unlock(&p->pi_lock);
5980 return ret; 6035 return ret;
5981} 6036}
5982 6037
@@ -6317,6 +6372,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
6317 6372
6318#ifdef CONFIG_HOTPLUG_CPU 6373#ifdef CONFIG_HOTPLUG_CPU
6319 case CPU_DYING: 6374 case CPU_DYING:
6375 sched_ttwu_pending();
6320 /* Update our root-domain */ 6376 /* Update our root-domain */
6321 raw_spin_lock_irqsave(&rq->lock, flags); 6377 raw_spin_lock_irqsave(&rq->lock, flags);
6322 if (rq->rd) { 6378 if (rq->rd) {
@@ -7961,7 +8017,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
7961 int old_prio = p->prio; 8017 int old_prio = p->prio;
7962 int on_rq; 8018 int on_rq;
7963 8019
7964 on_rq = p->se.on_rq; 8020 on_rq = p->on_rq;
7965 if (on_rq) 8021 if (on_rq)
7966 deactivate_task(rq, p, 0); 8022 deactivate_task(rq, p, 0);
7967 __setscheduler(rq, p, SCHED_NORMAL, 0); 8023 __setscheduler(rq, p, SCHED_NORMAL, 0);
@@ -8304,7 +8360,7 @@ void sched_move_task(struct task_struct *tsk)
8304 rq = task_rq_lock(tsk, &flags); 8360 rq = task_rq_lock(tsk, &flags);
8305 8361
8306 running = task_current(rq, tsk); 8362 running = task_current(rq, tsk);
8307 on_rq = tsk->se.on_rq; 8363 on_rq = tsk->on_rq;
8308 8364
8309 if (on_rq) 8365 if (on_rq)
8310 dequeue_task(rq, tsk, 0); 8366 dequeue_task(rq, tsk, 0);
@@ -8323,7 +8379,7 @@ void sched_move_task(struct task_struct *tsk)
8323 if (on_rq) 8379 if (on_rq)
8324 enqueue_task(rq, tsk, 0); 8380 enqueue_task(rq, tsk, 0);
8325 8381
8326 task_rq_unlock(rq, &flags); 8382 task_rq_unlock(rq, tsk, &flags);
8327} 8383}
8328#endif /* CONFIG_CGROUP_SCHED */ 8384#endif /* CONFIG_CGROUP_SCHED */
8329 8385
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 7bacd83a4158..3669bec6e130 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -152,7 +152,7 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
152 read_lock_irqsave(&tasklist_lock, flags); 152 read_lock_irqsave(&tasklist_lock, flags);
153 153
154 do_each_thread(g, p) { 154 do_each_thread(g, p) {
155 if (!p->se.on_rq || task_cpu(p) != rq_cpu) 155 if (!p->on_rq || task_cpu(p) != rq_cpu)
156 continue; 156 continue;
157 157
158 print_task(m, rq, p); 158 print_task(m, rq, p);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 9c5679cfe3b0..87445931a179 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -358,6 +358,10 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
358 } 358 }
359 359
360 cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime); 360 cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
361#ifndef CONFIG_64BIT
362 smp_wmb();
363 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
364#endif
361} 365}
362 366
363/* 367/*
@@ -1372,12 +1376,25 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
1372 1376
1373#ifdef CONFIG_SMP 1377#ifdef CONFIG_SMP
1374 1378
1375static void task_waking_fair(struct rq *rq, struct task_struct *p) 1379static void task_waking_fair(struct task_struct *p)
1376{ 1380{
1377 struct sched_entity *se = &p->se; 1381 struct sched_entity *se = &p->se;
1378 struct cfs_rq *cfs_rq = cfs_rq_of(se); 1382 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1383 u64 min_vruntime;
1379 1384
1380 se->vruntime -= cfs_rq->min_vruntime; 1385#ifndef CONFIG_64BIT
1386 u64 min_vruntime_copy;
1387
1388 do {
1389 min_vruntime_copy = cfs_rq->min_vruntime_copy;
1390 smp_rmb();
1391 min_vruntime = cfs_rq->min_vruntime;
1392 } while (min_vruntime != min_vruntime_copy);
1393#else
1394 min_vruntime = cfs_rq->min_vruntime;
1395#endif
1396
1397 se->vruntime -= min_vruntime;
1381} 1398}
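
The min_vruntime_copy machinery introduced here is a seqlock-like workaround for 32-bit builds, where a 64-bit load can tear: the writer stores min_vruntime, issues smp_wmb(), then stores the copy; the reader loads the copy, issues smp_rmb(), loads the value, and retries until the two agree. A compact sketch of the protocol (it mirrors the kernel's barrier usage rather than strict C11 data-race rules):

	#include <stdatomic.h>
	#include <stdint.h>

	struct split64 {
		uint64_t val;	/* may tear on 32-bit */
		uint64_t copy;	/* published after val */
	};

	static void split64_write(struct split64 *s, uint64_t v)
	{
		s->val = v;
		atomic_thread_fence(memory_order_release);	/* smp_wmb() */
		s->copy = v;
	}

	static uint64_t split64_read(const struct split64 *s)
	{
		uint64_t v, c;

		do {
			c = s->copy;
			atomic_thread_fence(memory_order_acquire);	/* smp_rmb() */
			v = s->val;
		} while (v != c);	/* torn or in-flight update: retry */

		return v;
	}
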
1382 1399
1383#ifdef CONFIG_FAIR_GROUP_SCHED 1400#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -1659,7 +1676,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
1659 * preempt must be disabled. 1676 * preempt must be disabled.
1660 */ 1677 */
1661static int 1678static int
1662select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_flags) 1679select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
1663{ 1680{
1664 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL; 1681 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
1665 int cpu = smp_processor_id(); 1682 int cpu = smp_processor_id();
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index 68e69acc29b9..be40f7371ee1 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -64,3 +64,9 @@ SCHED_FEAT(OWNER_SPIN, 1)
64 * Decrement CPU power based on irq activity 64 * Decrement CPU power based on irq activity
65 */ 65 */
66SCHED_FEAT(NONIRQ_POWER, 1) 66SCHED_FEAT(NONIRQ_POWER, 1)
67
68/*
69 * Queue remote wakeups on the target CPU and process them
70 * using the scheduler IPI. Reduces rq->lock contention/bounces.
71 */
72SCHED_FEAT(TTWU_QUEUE, 1)
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index a776a6396427..0a51882534ea 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -7,7 +7,7 @@
7 7
8#ifdef CONFIG_SMP 8#ifdef CONFIG_SMP
9static int 9static int
10select_task_rq_idle(struct rq *rq, struct task_struct *p, int sd_flag, int flags) 10select_task_rq_idle(struct task_struct *p, int sd_flag, int flags)
11{ 11{
12 return task_cpu(p); /* IDLE tasks are never migrated */ 12 return task_cpu(p); /* IDLE tasks are never migrated */
13} 13}
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index e7cebdc65f82..19ecb3127379 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -977,13 +977,23 @@ static void yield_task_rt(struct rq *rq)
977static int find_lowest_rq(struct task_struct *task); 977static int find_lowest_rq(struct task_struct *task);
978 978
979static int 979static int
980select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags) 980select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
981{ 981{
982 struct task_struct *curr;
983 struct rq *rq;
984 int cpu;
985
982 if (sd_flag != SD_BALANCE_WAKE) 986 if (sd_flag != SD_BALANCE_WAKE)
983 return smp_processor_id(); 987 return smp_processor_id();
984 988
989 cpu = task_cpu(p);
990 rq = cpu_rq(cpu);
991
992 rcu_read_lock();
993 curr = ACCESS_ONCE(rq->curr); /* unlocked access */
994
985 /* 995 /*
986 * If the current task is an RT task, then 996 * If the current task on @p's runqueue is an RT task, then
987 * try to see if we can wake this RT task up on another 997 * try to see if we can wake this RT task up on another
988 * runqueue. Otherwise simply start this RT task 998 * runqueue. Otherwise simply start this RT task
989 * on its current runqueue. 999 * on its current runqueue.
@@ -997,21 +1007,25 @@ select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
997 * lock? 1007 * lock?
998 * 1008 *
999 * For equal prio tasks, we just let the scheduler sort it out. 1009 * For equal prio tasks, we just let the scheduler sort it out.
1010 *
1011 * Otherwise, just let it ride on the affined RQ and the
1012 * post-schedule router will push the preempted task away
1013 *
1014 * This test is optimistic; if we get it wrong the load-balancer
1015 * will have to sort it out.
1000 */ 1016 */
1001 if (unlikely(rt_task(rq->curr)) && 1017 if (curr && unlikely(rt_task(curr)) &&
1002 (rq->curr->rt.nr_cpus_allowed < 2 || 1018 (curr->rt.nr_cpus_allowed < 2 ||
1003 rq->curr->prio < p->prio) && 1019 curr->prio < p->prio) &&
1004 (p->rt.nr_cpus_allowed > 1)) { 1020 (p->rt.nr_cpus_allowed > 1)) {
1005 int cpu = find_lowest_rq(p); 1021 int target = find_lowest_rq(p);
1006 1022
1007 return (cpu == -1) ? task_cpu(p) : cpu; 1023 if (target != -1)
1024 cpu = target;
1008 } 1025 }
1026 rcu_read_unlock();
1009 1027
1010 /* 1028 return cpu;
1011 * Otherwise, just let it ride on the affined RQ and the
1012 * post-schedule router will push the preempted task away
1013 */
1014 return task_cpu(p);
1015} 1029}
1016 1030
1017static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) 1031static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
@@ -1136,7 +1150,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1136 * The previous task needs to be made eligible for pushing 1150 * The previous task needs to be made eligible for pushing
1137 * if it is still active 1151 * if it is still active
1138 */ 1152 */
1139 if (p->se.on_rq && p->rt.nr_cpus_allowed > 1) 1153 if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1)
1140 enqueue_pushable_task(rq, p); 1154 enqueue_pushable_task(rq, p);
1141} 1155}
1142 1156
@@ -1287,7 +1301,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1287 !cpumask_test_cpu(lowest_rq->cpu, 1301 !cpumask_test_cpu(lowest_rq->cpu,
1288 &task->cpus_allowed) || 1302 &task->cpus_allowed) ||
1289 task_running(rq, task) || 1303 task_running(rq, task) ||
1290 !task->se.on_rq)) { 1304 !task->on_rq)) {
1291 1305
1292 raw_spin_unlock(&lowest_rq->lock); 1306 raw_spin_unlock(&lowest_rq->lock);
1293 lowest_rq = NULL; 1307 lowest_rq = NULL;
@@ -1321,7 +1335,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq)
1321 BUG_ON(task_current(rq, p)); 1335 BUG_ON(task_current(rq, p));
1322 BUG_ON(p->rt.nr_cpus_allowed <= 1); 1336 BUG_ON(p->rt.nr_cpus_allowed <= 1);
1323 1337
1324 BUG_ON(!p->se.on_rq); 1338 BUG_ON(!p->on_rq);
1325 BUG_ON(!rt_task(p)); 1339 BUG_ON(!rt_task(p));
1326 1340
1327 return p; 1341 return p;
@@ -1467,7 +1481,7 @@ static int pull_rt_task(struct rq *this_rq)
1467 */ 1481 */
1468 if (p && (p->prio < this_rq->rt.highest_prio.curr)) { 1482 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1469 WARN_ON(p == src_rq->curr); 1483 WARN_ON(p == src_rq->curr);
1470 WARN_ON(!p->se.on_rq); 1484 WARN_ON(!p->on_rq);
1471 1485
1472 /* 1486 /*
1473 * There's a chance that p is higher in priority 1487 * There's a chance that p is higher in priority
@@ -1538,7 +1552,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
1538 * Update the migration status of the RQ if we have an RT task 1552 * Update the migration status of the RQ if we have an RT task
1539 * which is running AND changing its weight value. 1553 * which is running AND changing its weight value.
1540 */ 1554 */
1541 if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) { 1555 if (p->on_rq && (weight != p->rt.nr_cpus_allowed)) {
1542 struct rq *rq = task_rq(p); 1556 struct rq *rq = task_rq(p);
1543 1557
1544 if (!task_current(rq, p)) { 1558 if (!task_current(rq, p)) {
@@ -1608,7 +1622,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
1608 * we may need to handle the pulling of RT tasks 1622 * we may need to handle the pulling of RT tasks
1609 * now. 1623 * now.
1610 */ 1624 */
1611 if (p->se.on_rq && !rq->rt.rt_nr_running) 1625 if (p->on_rq && !rq->rt.rt_nr_running)
1612 pull_rt_task(rq); 1626 pull_rt_task(rq);
1613} 1627}
1614 1628
@@ -1638,7 +1652,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
1638 * If that current running task is also an RT task 1652 * If that current running task is also an RT task
1639 * then see if we can move to another run queue. 1653 * then see if we can move to another run queue.
1640 */ 1654 */
1641 if (p->se.on_rq && rq->curr != p) { 1655 if (p->on_rq && rq->curr != p) {
1642#ifdef CONFIG_SMP 1656#ifdef CONFIG_SMP
1643 if (rq->rt.overloaded && push_rt_task(rq) && 1657 if (rq->rt.overloaded && push_rt_task(rq) &&
1644 /* Don't resched if we changed runqueues */ 1658 /* Don't resched if we changed runqueues */
@@ -1657,7 +1671,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
1657static void 1671static void
1658prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio) 1672prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
1659{ 1673{
1660 if (!p->se.on_rq) 1674 if (!p->on_rq)
1661 return; 1675 return;
1662 1676
1663 if (rq->curr == p) { 1677 if (rq->curr == p) {
diff --git a/kernel/sched_stoptask.c b/kernel/sched_stoptask.c
index 1ba2bd40fdac..6f437632afab 100644
--- a/kernel/sched_stoptask.c
+++ b/kernel/sched_stoptask.c
@@ -9,8 +9,7 @@
9 9
10#ifdef CONFIG_SMP 10#ifdef CONFIG_SMP
11static int 11static int
12select_task_rq_stop(struct rq *rq, struct task_struct *p, 12select_task_rq_stop(struct task_struct *p, int sd_flag, int flags)
13 int sd_flag, int flags)
14{ 13{
15 return task_cpu(p); /* stop tasks never migrate */ 14 return task_cpu(p); /* stop tasks never migrate */
16} 15}
@@ -26,7 +25,7 @@ static struct task_struct *pick_next_task_stop(struct rq *rq)
26{ 25{
27 struct task_struct *stop = rq->stop; 26 struct task_struct *stop = rq->stop;
28 27
29 if (stop && stop->se.on_rq) 28 if (stop && stop->on_rq)
30 return stop; 29 return stop;
31 30
32 return NULL; 31 return NULL;
diff --git a/mm/memory.c b/mm/memory.c
index 9da8cab1b1b0..b623a249918c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1410,6 +1410,13 @@ no_page_table:
1410 return page; 1410 return page;
1411} 1411}
1412 1412
1413static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
1414{
1415 return (vma->vm_flags & VM_GROWSDOWN) &&
1416 (vma->vm_start == addr) &&
1417 !vma_stack_continue(vma->vm_prev, addr);
1418}
1419
1413/** 1420/**
1414 * __get_user_pages() - pin user pages in memory 1421 * __get_user_pages() - pin user pages in memory
1415 * @tsk: task_struct of target task 1422 * @tsk: task_struct of target task
@@ -1488,7 +1495,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1488 vma = find_extend_vma(mm, start); 1495 vma = find_extend_vma(mm, start);
1489 if (!vma && in_gate_area(mm, start)) { 1496 if (!vma && in_gate_area(mm, start)) {
1490 unsigned long pg = start & PAGE_MASK; 1497 unsigned long pg = start & PAGE_MASK;
1491 struct vm_area_struct *gate_vma = get_gate_vma(mm);
1492 pgd_t *pgd; 1498 pgd_t *pgd;
1493 pud_t *pud; 1499 pud_t *pud;
1494 pmd_t *pmd; 1500 pmd_t *pmd;
@@ -1513,10 +1519,11 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1513 pte_unmap(pte); 1519 pte_unmap(pte);
1514 return i ? : -EFAULT; 1520 return i ? : -EFAULT;
1515 } 1521 }
1522 vma = get_gate_vma(mm);
1516 if (pages) { 1523 if (pages) {
1517 struct page *page; 1524 struct page *page;
1518 1525
1519 page = vm_normal_page(gate_vma, start, *pte); 1526 page = vm_normal_page(vma, start, *pte);
1520 if (!page) { 1527 if (!page) {
1521 if (!(gup_flags & FOLL_DUMP) && 1528 if (!(gup_flags & FOLL_DUMP) &&
1522 is_zero_pfn(pte_pfn(*pte))) 1529 is_zero_pfn(pte_pfn(*pte)))
@@ -1530,12 +1537,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1530 get_page(page); 1537 get_page(page);
1531 } 1538 }
1532 pte_unmap(pte); 1539 pte_unmap(pte);
1533 if (vmas) 1540 goto next_page;
1534 vmas[i] = gate_vma;
1535 i++;
1536 start += PAGE_SIZE;
1537 nr_pages--;
1538 continue;
1539 } 1541 }
1540 1542
1541 if (!vma || 1543 if (!vma ||
@@ -1549,6 +1551,13 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1549 continue; 1551 continue;
1550 } 1552 }
1551 1553
1554 /*
1555 * If we don't actually want the page itself,
1556 * and it's the stack guard page, just skip it.
1557 */
1558 if (!pages && stack_guard_page(vma, start))
1559 goto next_page;
1560
1552 do { 1561 do {
1553 struct page *page; 1562 struct page *page;
1554 unsigned int foll_flags = gup_flags; 1563 unsigned int foll_flags = gup_flags;
@@ -1631,6 +1640,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1631 flush_anon_page(vma, page, start); 1640 flush_anon_page(vma, page, start);
1632 flush_dcache_page(page); 1641 flush_dcache_page(page);
1633 } 1642 }
1643next_page:
1634 if (vmas) 1644 if (vmas)
1635 vmas[i] = vma; 1645 vmas[i] = vma;
1636 i++; 1646 i++;
diff --git a/mm/mlock.c b/mm/mlock.c
index 2689a08c79af..6b55e3efe0df 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -135,13 +135,6 @@ void munlock_vma_page(struct page *page)
135 } 135 }
136} 136}
137 137
138static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
139{
140 return (vma->vm_flags & VM_GROWSDOWN) &&
141 (vma->vm_start == addr) &&
142 !vma_stack_continue(vma->vm_prev, addr);
143}
144
145/** 138/**
146 * __mlock_vma_pages_range() - mlock a range of pages in the vma. 139 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
147 * @vma: target vma 140 * @vma: target vma
@@ -188,12 +181,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
188 if (vma->vm_flags & VM_LOCKED) 181 if (vma->vm_flags & VM_LOCKED)
189 gup_flags |= FOLL_MLOCK; 182 gup_flags |= FOLL_MLOCK;
190 183
191 /* We don't try to access the guard page of a stack vma */
192 if (stack_guard_page(vma, start)) {
193 addr += PAGE_SIZE;
194 nr_pages--;
195 }
196
197 return __get_user_pages(current, mm, addr, nr_pages, gup_flags, 184 return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
198 NULL, NULL, nonblocking); 185 NULL, NULL, nonblocking);
199} 186}
diff --git a/mm/mmap.c b/mm/mmap.c
index 2ec8eb5a9cdd..8c05e5b43b69 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1814,11 +1814,14 @@ static int expand_downwards(struct vm_area_struct *vma,
1814 size = vma->vm_end - address; 1814 size = vma->vm_end - address;
1815 grow = (vma->vm_start - address) >> PAGE_SHIFT; 1815 grow = (vma->vm_start - address) >> PAGE_SHIFT;
1816 1816
1817 error = acct_stack_growth(vma, size, grow); 1817 error = -ENOMEM;
1818 if (!error) { 1818 if (grow <= vma->vm_pgoff) {
1819 vma->vm_start = address; 1819 error = acct_stack_growth(vma, size, grow);
1820 vma->vm_pgoff -= grow; 1820 if (!error) {
1821 perf_event_mmap(vma); 1821 vma->vm_start = address;
1822 vma->vm_pgoff -= grow;
1823 perf_event_mmap(vma);
1824 }
1822 } 1825 }
1823 } 1826 }
1824 vma_unlock_anon_vma(vma); 1827 vma_unlock_anon_vma(vma);
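
The expand_downwards() change above guards against vm_pgoff underflow: growing a stack vma downward by 'grow' pages also decrements the (unsigned) vm_pgoff by 'grow', so the request must be refused with -ENOMEM once grow exceeds vm_pgoff. A trivial illustration of the bound being enforced (not kernel code):

	#include <stdio.h>

	/* Reject growth that would wrap the unsigned page offset. */
	static int may_grow_down(unsigned long vm_pgoff, unsigned long grow)
	{
		return grow <= vm_pgoff;	/* 0 means refuse with -ENOMEM */
	}

	int main(void)
	{
		printf("%d\n", may_grow_down(16, 8));	/* 1: allowed */
		printf("%d\n", may_grow_down(4, 8));	/* 0: would underflow */
		return 0;
	}
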
diff --git a/net/dsa/mv88e6131.c b/net/dsa/mv88e6131.c
index d951f93644bf..3da418894efc 100644
--- a/net/dsa/mv88e6131.c
+++ b/net/dsa/mv88e6131.c
@@ -14,6 +14,13 @@
14#include "dsa_priv.h" 14#include "dsa_priv.h"
15#include "mv88e6xxx.h" 15#include "mv88e6xxx.h"
16 16
17/*
18 * Switch product IDs
19 */
20#define ID_6085 0x04a0
21#define ID_6095 0x0950
22#define ID_6131 0x1060
23
17static char *mv88e6131_probe(struct mii_bus *bus, int sw_addr) 24static char *mv88e6131_probe(struct mii_bus *bus, int sw_addr)
18{ 25{
19 int ret; 26 int ret;
@@ -21,9 +28,11 @@ static char *mv88e6131_probe(struct mii_bus *bus, int sw_addr)
21 ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03); 28 ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
22 if (ret >= 0) { 29 if (ret >= 0) {
23 ret &= 0xfff0; 30 ret &= 0xfff0;
24 if (ret == 0x0950) 31 if (ret == ID_6085)
32 return "Marvell 88E6085";
33 if (ret == ID_6095)
25 return "Marvell 88E6095/88E6095F"; 34 return "Marvell 88E6095/88E6095F";
26 if (ret == 0x1060) 35 if (ret == ID_6131)
27 return "Marvell 88E6131"; 36 return "Marvell 88E6131";
28 } 37 }
29 38
@@ -164,6 +173,7 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)
164 173
165static int mv88e6131_setup_port(struct dsa_switch *ds, int p) 174static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
166{ 175{
176 struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
167 int addr = REG_PORT(p); 177 int addr = REG_PORT(p);
168 u16 val; 178 u16 val;
169 179
@@ -171,10 +181,13 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
171 * MAC Forcing register: don't force link, speed, duplex 181 * MAC Forcing register: don't force link, speed, duplex
172 * or flow control state to any particular values on physical 182 * or flow control state to any particular values on physical
173 * ports, but force the CPU port and all DSA ports to 1000 Mb/s 183 * ports, but force the CPU port and all DSA ports to 1000 Mb/s
174 * full duplex. 184 * (100 Mb/s on 6085) full duplex.
175 */ 185 */
176 if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p)) 186 if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
177 REG_WRITE(addr, 0x01, 0x003e); 187 if (ps->id == ID_6085)
188 REG_WRITE(addr, 0x01, 0x003d); /* 100 Mb/s */
189 else
190 REG_WRITE(addr, 0x01, 0x003e); /* 1000 Mb/s */
178 else 191 else
179 REG_WRITE(addr, 0x01, 0x0003); 192 REG_WRITE(addr, 0x01, 0x0003);
180 193
@@ -286,6 +299,8 @@ static int mv88e6131_setup(struct dsa_switch *ds)
286 mv88e6xxx_ppu_state_init(ds); 299 mv88e6xxx_ppu_state_init(ds);
287 mutex_init(&ps->stats_mutex); 300 mutex_init(&ps->stats_mutex);
288 301
302 ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0;
303
289 ret = mv88e6131_switch_reset(ds); 304 ret = mv88e6131_switch_reset(ds);
290 if (ret < 0) 305 if (ret < 0)
291 return ret; 306 return ret;
diff --git a/net/dsa/mv88e6xxx.h b/net/dsa/mv88e6xxx.h
index eb0e0aaa9f1b..61156ca26a0d 100644
--- a/net/dsa/mv88e6xxx.h
+++ b/net/dsa/mv88e6xxx.h
@@ -39,6 +39,8 @@ struct mv88e6xxx_priv_state {
39 * Hold this mutex over snapshot + dump sequences. 39 * Hold this mutex over snapshot + dump sequences.
40 */ 40 */
41 struct mutex stats_mutex; 41 struct mutex stats_mutex;
42
43 int id; /* switch product id */
42}; 44};
43 45
44struct mv88e6xxx_hw_stat { 46struct mv88e6xxx_hw_stat {
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index f3c0b549b8e1..4614babdc45f 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -221,9 +221,10 @@ static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
221 return csum; 221 return csum;
222} 222}
223 223
224static int nf_ip_route(struct dst_entry **dst, struct flowi *fl) 224static int nf_ip_route(struct net *net, struct dst_entry **dst,
225 struct flowi *fl, bool strict __always_unused)
225{ 226{
226 struct rtable *rt = ip_route_output_key(&init_net, &fl->u.ip4); 227 struct rtable *rt = ip_route_output_key(net, &fl->u.ip4);
227 if (IS_ERR(rt)) 228 if (IS_ERR(rt))
228 return PTR_ERR(rt); 229 return PTR_ERR(rt);
229 *dst = &rt->dst; 230 *dst = &rt->dst;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index ea107515c53e..c1acf69858fd 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1891,6 +1891,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1891#ifdef CONFIG_IP_ROUTE_CLASSID 1891#ifdef CONFIG_IP_ROUTE_CLASSID
1892 rth->dst.tclassid = itag; 1892 rth->dst.tclassid = itag;
1893#endif 1893#endif
1894 rth->rt_route_iif = dev->ifindex;
1894 rth->rt_iif = dev->ifindex; 1895 rth->rt_iif = dev->ifindex;
1895 rth->dst.dev = init_net.loopback_dev; 1896 rth->dst.dev = init_net.loopback_dev;
1896 dev_hold(rth->dst.dev); 1897 dev_hold(rth->dst.dev);
@@ -2026,6 +2027,7 @@ static int __mkroute_input(struct sk_buff *skb,
2026 rth->rt_key_src = saddr; 2027 rth->rt_key_src = saddr;
2027 rth->rt_src = saddr; 2028 rth->rt_src = saddr;
2028 rth->rt_gateway = daddr; 2029 rth->rt_gateway = daddr;
2030 rth->rt_route_iif = in_dev->dev->ifindex;
2029 rth->rt_iif = in_dev->dev->ifindex; 2031 rth->rt_iif = in_dev->dev->ifindex;
2030 rth->dst.dev = (out_dev)->dev; 2032 rth->dst.dev = (out_dev)->dev;
2031 dev_hold(rth->dst.dev); 2033 dev_hold(rth->dst.dev);
@@ -2202,6 +2204,7 @@ local_input:
2202#ifdef CONFIG_IP_ROUTE_CLASSID 2204#ifdef CONFIG_IP_ROUTE_CLASSID
2203 rth->dst.tclassid = itag; 2205 rth->dst.tclassid = itag;
2204#endif 2206#endif
2207 rth->rt_route_iif = dev->ifindex;
2205 rth->rt_iif = dev->ifindex; 2208 rth->rt_iif = dev->ifindex;
2206 rth->dst.dev = net->loopback_dev; 2209 rth->dst.dev = net->loopback_dev;
2207 dev_hold(rth->dst.dev); 2210 dev_hold(rth->dst.dev);
@@ -2401,7 +2404,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2401 rth->rt_mark = oldflp4->flowi4_mark; 2404 rth->rt_mark = oldflp4->flowi4_mark;
2402 rth->rt_dst = fl4->daddr; 2405 rth->rt_dst = fl4->daddr;
2403 rth->rt_src = fl4->saddr; 2406 rth->rt_src = fl4->saddr;
2404 rth->rt_iif = 0; 2407 rth->rt_route_iif = 0;
2408 rth->rt_iif = oldflp4->flowi4_oif ? : dev_out->ifindex;
2405 /* get references to the devices that are to be hold by the routing 2409 /* get references to the devices that are to be hold by the routing
2406 cache entry */ 2410 cache entry */
2407 rth->dst.dev = dev_out; 2411 rth->dst.dev = dev_out;
@@ -2716,6 +2720,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
2716 rt->rt_key_dst = ort->rt_key_dst; 2720 rt->rt_key_dst = ort->rt_key_dst;
2717 rt->rt_key_src = ort->rt_key_src; 2721 rt->rt_key_src = ort->rt_key_src;
2718 rt->rt_tos = ort->rt_tos; 2722 rt->rt_tos = ort->rt_tos;
2723 rt->rt_route_iif = ort->rt_route_iif;
2719 rt->rt_iif = ort->rt_iif; 2724 rt->rt_iif = ort->rt_iif;
2720 rt->rt_oif = ort->rt_oif; 2725 rt->rt_oif = ort->rt_oif;
2721 rt->rt_mark = ort->rt_mark; 2726 rt->rt_mark = ort->rt_mark;
@@ -2725,7 +2730,6 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
2725 rt->rt_type = ort->rt_type; 2730 rt->rt_type = ort->rt_type;
2726 rt->rt_dst = ort->rt_dst; 2731 rt->rt_dst = ort->rt_dst;
2727 rt->rt_src = ort->rt_src; 2732 rt->rt_src = ort->rt_src;
2728 rt->rt_iif = ort->rt_iif;
2729 rt->rt_gateway = ort->rt_gateway; 2733 rt->rt_gateway = ort->rt_gateway;
2730 rt->rt_spec_dst = ort->rt_spec_dst; 2734 rt->rt_spec_dst = ort->rt_spec_dst;
2731 rt->peer = ort->peer; 2735 rt->peer = ort->peer;
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 13e0e7f659ff..d20a05e970d8 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -74,6 +74,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
74 rt->rt_key_dst = fl4->daddr; 74 rt->rt_key_dst = fl4->daddr;
75 rt->rt_key_src = fl4->saddr; 75 rt->rt_key_src = fl4->saddr;
76 rt->rt_tos = fl4->flowi4_tos; 76 rt->rt_tos = fl4->flowi4_tos;
77 rt->rt_route_iif = fl4->flowi4_iif;
77 rt->rt_iif = fl4->flowi4_iif; 78 rt->rt_iif = fl4->flowi4_iif;
78 rt->rt_oif = fl4->flowi4_oif; 79 rt->rt_oif = fl4->flowi4_oif;
79 rt->rt_mark = fl4->flowi4_mark; 80 rt->rt_mark = fl4->flowi4_mark;
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 39aaca2b4fd2..28bc1f644b7b 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -90,9 +90,18 @@ static int nf_ip6_reroute(struct sk_buff *skb,
90 return 0; 90 return 0;
91} 91}
92 92
93static int nf_ip6_route(struct dst_entry **dst, struct flowi *fl) 93static int nf_ip6_route(struct net *net, struct dst_entry **dst,
94 struct flowi *fl, bool strict)
94{ 95{
95 *dst = ip6_route_output(&init_net, NULL, &fl->u.ip6); 96 static const struct ipv6_pinfo fake_pinfo;
97 static const struct inet_sock fake_sk = {
98 /* makes ip6_route_output set RT6_LOOKUP_F_IFACE: */
99 .sk.sk_bound_dev_if = 1,
100 .pinet6 = (struct ipv6_pinfo *) &fake_pinfo,
101 };
102 const void *sk = strict ? &fake_sk : NULL;
103
104 *dst = ip6_route_output(net, sk, &fl->u.ip6);
96 return (*dst)->error; 105 return (*dst)->error;
97} 106}
98 107
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 56fa12538d45..4f49e5dd41bb 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1622,6 +1622,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1622 opt_skb = skb_clone(skb, GFP_ATOMIC); 1622 opt_skb = skb_clone(skb, GFP_ATOMIC);
1623 1623
1624 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ 1624 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1625 sock_rps_save_rxhash(sk, skb->rxhash);
1625 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) 1626 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1626 goto reset; 1627 goto reset;
1627 if (opt_skb) 1628 if (opt_skb)
@@ -1649,7 +1650,8 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1649 __kfree_skb(opt_skb); 1650 __kfree_skb(opt_skb);
1650 return 0; 1651 return 0;
1651 } 1652 }
1652 } 1653 } else
1654 sock_rps_save_rxhash(sk, skb->rxhash);
1653 1655
1654 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) 1656 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1655 goto reset; 1657 goto reset;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d7037c006e13..15c37746845e 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -505,6 +505,9 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
505 int rc; 505 int rc;
506 int is_udplite = IS_UDPLITE(sk); 506 int is_udplite = IS_UDPLITE(sk);
507 507
508 if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
509 sock_rps_save_rxhash(sk, skb->rxhash);
510
508 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) 511 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
509 goto drop; 512 goto drop;
510 513
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 9d192d665ff5..c5d4530d8284 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2541,7 +2541,6 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
2541 * same TID from the same station 2541 * same TID from the same station
2542 */ 2542 */
2543 rx->skb = skb; 2543 rx->skb = skb;
2544 rx->flags = 0;
2545 2544
2546 CALL_RXH(ieee80211_rx_h_decrypt) 2545 CALL_RXH(ieee80211_rx_h_decrypt)
2547 CALL_RXH(ieee80211_rx_h_check_more_data) 2546 CALL_RXH(ieee80211_rx_h_check_more_data)
@@ -2612,6 +2611,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
2612 .sdata = sta->sdata, 2611 .sdata = sta->sdata,
2613 .local = sta->local, 2612 .local = sta->local,
2614 .queue = tid, 2613 .queue = tid,
2614 .flags = 0,
2615 }; 2615 };
2616 struct tid_ampdu_rx *tid_agg_rx; 2616 struct tid_ampdu_rx *tid_agg_rx;
2617 2617
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index c3f988aa1152..32bff6d86cb2 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -652,7 +652,6 @@ comment "Xtables matches"
652config NETFILTER_XT_MATCH_ADDRTYPE 652config NETFILTER_XT_MATCH_ADDRTYPE
653 tristate '"addrtype" address type match support' 653 tristate '"addrtype" address type match support'
654 depends on NETFILTER_ADVANCED 654 depends on NETFILTER_ADVANCED
655 depends on (IPV6 || IPV6=n)
656 ---help--- 655 ---help---
657 This option allows you to match what routing thinks of an address, 656 This option allows you to match what routing thinks of an address,
658 eg. UNICAST, LOCAL, BROADCAST, ... 657 eg. UNICAST, LOCAL, BROADCAST, ...
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index bca96990218d..a113ff066928 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -338,8 +338,7 @@ bitmap_ip_head(struct ip_set *set, struct sk_buff *skb)
 	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
 	if (map->netmask != 32)
 		NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, map->netmask);
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-		      htonl(atomic_read(&set->ref) - 1));
+	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
 	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
 		      htonl(sizeof(*map) + map->memsize));
 	if (with_timeout(map->timeout))
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 5e790172deff..00a33242e90c 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -434,8 +434,7 @@ bitmap_ipmac_head(struct ip_set *set, struct sk_buff *skb)
 		goto nla_put_failure;
 	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
 	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-		      htonl(atomic_read(&set->ref) - 1));
+	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
 	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
 		      htonl(sizeof(*map)
 			    + (map->last_ip - map->first_ip + 1) * map->dsize));
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 165f09b1a9cb..6b38eb8f6ed8 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -320,8 +320,7 @@ bitmap_port_head(struct ip_set *set, struct sk_buff *skb)
 		goto nla_put_failure;
 	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, htons(map->first_port));
 	NLA_PUT_NET16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port));
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-		      htonl(atomic_read(&set->ref) - 1));
+	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
 	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
 		      htonl(sizeof(*map) + map->memsize));
 	if (with_timeout(map->timeout))
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 253326e8d990..9152e69a162d 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -26,6 +26,7 @@
 
 static LIST_HEAD(ip_set_type_list);		/* all registered set types */
 static DEFINE_MUTEX(ip_set_type_mutex);		/* protects ip_set_type_list */
+static DEFINE_RWLOCK(ip_set_ref_lock);		/* protects the set refs */
 
 static struct ip_set **ip_set_list;		/* all individual sets */
 static ip_set_id_t ip_set_max = CONFIG_IP_SET_MAX; /* max number of sets */
@@ -301,13 +302,18 @@ EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6);
 static inline void
 __ip_set_get(ip_set_id_t index)
 {
-	atomic_inc(&ip_set_list[index]->ref);
+	write_lock_bh(&ip_set_ref_lock);
+	ip_set_list[index]->ref++;
+	write_unlock_bh(&ip_set_ref_lock);
 }
 
 static inline void
 __ip_set_put(ip_set_id_t index)
 {
-	atomic_dec(&ip_set_list[index]->ref);
+	write_lock_bh(&ip_set_ref_lock);
+	BUG_ON(ip_set_list[index]->ref == 0);
+	ip_set_list[index]->ref--;
+	write_unlock_bh(&ip_set_ref_lock);
 }
 
 /*
313/* 319/*
@@ -324,7 +330,7 @@ ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
 	struct ip_set *set = ip_set_list[index];
 	int ret = 0;
 
-	BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+	BUG_ON(set == NULL);
 	pr_debug("set %s, index %u\n", set->name, index);
 
 	if (dim < set->type->dimension ||
@@ -356,7 +362,7 @@ ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
 	struct ip_set *set = ip_set_list[index];
 	int ret;
 
-	BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+	BUG_ON(set == NULL);
 	pr_debug("set %s, index %u\n", set->name, index);
 
 	if (dim < set->type->dimension ||
@@ -378,7 +384,7 @@ ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
 	struct ip_set *set = ip_set_list[index];
 	int ret = 0;
 
-	BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+	BUG_ON(set == NULL);
 	pr_debug("set %s, index %u\n", set->name, index);
 
 	if (dim < set->type->dimension ||
@@ -397,7 +403,6 @@ EXPORT_SYMBOL_GPL(ip_set_del);
  * Find set by name, reference it once. The reference makes sure the
  * thing pointed to, does not go away under our feet.
  *
- * The nfnl mutex must already be activated.
  */
 ip_set_id_t
 ip_set_get_byname(const char *name, struct ip_set **set)
@@ -423,15 +428,12 @@ EXPORT_SYMBOL_GPL(ip_set_get_byname);
  * reference count by 1. The caller shall not assume the index
  * to be valid, after calling this function.
  *
- * The nfnl mutex must already be activated.
  */
 void
 ip_set_put_byindex(ip_set_id_t index)
 {
-	if (ip_set_list[index] != NULL) {
-		BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0);
+	if (ip_set_list[index] != NULL)
 		__ip_set_put(index);
-	}
 }
 EXPORT_SYMBOL_GPL(ip_set_put_byindex);
 
@@ -441,7 +443,6 @@ EXPORT_SYMBOL_GPL(ip_set_put_byindex);
  * can't be destroyed. The set cannot be renamed due to
  * the referencing either.
  *
- * The nfnl mutex must already be activated.
  */
 const char *
 ip_set_name_byindex(ip_set_id_t index)
@@ -449,7 +450,7 @@ ip_set_name_byindex(ip_set_id_t index)
 	const struct ip_set *set = ip_set_list[index];
 
 	BUG_ON(set == NULL);
-	BUG_ON(atomic_read(&set->ref) == 0);
+	BUG_ON(set->ref == 0);
 
 	/* Referenced, so it's safe */
 	return set->name;
@@ -515,10 +516,7 @@ void
 ip_set_nfnl_put(ip_set_id_t index)
 {
 	nfnl_lock();
-	if (ip_set_list[index] != NULL) {
-		BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0);
-		__ip_set_put(index);
-	}
+	ip_set_put_byindex(index);
 	nfnl_unlock();
 }
 EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
@@ -526,7 +524,7 @@ EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
 /*
  * Communication protocol with userspace over netlink.
  *
- * We already locked by nfnl_lock.
+ * The commands are serialized by the nfnl mutex.
  */
 
 static inline bool
@@ -657,7 +655,6 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
 		return -ENOMEM;
 	rwlock_init(&set->lock);
 	strlcpy(set->name, name, IPSET_MAXNAMELEN);
-	atomic_set(&set->ref, 0);
 	set->family = family;
 
 	/*
@@ -690,8 +687,8 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
 
 	/*
 	 * Here, we have a valid, constructed set and we are protected
-	 * by nfnl_lock. Find the first free index in ip_set_list and
-	 * check clashing.
+	 * by the nfnl mutex. Find the first free index in ip_set_list
+	 * and check clashing.
 	 */
 	if ((ret = find_free_id(set->name, &index, &clash)) != 0) {
 		/* If this is the same set and requested, ignore error */
@@ -751,31 +748,51 @@ ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
 		       const struct nlattr * const attr[])
 {
 	ip_set_id_t i;
+	int ret = 0;
 
 	if (unlikely(protocol_failed(attr)))
 		return -IPSET_ERR_PROTOCOL;
 
-	/* References are protected by the nfnl mutex */
+	/* Commands are serialized and references are
+	 * protected by the ip_set_ref_lock.
+	 * External systems (i.e. xt_set) must call
+	 * ip_set_put|get_nfnl_* functions, that way we
+	 * can safely check references here.
+	 *
+	 * list:set timer can only decrement the reference
+	 * counter, so if it's already zero, we can proceed
+	 * without holding the lock.
+	 */
+	read_lock_bh(&ip_set_ref_lock);
 	if (!attr[IPSET_ATTR_SETNAME]) {
 		for (i = 0; i < ip_set_max; i++) {
-			if (ip_set_list[i] != NULL &&
-			    (atomic_read(&ip_set_list[i]->ref)))
-				return -IPSET_ERR_BUSY;
+			if (ip_set_list[i] != NULL && ip_set_list[i]->ref) {
+				ret = IPSET_ERR_BUSY;
+				goto out;
+			}
 		}
+		read_unlock_bh(&ip_set_ref_lock);
 		for (i = 0; i < ip_set_max; i++) {
 			if (ip_set_list[i] != NULL)
 				ip_set_destroy_set(i);
 		}
 	} else {
 		i = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
-		if (i == IPSET_INVALID_ID)
-			return -ENOENT;
-		else if (atomic_read(&ip_set_list[i]->ref))
-			return -IPSET_ERR_BUSY;
+		if (i == IPSET_INVALID_ID) {
+			ret = -ENOENT;
+			goto out;
+		} else if (ip_set_list[i]->ref) {
+			ret = -IPSET_ERR_BUSY;
+			goto out;
+		}
+		read_unlock_bh(&ip_set_ref_lock);
 
 		ip_set_destroy_set(i);
 	}
 	return 0;
+out:
+	read_unlock_bh(&ip_set_ref_lock);
+	return ret;
 }
 
 /* Flush sets */
@@ -834,6 +851,7 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
 	struct ip_set *set;
 	const char *name2;
 	ip_set_id_t i;
+	int ret = 0;
 
 	if (unlikely(protocol_failed(attr) ||
 		     attr[IPSET_ATTR_SETNAME] == NULL ||
@@ -843,25 +861,33 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
 	set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
 	if (set == NULL)
 		return -ENOENT;
-	if (atomic_read(&set->ref) != 0)
-		return -IPSET_ERR_REFERENCED;
+
+	read_lock_bh(&ip_set_ref_lock);
+	if (set->ref != 0) {
+		ret = -IPSET_ERR_REFERENCED;
+		goto out;
+	}
 
 	name2 = nla_data(attr[IPSET_ATTR_SETNAME2]);
 	for (i = 0; i < ip_set_max; i++) {
 		if (ip_set_list[i] != NULL &&
-		    STREQ(ip_set_list[i]->name, name2))
-			return -IPSET_ERR_EXIST_SETNAME2;
+		    STREQ(ip_set_list[i]->name, name2)) {
+			ret = -IPSET_ERR_EXIST_SETNAME2;
+			goto out;
+		}
 	}
 	strncpy(set->name, name2, IPSET_MAXNAMELEN);
 
-	return 0;
+out:
+	read_unlock_bh(&ip_set_ref_lock);
+	return ret;
 }
 
 /* Swap two sets so that name/index points to the other.
  * References and set names are also swapped.
  *
- * We are protected by the nfnl mutex and references are
- * manipulated only by holding the mutex. The kernel interfaces
+ * The commands are serialized by the nfnl mutex and references are
+ * protected by the ip_set_ref_lock. The kernel interfaces
  * do not hold the mutex but the pointer settings are atomic
  * so the ip_set_list always contains valid pointers to the sets.
  */
@@ -874,7 +900,6 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
 	struct ip_set *from, *to;
 	ip_set_id_t from_id, to_id;
 	char from_name[IPSET_MAXNAMELEN];
-	u32 from_ref;
 
 	if (unlikely(protocol_failed(attr) ||
 		     attr[IPSET_ATTR_SETNAME] == NULL ||
@@ -899,17 +924,15 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
 		      from->type->family == to->type->family))
 		return -IPSET_ERR_TYPE_MISMATCH;
 
-	/* No magic here: ref munging protected by the nfnl_lock */
 	strncpy(from_name, from->name, IPSET_MAXNAMELEN);
-	from_ref = atomic_read(&from->ref);
-
 	strncpy(from->name, to->name, IPSET_MAXNAMELEN);
-	atomic_set(&from->ref, atomic_read(&to->ref));
 	strncpy(to->name, from_name, IPSET_MAXNAMELEN);
-	atomic_set(&to->ref, from_ref);
 
+	write_lock_bh(&ip_set_ref_lock);
+	swap(from->ref, to->ref);
 	ip_set_list[from_id] = to;
 	ip_set_list[to_id] = from;
+	write_unlock_bh(&ip_set_ref_lock);
 
 	return 0;
 }
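
The open-coded triple exchange through from_ref is gone; under the new write lock a plain swap() does the job. The kernel's swap() macro from include/linux/kernel.h is essentially:

	#define swap(a, b) \
		do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

Taking ip_set_ref_lock for writing here pairs with the read-side checks in destroy and rename, so a concurrent busy-check can never observe the two counters mid-exchange.
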
@@ -926,7 +949,7 @@ ip_set_dump_done(struct netlink_callback *cb)
 {
 	if (cb->args[2]) {
 		pr_debug("release set %s\n", ip_set_list[cb->args[1]]->name);
-		__ip_set_put((ip_set_id_t) cb->args[1]);
+		ip_set_put_byindex((ip_set_id_t) cb->args[1]);
 	}
 	return 0;
 }
@@ -1068,7 +1091,7 @@ release_refcount:
 	/* If there was an error or set is done, release set */
 	if (ret || !cb->args[2]) {
 		pr_debug("release set %s\n", ip_set_list[index]->name);
-		__ip_set_put(index);
+		ip_set_put_byindex(index);
 	}
 
 	/* If we dump all sets, continue with dumping last ones */
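
Taken together, the ip_set_core.c hunks convert set->ref from an atomic_t to a plain integer guarded by the new ip_set_ref_lock rwlock. The point is not atomicity of the counter itself but atomicity of check-then-act: destroy and rename must test ref == 0 and act on the outcome without a reference being created in between, which an atomic counter alone cannot guarantee. Per the comment added in the destroy hunk, reference creators are serialized by the nfnl mutex and the list:set timer can only decrement, so a read lock suffices on the checking side. A minimal sketch of the pattern with invented names:

	static DEFINE_RWLOCK(obj_ref_lock);	/* stands in for ip_set_ref_lock */

	struct obj {
		u32 ref;
	};

	static void obj_get(struct obj *o)
	{
		write_lock_bh(&obj_ref_lock);
		o->ref++;
		write_unlock_bh(&obj_ref_lock);
	}

	static int obj_destroy_if_unused(struct obj *o)
	{
		read_lock_bh(&obj_ref_lock);
		if (o->ref) {
			read_unlock_bh(&obj_ref_lock);
			return -EBUSY;	/* still referenced */
		}
		read_unlock_bh(&obj_ref_lock);
		/* no new reference can appear here: creators are
		 * serialized with us by the surrounding mutex */
		kfree(o);
		return 0;
	}
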
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index a47c32982f06..e9159e99fc4b 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -43,14 +43,19 @@ struct list_set {
 static inline struct set_elem *
 list_set_elem(const struct list_set *map, u32 id)
 {
-	return (struct set_elem *)((char *)map->members + id * map->dsize);
+	return (struct set_elem *)((void *)map->members + id * map->dsize);
+}
+
+static inline struct set_telem *
+list_set_telem(const struct list_set *map, u32 id)
+{
+	return (struct set_telem *)((void *)map->members + id * map->dsize);
 }
 
 static inline bool
 list_set_timeout(const struct list_set *map, u32 id)
 {
-	const struct set_telem *elem =
-		(const struct set_telem *) list_set_elem(map, id);
+	const struct set_telem *elem = list_set_telem(map, id);
 
 	return ip_set_timeout_test(elem->timeout);
 }
@@ -58,19 +63,11 @@ list_set_timeout(const struct list_set *map, u32 id)
 static inline bool
 list_set_expired(const struct list_set *map, u32 id)
 {
-	const struct set_telem *elem =
-		(const struct set_telem *) list_set_elem(map, id);
+	const struct set_telem *elem = list_set_telem(map, id);
 
 	return ip_set_timeout_expired(elem->timeout);
 }
 
-static inline int
-list_set_exist(const struct set_telem *elem)
-{
-	return elem->id != IPSET_INVALID_ID &&
-	       !ip_set_timeout_expired(elem->timeout);
-}
-
 /* Set list without and with timeout */
 
 static int
@@ -146,11 +143,11 @@ list_elem_tadd(struct list_set *map, u32 i, ip_set_id_t id,
 	struct set_telem *e;
 
 	for (; i < map->size; i++) {
-		e = (struct set_telem *)list_set_elem(map, i);
+		e = list_set_telem(map, i);
 		swap(e->id, id);
+		swap(e->timeout, timeout);
 		if (e->id == IPSET_INVALID_ID)
 			break;
-		swap(e->timeout, timeout);
 	}
 }
 
@@ -164,7 +161,7 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
 	/* Last element replaced: e.g. add new,before,last */
 	ip_set_put_byindex(e->id);
 	if (with_timeout(map->timeout))
-		list_elem_tadd(map, i, id, timeout);
+		list_elem_tadd(map, i, id, ip_set_timeout_set(timeout));
 	else
 		list_elem_add(map, i, id);
 
@@ -172,11 +169,11 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
 }
 
 static int
-list_set_del(struct list_set *map, ip_set_id_t id, u32 i)
+list_set_del(struct list_set *map, u32 i)
 {
 	struct set_elem *a = list_set_elem(map, i), *b;
 
-	ip_set_put_byindex(id);
+	ip_set_put_byindex(a->id);
 
 	for (; i < map->size - 1; i++) {
 		b = list_set_elem(map, i + 1);
@@ -308,11 +305,11 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],
 			    (before == 0 ||
 			     (before > 0 &&
 			      next_id_eq(map, i, refid))))
-				ret = list_set_del(map, id, i);
+				ret = list_set_del(map, i);
 			else if (before < 0 &&
 				 elem->id == refid &&
 				 next_id_eq(map, i, id))
-				ret = list_set_del(map, id, i + 1);
+				ret = list_set_del(map, i + 1);
 		}
 		break;
 	default:
@@ -369,8 +366,7 @@ list_set_head(struct ip_set *set, struct sk_buff *skb)
 	NLA_PUT_NET32(skb, IPSET_ATTR_SIZE, htonl(map->size));
 	if (with_timeout(map->timeout))
 		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-		      htonl(atomic_read(&set->ref) - 1));
+	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
 	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
 		      htonl(sizeof(*map) + map->size * map->dsize));
 	ipset_nest_end(skb, nested);
@@ -461,16 +457,13 @@ list_set_gc(unsigned long ul_set)
 	struct set_telem *e;
 	u32 i;
 
-	/* We run parallel with other readers (test element)
-	 * but adding/deleting new entries is locked out */
-	read_lock_bh(&set->lock);
-	for (i = map->size - 1; i >= 0; i--) {
-		e = (struct set_telem *) list_set_elem(map, i);
-		if (e->id != IPSET_INVALID_ID &&
-		    list_set_expired(map, i))
-			list_set_del(map, e->id, i);
+	write_lock_bh(&set->lock);
+	for (i = 0; i < map->size; i++) {
+		e = list_set_telem(map, i);
+		if (e->id != IPSET_INVALID_ID && list_set_expired(map, i))
+			list_set_del(map, i);
 	}
-	read_unlock_bh(&set->lock);
+	write_unlock_bh(&set->lock);
 
 	map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
 	add_timer(&map->gc);
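
Two things changed in list_set_gc(). First, the old comment claimed expiry could run in parallel with readers, but list_set_del() shifts the remaining slots down and drops a reference, a structural change, so the walk now takes set->lock for writing. Second, the old backwards loop was broken on its own terms: i is u32, so "i >= 0" is always true and the countdown wraps past zero instead of stopping. A standalone demonstration of the wrap:

	#include <stdio.h>

	int main(void)
	{
		unsigned int i, steps = 0;

		/* mirrors 'for (i = size - 1; i >= 0; i--)' with size == 3 */
		for (i = 3 - 1; i >= 0; i--) {
			if (++steps > 5)
				break;	/* escape hatch; the condition never fails */
			printf("i = %u\n", i);	/* 2, 1, 0, then 4294967295, ... */
		}
		return 0;
	}

The rewritten forward loop sidesteps the problem entirely.
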
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 33733c8872e7..ae47090bf45f 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3120,7 +3120,7 @@ nla_put_failure:
 static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
 				   struct netlink_callback *cb)
 {
-	struct net *net = skb_net(skb);
+	struct net *net = skb_sknet(skb);
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	mutex_lock(&__ip_vs_mutex);
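
skb_net() versus skb_sknet(): in a netlink dump callback the skb at hand is the message being composed, not a packet that arrived on a device, so deriving the namespace from the device side picks the wrong (or no) netns. skb_sknet() goes through the owning socket instead. Approximately, per the IPVS helpers in include/net/ip_vs.h of this era (heavily simplified, names invented for illustration):

	struct net *net_of_rx_skb(const struct sk_buff *skb)
	{
		return dev_net(skb->dev);	/* packet path: device netns */
	}

	struct net *net_of_nl_skb(const struct sk_buff *skb)
	{
		return sock_net(skb->sk);	/* netlink path: socket netns */
	}
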
diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c
index 867882313e49..bcd5ed6b7130 100644
--- a/net/netfilter/nf_conntrack_h323_asn1.c
+++ b/net/netfilter/nf_conntrack_h323_asn1.c
@@ -631,7 +631,7 @@ static int decode_seqof(bitstr_t *bs, const struct field_t *f,
 		CHECK_BOUND(bs, 2);
 		count = *bs->cur++;
 		count <<= 8;
-		count = *bs->cur++;
+		count += *bs->cur++;
 		break;
 	case SEMI:
 		BYTE_ALIGN(bs);
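
The decode_seqof() change is a one-character bug with outsized effect: the count is a 16-bit big-endian quantity, and the old second assignment threw away the freshly shifted high byte, truncating every count of 256 or more to its low byte. A self-contained illustration with a made-up buffer:

	#include <stdio.h>

	int main(void)
	{
		const unsigned char buf[] = { 0x01, 0x02 };	/* encodes 0x0102 == 258 */
		const unsigned char *cur = buf;
		unsigned int count;

		count = *cur++;		/* high byte: 0x01 */
		count <<= 8;		/* 0x0100 */
		count += *cur++;	/* 0x0102; with '=' this would be 0x02 */

		printf("count = %u\n", count);	/* prints 258 */
		return 0;
	}
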
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 533a183e6661..18b2ce5c8ced 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -731,10 +731,10 @@ static int callforward_do_filter(const union nf_inet_addr *src,
 
 		memset(&fl2, 0, sizeof(fl2));
 		fl2.daddr = dst->ip;
-		if (!afinfo->route((struct dst_entry **)&rt1,
-				   flowi4_to_flowi(&fl1))) {
-			if (!afinfo->route((struct dst_entry **)&rt2,
-					   flowi4_to_flowi(&fl2))) {
+		if (!afinfo->route(&init_net, (struct dst_entry **)&rt1,
+				   flowi4_to_flowi(&fl1), false)) {
+			if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
+					   flowi4_to_flowi(&fl2), false)) {
 				if (rt1->rt_gateway == rt2->rt_gateway &&
 				    rt1->dst.dev == rt2->dst.dev)
 					ret = 1;
@@ -755,10 +755,10 @@ static int callforward_do_filter(const union nf_inet_addr *src,
 
 		memset(&fl2, 0, sizeof(fl2));
 		ipv6_addr_copy(&fl2.daddr, &dst->in6);
-		if (!afinfo->route((struct dst_entry **)&rt1,
-				   flowi6_to_flowi(&fl1))) {
-			if (!afinfo->route((struct dst_entry **)&rt2,
-					   flowi6_to_flowi(&fl2))) {
+		if (!afinfo->route(&init_net, (struct dst_entry **)&rt1,
+				   flowi6_to_flowi(&fl1), false)) {
+			if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
+					   flowi6_to_flowi(&fl2), false)) {
 				if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway,
 					    sizeof(rt1->rt6i_gateway)) &&
 				    rt1->dst.dev == rt2->dst.dev)
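
Both call sites track a signature change in struct nf_afinfo: the route hook now takes the network namespace to resolve in plus a strict flag for output-interface binding. Judging from these hunks, the updated member looks like the following (exact placement in include/linux/netfilter.h assumed, not verified here):

	int (*route)(struct net *net, struct dst_entry **dst,
		     struct flowi *fl, bool strict);

The H.323 helper keeps its old behaviour by passing &init_net and false explicitly.
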
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 6e6b46cb1db9..9e63b43faeed 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -166,7 +166,7 @@ static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb,
 	rcu_read_lock();
 	ai = nf_get_afinfo(family);
 	if (ai != NULL)
-		ai->route((struct dst_entry **)&rt, &fl);
+		ai->route(&init_net, (struct dst_entry **)&rt, &fl, false);
 	rcu_read_unlock();
 
 	if (rt != NULL) {
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c
index 2220b85e9519..b77d383cec78 100644
--- a/net/netfilter/xt_addrtype.c
+++ b/net/netfilter/xt_addrtype.c
@@ -32,11 +32,32 @@ MODULE_ALIAS("ipt_addrtype");
 MODULE_ALIAS("ip6t_addrtype");
 
 #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
-static u32 xt_addrtype_rt6_to_type(const struct rt6_info *rt)
+static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
+			    const struct in6_addr *addr)
 {
+	const struct nf_afinfo *afinfo;
+	struct flowi6 flow;
+	struct rt6_info *rt;
 	u32 ret;
+	int route_err;
 
-	if (!rt)
+	memset(&flow, 0, sizeof(flow));
+	ipv6_addr_copy(&flow.daddr, addr);
+	if (dev)
+		flow.flowi6_oif = dev->ifindex;
+
+	rcu_read_lock();
+
+	afinfo = nf_get_afinfo(NFPROTO_IPV6);
+	if (afinfo != NULL)
+		route_err = afinfo->route(net, (struct dst_entry **)&rt,
+					  flowi6_to_flowi(&flow), !!dev);
+	else
+		route_err = 1;
+
+	rcu_read_unlock();
+
+	if (route_err)
 		return XT_ADDRTYPE_UNREACHABLE;
 
 	if (rt->rt6i_flags & RTF_REJECT)
@@ -48,6 +69,9 @@ static u32 xt_addrtype_rt6_to_type(const struct rt6_info *rt)
 		ret |= XT_ADDRTYPE_LOCAL;
 	if (rt->rt6i_flags & RTF_ANYCAST)
 		ret |= XT_ADDRTYPE_ANYCAST;
+
+
+	dst_release(&rt->dst);
 	return ret;
 }
 
@@ -65,18 +89,8 @@ static bool match_type6(struct net *net, const struct net_device *dev,
 		return false;
 
 	if ((XT_ADDRTYPE_LOCAL | XT_ADDRTYPE_ANYCAST |
-	     XT_ADDRTYPE_UNREACHABLE) & mask) {
-		struct rt6_info *rt;
-		u32 type;
-		int ifindex = dev ? dev->ifindex : 0;
-
-		rt = rt6_lookup(net, addr, NULL, ifindex, !!dev);
-
-		type = xt_addrtype_rt6_to_type(rt);
-
-		dst_release(&rt->dst);
-		return !!(mask & type);
-	}
+	     XT_ADDRTYPE_UNREACHABLE) & mask)
+		return !!(mask & match_lookup_rt6(net, dev, addr));
 	return true;
 }
 
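
The rewrite replaces a direct rt6_lookup() call with the indirect afinfo->route() hook under rcu_read_lock(). Besides reporting lookup failure through a return code (mapped to XT_ADDRTYPE_UNREACHABLE above) and expressing interface binding via the new strict argument, the indirection drops the hard symbol dependency on the ipv6 module, which is presumably why the Kconfig hunk earlier removes the '(IPV6 || IPV6=n)' restriction. The calling pattern, condensed from the new match_lookup_rt6():

	/* usage sketch, mirroring the hunk above */
	const struct nf_afinfo *afinfo;
	int route_err = 1;

	rcu_read_lock();
	afinfo = nf_get_afinfo(NFPROTO_IPV6);	/* NULL if ipv6 is unavailable */
	if (afinfo != NULL)
		route_err = afinfo->route(net, (struct dst_entry **)&rt,
					  flowi6_to_flowi(&flow), !!dev);
	rcu_read_unlock();
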
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
index 2c0086a4751e..481a86fdc409 100644
--- a/net/netfilter/xt_conntrack.c
+++ b/net/netfilter/xt_conntrack.c
@@ -195,7 +195,7 @@ conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par,
 		return info->match_flags & XT_CONNTRACK_STATE;
 	if ((info->match_flags & XT_CONNTRACK_DIRECTION) &&
 	    (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) ^
-	    !!(info->invert_flags & XT_CONNTRACK_DIRECTION))
+	    !(info->invert_flags & XT_CONNTRACK_DIRECTION))
 		return false;
 
 	if (info->match_flags & XT_CONNTRACK_ORIGSRC)
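
The xt_conntrack fix is pure boolean algebra. The intent is to skip the rule (return false) exactly when the packet's direction disagrees with what the rule asks for, where the invert flag flips the asked-for direction. For "dir XOR f" to express that, f must be the negation of the invert bit; with '!!' the sense was flipped, so without XT_CONNTRACK_DIRECTION in invert_flags the match rejected the ORIGINAL-direction packets it should have accepted. A truth-table check (standalone, invented names):

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		for (int orig = 0; orig <= 1; orig++)
			for (int inv = 0; inv <= 1; inv++) {
				bool skip_old = orig ^ !!inv;	/* buggy */
				bool skip_new = orig ^ !inv;	/* fixed */
				printf("orig=%d inv=%d old_skip=%d new_skip=%d\n",
				       orig, inv, skip_old, skip_new);
			}
		return 0;
	}

With inv == 0 the fixed predicate skips only non-ORIGINAL packets, as intended; the old one did the exact opposite.
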