-rw-r--r--  Documentation/devices.txt | 2
-rw-r--r--  Documentation/lguest/lguest.c | 2
-rw-r--r--  arch/arm/kernel/bios32.c | 4
-rw-r--r--  arch/i386/boot/memory.c | 39
-rw-r--r--  arch/i386/xen/mmu.c | 5
-rw-r--r--  arch/mips/kernel/scall64-o32.S | 2
-rw-r--r--  arch/mips/sgi-ip32/ip32-platform.c | 4
-rw-r--r--  arch/powerpc/boot/dts/mpc8349emitx.dts | 1
-rw-r--r--  arch/powerpc/kernel/process.c | 7
-rw-r--r--  arch/powerpc/platforms/83xx/usb.c | 4
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c | 4
-rw-r--r--  arch/powerpc/sysdev/commproc.c | 2
-rw-r--r--  arch/ppc/8xx_io/commproc.c | 2
-rw-r--r--  drivers/acpi/sleep/Makefile | 2
-rw-r--r--  drivers/acpi/sleep/main.c | 46
-rw-r--r--  drivers/ata/pata_sis.c | 3
-rw-r--r--  drivers/ata/sata_sil24.c | 16
-rw-r--r--  drivers/base/core.c | 1
-rw-r--r--  drivers/cdrom/cdrom.c | 4
-rw-r--r--  drivers/char/drm/i915_drv.h | 6
-rw-r--r--  drivers/char/drm/i915_irq.c | 12
-rw-r--r--  drivers/char/hpet.c | 9
-rw-r--r--  drivers/char/mspec.c | 26
-rw-r--r--  drivers/char/vt_ioctl.c | 15
-rw-r--r--  drivers/input/joystick/Kconfig | 2
-rw-r--r--  drivers/input/mouse/appletouch.c | 6
-rw-r--r--  drivers/lguest/lguest_asm.S | 6
-rw-r--r--  drivers/media/video/ivtv/ivtv-fileops.c | 6
-rw-r--r--  drivers/media/video/usbvision/usbvision-video.c | 5
-rw-r--r--  drivers/net/e1000/e1000_ethtool.c | 1
-rw-r--r--  drivers/net/e1000/e1000_hw.c | 1
-rw-r--r--  drivers/net/e1000/e1000_hw.h | 1
-rw-r--r--  drivers/net/e1000/e1000_main.c | 2
-rw-r--r--  drivers/net/mv643xx_eth.c | 4
-rw-r--r--  drivers/net/mv643xx_eth.h | 4
-rw-r--r--  drivers/net/pcmcia/3c589_cs.c | 2
-rw-r--r--  drivers/net/ppp_mppe.c | 14
-rw-r--r--  drivers/net/r8169.c | 14
-rw-r--r--  drivers/net/sky2.c | 90
-rw-r--r--  drivers/net/sky2.h | 2
-rw-r--r--  drivers/pci/quirks.c | 7
-rw-r--r--  drivers/scsi/esp_scsi.c | 3
-rw-r--r--  drivers/scsi/scsi_transport_spi.c | 28
-rw-r--r--  drivers/serial/cpm_uart/cpm_uart_cpm1.h | 2
-rw-r--r--  drivers/serial/sunsab.c | 2
-rw-r--r--  drivers/w1/w1.c | 1
-rw-r--r--  fs/compat_ioctl.c | 2
-rw-r--r--  fs/lockd/svclock.c | 29
-rw-r--r--  fs/nfs/client.c | 29
-rw-r--r--  fs/nfs/dir.c | 2
-rw-r--r--  fs/nfs/getroot.c | 3
-rw-r--r--  fs/ufs/super.c | 4
-rw-r--r--  fs/xfs/xfs_buf_item.h | 5
-rw-r--r--  fs/xfs/xfs_log_recover.c | 51
-rw-r--r--  fs/xfs/xfs_trans_buf.c | 1
-rw-r--r--  include/acpi/acpi_drivers.h | 4
-rw-r--r--  include/asm-i386/system.h | 5
-rw-r--r--  include/asm-mips/page.h | 2
-rw-r--r--  include/linux/cpufreq.h | 19
-rw-r--r--  include/net/sctp/sm.h | 4
-rw-r--r--  include/net/sctp/structs.h | 3
-rw-r--r--  include/net/tcp.h | 6
-rw-r--r--  kernel/futex.c | 26
-rw-r--r--  kernel/futex_compat.c | 28
-rw-r--r--  kernel/power/Kconfig | 2
-rw-r--r--  kernel/sys.c | 2
-rw-r--r--  lib/Kconfig.debug | 2
-rw-r--r--  mm/hugetlb.c | 2
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_assoc.c | 2
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_wx.c | 54
-rw-r--r--  net/ipv4/tcp_ipv4.c | 19
-rw-r--r--  net/ipv6/tcp_ipv6.c | 18
-rw-r--r--  net/mac80211/ieee80211.c | 2
-rw-r--r--  net/mac80211/rc80211_simple.c | 2
-rw-r--r--  net/mac80211/wme.c | 2
-rw-r--r--  net/sctp/bind_addr.c | 2
-rw-r--r--  net/sctp/input.c | 8
-rw-r--r--  net/sctp/inqueue.c | 8
-rw-r--r--  net/sctp/sm_make_chunk.c | 46
-rw-r--r--  net/sctp/sm_statefuns.c | 243
-rw-r--r--  net/sctp/sm_statetable.c | 16
-rw-r--r--  net/socket.c | 3
-rw-r--r--  net/wireless/core.c | 2
-rw-r--r--  net/wireless/sysfs.c | 2
84 files changed, 696 insertions(+), 385 deletions(-)
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index 8de132a02ba9..6c46730c631a 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -94,6 +94,8 @@ Your cooperation is appreciated.
94 9 = /dev/urandom Faster, less secure random number gen. 94 9 = /dev/urandom Faster, less secure random number gen.
95 10 = /dev/aio Asynchronous I/O notification interface 95 10 = /dev/aio Asynchronous I/O notification interface
96 11 = /dev/kmsg Writes to this come out as printk's 96 11 = /dev/kmsg Writes to this come out as printk's
97 12 = /dev/oldmem Used by crashdump kernels to access
98 the memory of the kernel that crashed.
97 99
98 1 block RAM disk 100 1 block RAM disk
99 0 = /dev/ram0 First RAM disk 101 0 = /dev/ram0 First RAM disk
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
index f7918401a007..73c5f1f3d5d2 100644
--- a/Documentation/lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
@@ -882,7 +882,7 @@ static u32 handle_block_output(int fd, const struct iovec *iov,
882 * of the block file (possibly extending it). */ 882 * of the block file (possibly extending it). */
883 if (off + len > device_len) { 883 if (off + len > device_len) {
884 /* Trim it back to the correct length */ 884 /* Trim it back to the correct length */
885 ftruncate(dev->fd, device_len); 885 ftruncate64(dev->fd, device_len);
886 /* Die, bad Guest, die. */ 886 /* Die, bad Guest, die. */
887 errx(1, "Write past end %llu+%u", off, len); 887 errx(1, "Write past end %llu+%u", off, len);
888 } 888 }
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 240c448ec31c..a2dd930d11ef 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -338,7 +338,7 @@ pbus_assign_bus_resources(struct pci_bus *bus, struct pci_sys_data *root)
338 * pcibios_fixup_bus - Called after each bus is probed, 338 * pcibios_fixup_bus - Called after each bus is probed,
339 * but before its children are examined. 339 * but before its children are examined.
340 */ 340 */
341void __devinit pcibios_fixup_bus(struct pci_bus *bus) 341void pcibios_fixup_bus(struct pci_bus *bus)
342{ 342{
343 struct pci_sys_data *root = bus->sysdata; 343 struct pci_sys_data *root = bus->sysdata;
344 struct pci_dev *dev; 344 struct pci_dev *dev;
@@ -419,7 +419,7 @@ void __devinit pcibios_fixup_bus(struct pci_bus *bus)
419/* 419/*
420 * Convert from Linux-centric to bus-centric addresses for bridge devices. 420 * Convert from Linux-centric to bus-centric addresses for bridge devices.
421 */ 421 */
422void __devinit 422void
423pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, 423pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
424 struct resource *res) 424 struct resource *res)
425{ 425{
diff --git a/arch/i386/boot/memory.c b/arch/i386/boot/memory.c
index 1a2e62db8bed..378353956b5d 100644
--- a/arch/i386/boot/memory.c
+++ b/arch/i386/boot/memory.c
@@ -20,6 +20,7 @@
20 20
21static int detect_memory_e820(void) 21static int detect_memory_e820(void)
22{ 22{
23 int count = 0;
23 u32 next = 0; 24 u32 next = 0;
24 u32 size, id; 25 u32 size, id;
25 u8 err; 26 u8 err;
@@ -27,20 +28,33 @@ static int detect_memory_e820(void)
27 28
28 do { 29 do {
29 size = sizeof(struct e820entry); 30 size = sizeof(struct e820entry);
30 id = SMAP; 31
32 /* Important: %edx is clobbered by some BIOSes,
33 so it must be either used for the error output
34 or explicitly marked clobbered. */
31 asm("int $0x15; setc %0" 35 asm("int $0x15; setc %0"
32 : "=am" (err), "+b" (next), "+d" (id), "+c" (size), 36 : "=d" (err), "+b" (next), "=a" (id), "+c" (size),
33 "=m" (*desc) 37 "=m" (*desc)
34 : "D" (desc), "a" (0xe820)); 38 : "D" (desc), "d" (SMAP), "a" (0xe820));
39
40 /* Some BIOSes stop returning SMAP in the middle of
41 the search loop. We don't know exactly how the BIOS
42 screwed up the map at that point, we might have a
43 partial map, the full map, or complete garbage, so
44 just return failure. */
45 if (id != SMAP) {
46 count = 0;
47 break;
48 }
35 49
36 if (err || id != SMAP) 50 if (err)
37 break; 51 break;
38 52
39 boot_params.e820_entries++; 53 count++;
40 desc++; 54 desc++;
41 } while (next && boot_params.e820_entries < E820MAX); 55 } while (next && count < E820MAX);
42 56
43 return boot_params.e820_entries; 57 return boot_params.e820_entries = count;
44} 58}
45 59
46static int detect_memory_e801(void) 60static int detect_memory_e801(void)
@@ -89,11 +103,16 @@ static int detect_memory_88(void)
89 103
90int detect_memory(void) 104int detect_memory(void)
91{ 105{
106 int err = -1;
107
92 if (detect_memory_e820() > 0) 108 if (detect_memory_e820() > 0)
93 return 0; 109 err = 0;
94 110
95 if (!detect_memory_e801()) 111 if (!detect_memory_e801())
96 return 0; 112 err = 0;
113
114 if (!detect_memory_88())
115 err = 0;
97 116
98 return detect_memory_88(); 117 return err;
99} 118}
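The memory.c hunk above reworks the E820 probe loop: the entry count moves into a local variable, the SMAP signature is rechecked on every iteration and the whole map is discarded if a BIOS stops returning it, and the error flag is taken from %edx so that a register some BIOSes clobber is accounted for. What follows is a rough user-space C sketch of that control flow only; query_e820() is a made-up stub standing in for the INT 0x15, AX=0xE820 BIOS call, and the inline-asm constraint details are deliberately not reproduced.

/* Sketch of the fixed detect_memory_e820() loop shape (assumptions noted above). */
#include <stdint.h>
#include <stdio.h>

#define SMAP    0x534d4150u     /* "SMAP" signature the BIOS must echo back */
#define E820MAX 128

struct e820entry { uint64_t addr, size; uint32_t type; };

static struct e820entry map[E820MAX];
static int e820_entries;

/* Stub: pretend firmware reports two RAM ranges, then ends the chain. */
static int query_e820(uint32_t *next, uint32_t *id, struct e820entry *desc)
{
	static const struct e820entry fake[] = {
		{ 0x00000000, 0x0009fc00, 1 },
		{ 0x00100000, 0x1ff00000, 1 },
	};
	uint32_t idx = *next;

	if (idx >= 2)
		return 1;                       /* carry set: BIOS error */
	*desc = fake[idx];
	*id = SMAP;                             /* a buggy BIOS might clobber this */
	*next = (idx + 1 < 2) ? idx + 1 : 0;    /* 0 terminates the chain */
	return 0;
}

static int detect_memory_e820(void)
{
	int count = 0;
	uint32_t next = 0, id;
	struct e820entry *desc = map;

	do {
		if (query_e820(&next, &id, desc))
			break;                  /* BIOS reported an error */
		if (id != SMAP) {               /* map is unreliable: discard it */
			count = 0;
			break;
		}
		count++;
		desc++;
	} while (next && count < E820MAX);

	return e820_entries = count;
}

int main(void)
{
	printf("e820 entries: %d\n", detect_memory_e820());
	return 0;
}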
diff --git a/arch/i386/xen/mmu.c b/arch/i386/xen/mmu.c
index 4ae038aa6c24..874db0cd1d2a 100644
--- a/arch/i386/xen/mmu.c
+++ b/arch/i386/xen/mmu.c
@@ -559,6 +559,9 @@ void xen_exit_mmap(struct mm_struct *mm)
559 put_cpu(); 559 put_cpu();
560 560
561 spin_lock(&mm->page_table_lock); 561 spin_lock(&mm->page_table_lock);
562 xen_pgd_unpin(mm->pgd); 562
563 /* pgd may not be pinned in the error exit path of execve */
564 if (PagePinned(virt_to_page(mm->pgd)))
565 xen_pgd_unpin(mm->pgd);
563 spin_unlock(&mm->page_table_lock); 566 spin_unlock(&mm->page_table_lock);
564} 567}
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index b3ed731a24c6..dd68afce7da5 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -525,5 +525,5 @@ sys_call_table:
525 PTR compat_sys_signalfd 525 PTR compat_sys_signalfd
526 PTR compat_sys_timerfd 526 PTR compat_sys_timerfd
527 PTR sys_eventfd 527 PTR sys_eventfd
528 PTR sys_fallocate /* 4320 */ 528 PTR sys32_fallocate /* 4320 */
529 .size sys_call_table,.-sys_call_table 529 .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/sgi-ip32/ip32-platform.c b/arch/mips/sgi-ip32/ip32-platform.c
index ba3697ee7ff6..7309e48d163d 100644
--- a/arch/mips/sgi-ip32/ip32-platform.c
+++ b/arch/mips/sgi-ip32/ip32-platform.c
@@ -41,8 +41,8 @@ static struct platform_device uart8250_device = {
41 41
42static int __init uart8250_init(void) 42static int __init uart8250_init(void)
43{ 43{
44 uart8250_data[0].iobase = (unsigned long) &mace->isa.serial1; 44 uart8250_data[0].membase = (void __iomem *) &mace->isa.serial1;
45 uart8250_data[1].iobase = (unsigned long) &mace->isa.serial1; 45 uart8250_data[1].membase = (void __iomem *) &mace->isa.serial1;
46 46
47 return platform_device_register(&uart8250_device); 47 return platform_device_register(&uart8250_device);
48} 48}
diff --git a/arch/powerpc/boot/dts/mpc8349emitx.dts b/arch/powerpc/boot/dts/mpc8349emitx.dts
index 502f47c01797..44c065a6b5e7 100644
--- a/arch/powerpc/boot/dts/mpc8349emitx.dts
+++ b/arch/powerpc/boot/dts/mpc8349emitx.dts
@@ -99,6 +99,7 @@
99 #size-cells = <0>; 99 #size-cells = <0>;
100 interrupt-parent = < &ipic >; 100 interrupt-parent = < &ipic >;
101 interrupts = <26 8>; 101 interrupts = <26 8>;
102 dr_mode = "peripheral";
102 phy_type = "ulpi"; 103 phy_type = "ulpi";
103 }; 104 };
104 105
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index e477c9d0498b..8a1b001d0b11 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -605,6 +605,13 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
605 regs->ccr = 0; 605 regs->ccr = 0;
606 regs->gpr[1] = sp; 606 regs->gpr[1] = sp;
607 607
608 /*
609 * We have just cleared all the nonvolatile GPRs, so make
610 * FULL_REGS(regs) return true. This is necessary to allow
611 * ptrace to examine the thread immediately after exec.
612 */
613 regs->trap &= ~1UL;
614
608#ifdef CONFIG_PPC32 615#ifdef CONFIG_PPC32
609 regs->mq = 0; 616 regs->mq = 0;
610 regs->nip = start; 617 regs->nip = start;
diff --git a/arch/powerpc/platforms/83xx/usb.c b/arch/powerpc/platforms/83xx/usb.c
index e7fdf013cd39..eafe7605cdac 100644
--- a/arch/powerpc/platforms/83xx/usb.c
+++ b/arch/powerpc/platforms/83xx/usb.c
@@ -76,14 +76,14 @@ int mpc834x_usb_cfg(void)
76 if (port0_is_dr) 76 if (port0_is_dr)
77 printk(KERN_WARNING 77 printk(KERN_WARNING
78 "834x USB port0 can't be used by both DR and MPH!\n"); 78 "834x USB port0 can't be used by both DR and MPH!\n");
79 sicrl |= MPC834X_SICRL_USB0; 79 sicrl &= ~MPC834X_SICRL_USB0;
80 } 80 }
81 prop = of_get_property(np, "port1", NULL); 81 prop = of_get_property(np, "port1", NULL);
82 if (prop) { 82 if (prop) {
83 if (port1_is_dr) 83 if (port1_is_dr)
84 printk(KERN_WARNING 84 printk(KERN_WARNING
85 "834x USB port1 can't be used by both DR and MPH!\n"); 85 "834x USB port1 can't be used by both DR and MPH!\n");
86 sicrl |= MPC834X_SICRL_USB1; 86 sicrl &= ~MPC834X_SICRL_USB1;
87 } 87 }
88 of_node_put(np); 88 of_node_put(np);
89 } 89 }
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 4100ddc52f02..7de4e919687b 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -2177,8 +2177,8 @@ struct tree_descr spufs_dir_contents[] = {
2177 { "mbox_stat", &spufs_mbox_stat_fops, 0444, }, 2177 { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
2178 { "ibox_stat", &spufs_ibox_stat_fops, 0444, }, 2178 { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
2179 { "wbox_stat", &spufs_wbox_stat_fops, 0444, }, 2179 { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
2180 { "signal1", &spufs_signal1_nosched_fops, 0222, }, 2180 { "signal1", &spufs_signal1_fops, 0666, },
2181 { "signal2", &spufs_signal2_nosched_fops, 0222, }, 2181 { "signal2", &spufs_signal2_fops, 0666, },
2182 { "signal1_type", &spufs_signal1_type, 0666, }, 2182 { "signal1_type", &spufs_signal1_type, 0666, },
2183 { "signal2_type", &spufs_signal2_type, 0666, }, 2183 { "signal2_type", &spufs_signal2_type, 0666, },
2184 { "cntl", &spufs_cntl_fops, 0666, }, 2184 { "cntl", &spufs_cntl_fops, 0666, },
diff --git a/arch/powerpc/sysdev/commproc.c b/arch/powerpc/sysdev/commproc.c
index 4f67b89ba1d0..dd5417aec1b4 100644
--- a/arch/powerpc/sysdev/commproc.c
+++ b/arch/powerpc/sysdev/commproc.c
@@ -395,4 +395,4 @@ uint cpm_dpram_phys(u8* addr)
395{ 395{
396 return (dpram_pbase + (uint)(addr - dpram_vbase)); 396 return (dpram_pbase + (uint)(addr - dpram_vbase));
397} 397}
398EXPORT_SYMBOL(cpm_dpram_addr); 398EXPORT_SYMBOL(cpm_dpram_phys);
diff --git a/arch/ppc/8xx_io/commproc.c b/arch/ppc/8xx_io/commproc.c
index 7088428e1fe2..9da880be4dc0 100644
--- a/arch/ppc/8xx_io/commproc.c
+++ b/arch/ppc/8xx_io/commproc.c
@@ -459,7 +459,7 @@ EXPORT_SYMBOL(cpm_dpdump);
459 459
460void *cpm_dpram_addr(unsigned long offset) 460void *cpm_dpram_addr(unsigned long offset)
461{ 461{
462 return ((immap_t *)IMAP_ADDR)->im_cpm.cp_dpmem + offset; 462 return (void *)(dpram_vbase + offset);
463} 463}
464EXPORT_SYMBOL(cpm_dpram_addr); 464EXPORT_SYMBOL(cpm_dpram_addr);
465 465
diff --git a/drivers/acpi/sleep/Makefile b/drivers/acpi/sleep/Makefile
index ba9bd403d443..f1fb888c2d29 100644
--- a/drivers/acpi/sleep/Makefile
+++ b/drivers/acpi/sleep/Makefile
@@ -1,5 +1,5 @@
1obj-y := wakeup.o 1obj-y := wakeup.o
2obj-$(CONFIG_ACPI_SLEEP) += main.o 2obj-y += main.o
3obj-$(CONFIG_ACPI_SLEEP) += proc.o 3obj-$(CONFIG_ACPI_SLEEP) += proc.o
4 4
5EXTRA_CFLAGS += $(ACPI_CFLAGS) 5EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c
index 85633c585aab..2cbb9aabd00e 100644
--- a/drivers/acpi/sleep/main.c
+++ b/drivers/acpi/sleep/main.c
@@ -24,7 +24,30 @@
24 24
25u8 sleep_states[ACPI_S_STATE_COUNT]; 25u8 sleep_states[ACPI_S_STATE_COUNT];
26 26
27#ifdef CONFIG_PM_SLEEP
27static u32 acpi_target_sleep_state = ACPI_STATE_S0; 28static u32 acpi_target_sleep_state = ACPI_STATE_S0;
29#endif
30
31int acpi_sleep_prepare(u32 acpi_state)
32{
33#ifdef CONFIG_ACPI_SLEEP
34 /* do we have a wakeup address for S2 and S3? */
35 if (acpi_state == ACPI_STATE_S3) {
36 if (!acpi_wakeup_address) {
37 return -EFAULT;
38 }
39 acpi_set_firmware_waking_vector((acpi_physical_address)
40 virt_to_phys((void *)
41 acpi_wakeup_address));
42
43 }
44 ACPI_FLUSH_CPU_CACHE();
45 acpi_enable_wakeup_device_prep(acpi_state);
46#endif
47 acpi_gpe_sleep_prepare(acpi_state);
48 acpi_enter_sleep_state_prep(acpi_state);
49 return 0;
50}
28 51
29#ifdef CONFIG_SUSPEND 52#ifdef CONFIG_SUSPEND
30static struct pm_ops acpi_pm_ops; 53static struct pm_ops acpi_pm_ops;
@@ -60,27 +83,6 @@ static int acpi_pm_set_target(suspend_state_t pm_state)
60 return error; 83 return error;
61} 84}
62 85
63int acpi_sleep_prepare(u32 acpi_state)
64{
65#ifdef CONFIG_ACPI_SLEEP
66 /* do we have a wakeup address for S2 and S3? */
67 if (acpi_state == ACPI_STATE_S3) {
68 if (!acpi_wakeup_address) {
69 return -EFAULT;
70 }
71 acpi_set_firmware_waking_vector((acpi_physical_address)
72 virt_to_phys((void *)
73 acpi_wakeup_address));
74
75 }
76 ACPI_FLUSH_CPU_CACHE();
77 acpi_enable_wakeup_device_prep(acpi_state);
78#endif
79 acpi_gpe_sleep_prepare(acpi_state);
80 acpi_enter_sleep_state_prep(acpi_state);
81 return 0;
82}
83
84/** 86/**
85 * acpi_pm_prepare - Do preliminary suspend work. 87 * acpi_pm_prepare - Do preliminary suspend work.
86 * @pm_state: ignored 88 * @pm_state: ignored
@@ -299,6 +301,7 @@ int acpi_suspend(u32 acpi_state)
299 return -EINVAL; 301 return -EINVAL;
300} 302}
301 303
304#ifdef CONFIG_PM_SLEEP
302/** 305/**
303 * acpi_pm_device_sleep_state - return preferred power state of ACPI device 306 * acpi_pm_device_sleep_state - return preferred power state of ACPI device
304 * in the system sleep state given by %acpi_target_sleep_state 307 * in the system sleep state given by %acpi_target_sleep_state
@@ -373,6 +376,7 @@ int acpi_pm_device_sleep_state(struct device *dev, int wake, int *d_min_p)
373 *d_min_p = d_min; 376 *d_min_p = d_min;
374 return d_max; 377 return d_max;
375} 378}
379#endif
376 380
377static void acpi_power_off_prepare(void) 381static void acpi_power_off_prepare(void)
378{ 382{
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index 2bd7645f1a88..cce2834b2b60 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -375,8 +375,9 @@ static void sis_66_set_dmamode (struct ata_port *ap, struct ata_device *adev)
375 int drive_pci = sis_old_port_base(adev); 375 int drive_pci = sis_old_port_base(adev);
376 u16 timing; 376 u16 timing;
377 377
378 /* MWDMA 0-2 and UDMA 0-5 */
378 const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 }; 379 const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 };
379 const u16 udma_bits[] = { 0xF000, 0xD000, 0xB000, 0xA000, 0x9000}; 380 const u16 udma_bits[] = { 0xF000, 0xD000, 0xB000, 0xA000, 0x9000, 0x8000 };
380 381
381 pci_read_config_word(pdev, drive_pci, &timing); 382 pci_read_config_word(pdev, drive_pci, &timing);
382 383
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index ef83e6b1e314..233e88693395 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -888,6 +888,16 @@ static inline void sil24_host_intr(struct ata_port *ap)
888 u32 slot_stat, qc_active; 888 u32 slot_stat, qc_active;
889 int rc; 889 int rc;
890 890
891 /* If PCIX_IRQ_WOC, there's an inherent race window between
892 * clearing IRQ pending status and reading PORT_SLOT_STAT
893 * which may cause spurious interrupts afterwards. This is
894 * unavoidable and much better than losing interrupts which
895 * happens if IRQ pending is cleared after reading
896 * PORT_SLOT_STAT.
897 */
898 if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
899 writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);
900
891 slot_stat = readl(port + PORT_SLOT_STAT); 901 slot_stat = readl(port + PORT_SLOT_STAT);
892 902
893 if (unlikely(slot_stat & HOST_SSTAT_ATTN)) { 903 if (unlikely(slot_stat & HOST_SSTAT_ATTN)) {
@@ -895,9 +905,6 @@ static inline void sil24_host_intr(struct ata_port *ap)
895 return; 905 return;
896 } 906 }
897 907
898 if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
899 writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);
900
901 qc_active = slot_stat & ~HOST_SSTAT_ATTN; 908 qc_active = slot_stat & ~HOST_SSTAT_ATTN;
902 rc = ata_qc_complete_multiple(ap, qc_active, sil24_finish_qc); 909 rc = ata_qc_complete_multiple(ap, qc_active, sil24_finish_qc);
903 if (rc > 0) 910 if (rc > 0)
@@ -910,7 +917,8 @@ static inline void sil24_host_intr(struct ata_port *ap)
910 return; 917 return;
911 } 918 }
912 919
913 if (ata_ratelimit()) 920 /* spurious interrupts are expected if PCIX_IRQ_WOC */
921 if (!(ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) && ata_ratelimit())
914 ata_port_printk(ap, KERN_INFO, "spurious interrupt " 922 ata_port_printk(ap, KERN_INFO, "spurious interrupt "
915 "(slot_stat 0x%x active_tag %d sactive 0x%x)\n", 923 "(slot_stat 0x%x active_tag %d sactive 0x%x)\n",
916 slot_stat, ap->active_tag, ap->sactive); 924 slot_stat, ap->active_tag, ap->sactive);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 6de33d7a29ba..67c92582d6ef 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -284,6 +284,7 @@ static ssize_t show_uevent(struct device *dev, struct device_attribute *attr,
284 284
285 /* let the kset specific function add its keys */ 285 /* let the kset specific function add its keys */
286 pos = data; 286 pos = data;
287 memset(envp, 0, sizeof(envp));
287 retval = kset->uevent_ops->uevent(kset, &dev->kobj, 288 retval = kset->uevent_ops->uevent(kset, &dev->kobj,
288 envp, ARRAY_SIZE(envp), 289 envp, ARRAY_SIZE(envp),
289 pos, PAGE_SIZE); 290 pos, PAGE_SIZE);
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 67ee3d4b2878..79245714f0a7 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1032,6 +1032,10 @@ int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip, struct file *fp)
1032 check_disk_change(ip->i_bdev); 1032 check_disk_change(ip->i_bdev);
1033 return 0; 1033 return 0;
1034err_release: 1034err_release:
1035 if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) {
1036 cdi->ops->lock_door(cdi, 0);
1037 cdinfo(CD_OPEN, "door unlocked.\n");
1038 }
1035 cdi->ops->release(cdi); 1039 cdi->ops->release(cdi);
1036err: 1040err:
1037 cdi->use_count--; 1041 cdi->use_count--;
diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h
index 737088bd0780..28b98733beb8 100644
--- a/drivers/char/drm/i915_drv.h
+++ b/drivers/char/drm/i915_drv.h
@@ -210,6 +210,12 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
210#define I915REG_INT_MASK_R 0x020a8 210#define I915REG_INT_MASK_R 0x020a8
211#define I915REG_INT_ENABLE_R 0x020a0 211#define I915REG_INT_ENABLE_R 0x020a0
212 212
213#define I915REG_PIPEASTAT 0x70024
214#define I915REG_PIPEBSTAT 0x71024
215
216#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17)
217#define I915_VBLANK_CLEAR (1UL<<1)
218
213#define SRX_INDEX 0x3c4 219#define SRX_INDEX 0x3c4
214#define SRX_DATA 0x3c5 220#define SRX_DATA 0x3c5
215#define SR01 1 221#define SR01 1
diff --git a/drivers/char/drm/i915_irq.c b/drivers/char/drm/i915_irq.c
index 4b4b2ce89863..bb8e9e9c8201 100644
--- a/drivers/char/drm/i915_irq.c
+++ b/drivers/char/drm/i915_irq.c
@@ -214,6 +214,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
214 struct drm_device *dev = (struct drm_device *) arg; 214 struct drm_device *dev = (struct drm_device *) arg;
215 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 215 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
216 u16 temp; 216 u16 temp;
217 u32 pipea_stats, pipeb_stats;
218
219 pipea_stats = I915_READ(I915REG_PIPEASTAT);
220 pipeb_stats = I915_READ(I915REG_PIPEBSTAT);
217 221
218 temp = I915_READ16(I915REG_INT_IDENTITY_R); 222 temp = I915_READ16(I915REG_INT_IDENTITY_R);
219 223
@@ -225,6 +229,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
225 return IRQ_NONE; 229 return IRQ_NONE;
226 230
227 I915_WRITE16(I915REG_INT_IDENTITY_R, temp); 231 I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
232 (void) I915_READ16(I915REG_INT_IDENTITY_R);
233 DRM_READMEMORYBARRIER();
228 234
229 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 235 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
230 236
@@ -252,6 +258,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
252 258
253 if (dev_priv->swaps_pending > 0) 259 if (dev_priv->swaps_pending > 0)
254 drm_locked_tasklet(dev, i915_vblank_tasklet); 260 drm_locked_tasklet(dev, i915_vblank_tasklet);
261 I915_WRITE(I915REG_PIPEASTAT,
262 pipea_stats|I915_VBLANK_INTERRUPT_ENABLE|
263 I915_VBLANK_CLEAR);
264 I915_WRITE(I915REG_PIPEBSTAT,
265 pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE|
266 I915_VBLANK_CLEAR);
255 } 267 }
256 268
257 return IRQ_HANDLED; 269 return IRQ_HANDLED;
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 7ecffc9c738f..4c16778e3f84 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -62,6 +62,8 @@
62 62
63static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ; 63static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;
64 64
65/* This clocksource driver currently only works on ia64 */
66#ifdef CONFIG_IA64
65static void __iomem *hpet_mctr; 67static void __iomem *hpet_mctr;
66 68
67static cycle_t read_hpet(void) 69static cycle_t read_hpet(void)
@@ -79,6 +81,7 @@ static struct clocksource clocksource_hpet = {
79 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 81 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
80}; 82};
81static struct clocksource *hpet_clocksource; 83static struct clocksource *hpet_clocksource;
84#endif
82 85
83/* A lock for concurrent access by app and isr hpet activity. */ 86/* A lock for concurrent access by app and isr hpet activity. */
84static DEFINE_SPINLOCK(hpet_lock); 87static DEFINE_SPINLOCK(hpet_lock);
@@ -943,14 +946,14 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
943 printk(KERN_DEBUG "%s: 0x%lx is busy\n", 946 printk(KERN_DEBUG "%s: 0x%lx is busy\n",
944 __FUNCTION__, hdp->hd_phys_address); 947 __FUNCTION__, hdp->hd_phys_address);
945 iounmap(hdp->hd_address); 948 iounmap(hdp->hd_address);
946 return -EBUSY; 949 return AE_ALREADY_EXISTS;
947 } 950 }
948 } else if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) { 951 } else if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) {
949 struct acpi_resource_fixed_memory32 *fixmem32; 952 struct acpi_resource_fixed_memory32 *fixmem32;
950 953
951 fixmem32 = &res->data.fixed_memory32; 954 fixmem32 = &res->data.fixed_memory32;
952 if (!fixmem32) 955 if (!fixmem32)
953 return -EINVAL; 956 return AE_NO_MEMORY;
954 957
955 hdp->hd_phys_address = fixmem32->address; 958 hdp->hd_phys_address = fixmem32->address;
956 hdp->hd_address = ioremap(fixmem32->address, 959 hdp->hd_address = ioremap(fixmem32->address,
@@ -960,7 +963,7 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
960 printk(KERN_DEBUG "%s: 0x%lx is busy\n", 963 printk(KERN_DEBUG "%s: 0x%lx is busy\n",
961 __FUNCTION__, hdp->hd_phys_address); 964 __FUNCTION__, hdp->hd_phys_address);
962 iounmap(hdp->hd_address); 965 iounmap(hdp->hd_address);
963 return -EBUSY; 966 return AE_ALREADY_EXISTS;
964 } 967 }
965 } else if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) { 968 } else if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) {
966 struct acpi_resource_extended_irq *irqp; 969 struct acpi_resource_extended_irq *irqp;
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index 049a46cc9f87..04ac155d3a07 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -155,23 +155,22 @@ mspec_open(struct vm_area_struct *vma)
155 * mspec_close 155 * mspec_close
156 * 156 *
157 * Called when unmapping a device mapping. Frees all mspec pages 157 * Called when unmapping a device mapping. Frees all mspec pages
158 * belonging to the vma. 158 * belonging to all the vma's sharing this vma_data structure.
159 */ 159 */
160static void 160static void
161mspec_close(struct vm_area_struct *vma) 161mspec_close(struct vm_area_struct *vma)
162{ 162{
163 struct vma_data *vdata; 163 struct vma_data *vdata;
164 int index, last_index, result; 164 int index, last_index;
165 unsigned long my_page; 165 unsigned long my_page;
166 166
167 vdata = vma->vm_private_data; 167 vdata = vma->vm_private_data;
168 168
169 BUG_ON(vma->vm_start < vdata->vm_start || vma->vm_end > vdata->vm_end); 169 if (!atomic_dec_and_test(&vdata->refcnt))
170 return;
170 171
171 spin_lock(&vdata->lock); 172 last_index = (vdata->vm_end - vdata->vm_start) >> PAGE_SHIFT;
172 index = (vma->vm_start - vdata->vm_start) >> PAGE_SHIFT; 173 for (index = 0; index < last_index; index++) {
173 last_index = (vma->vm_end - vdata->vm_start) >> PAGE_SHIFT;
174 for (; index < last_index; index++) {
175 if (vdata->maddr[index] == 0) 174 if (vdata->maddr[index] == 0)
176 continue; 175 continue;
177 /* 176 /*
@@ -180,20 +179,12 @@ mspec_close(struct vm_area_struct *vma)
180 */ 179 */
181 my_page = vdata->maddr[index]; 180 my_page = vdata->maddr[index];
182 vdata->maddr[index] = 0; 181 vdata->maddr[index] = 0;
183 spin_unlock(&vdata->lock); 182 if (!mspec_zero_block(my_page, PAGE_SIZE))
184 result = mspec_zero_block(my_page, PAGE_SIZE);
185 if (!result)
186 uncached_free_page(my_page); 183 uncached_free_page(my_page);
187 else 184 else
188 printk(KERN_WARNING "mspec_close(): " 185 printk(KERN_WARNING "mspec_close(): "
189 "failed to zero page %i\n", 186 "failed to zero page %ld\n", my_page);
190 result);
191 spin_lock(&vdata->lock);
192 } 187 }
193 spin_unlock(&vdata->lock);
194
195 if (!atomic_dec_and_test(&vdata->refcnt))
196 return;
197 188
198 if (vdata->flags & VMD_VMALLOCED) 189 if (vdata->flags & VMD_VMALLOCED)
199 vfree(vdata); 190 vfree(vdata);
@@ -201,7 +192,6 @@ mspec_close(struct vm_area_struct *vma)
201 kfree(vdata); 192 kfree(vdata);
202} 193}
203 194
204
205/* 195/*
206 * mspec_nopfn 196 * mspec_nopfn
207 * 197 *
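The mspec.c hunk above moves the page teardown behind the reference count: every vma sharing a vma_data drops a reference on close, and only the final put walks the page array and frees it, which also removes the per-iteration lock dance. A minimal user-space sketch of that last-reference-cleans-up pattern, using C11 atomics in place of the kernel's atomic_t (all names here are illustrative, not the driver's):

/* "Last reference tears everything down" sketch (illustrative names). */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct shared_data {
	atomic_int refcnt;
	int npages;
	unsigned long *pages;           /* 0 means "slot never allocated" */
};

static void put_shared(struct shared_data *d)
{
	/* Only the caller that drops the final reference does the cleanup. */
	if (atomic_fetch_sub(&d->refcnt, 1) != 1)
		return;

	for (int i = 0; i < d->npages; i++) {
		if (d->pages[i] == 0)
			continue;
		printf("freeing page %#lx\n", d->pages[i]);
		d->pages[i] = 0;
	}
	free(d->pages);
	free(d);
}

int main(void)
{
	struct shared_data *d = calloc(1, sizeof(*d));

	d->npages = 2;
	d->pages = calloc(d->npages, sizeof(*d->pages));
	d->pages[0] = 0x1000;
	atomic_init(&d->refcnt, 2);     /* two mappings share this data */

	put_shared(d);                  /* first close: nothing freed yet */
	put_shared(d);                  /* last close: pages released */
	return 0;
}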
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index c6f6f4209739..c799b7f7bbb3 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -770,6 +770,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
770 /* 770 /*
771 * Switching-from response 771 * Switching-from response
772 */ 772 */
773 acquire_console_sem();
773 if (vc->vt_newvt >= 0) { 774 if (vc->vt_newvt >= 0) {
774 if (arg == 0) 775 if (arg == 0)
775 /* 776 /*
@@ -784,7 +785,6 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
784 * complete the switch. 785 * complete the switch.
785 */ 786 */
786 int newvt; 787 int newvt;
787 acquire_console_sem();
788 newvt = vc->vt_newvt; 788 newvt = vc->vt_newvt;
789 vc->vt_newvt = -1; 789 vc->vt_newvt = -1;
790 i = vc_allocate(newvt); 790 i = vc_allocate(newvt);
@@ -798,7 +798,6 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
798 * other console switches.. 798 * other console switches..
799 */ 799 */
800 complete_change_console(vc_cons[newvt].d); 800 complete_change_console(vc_cons[newvt].d);
801 release_console_sem();
802 } 801 }
803 } 802 }
804 803
@@ -810,9 +809,12 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
810 /* 809 /*
811 * If it's just an ACK, ignore it 810 * If it's just an ACK, ignore it
812 */ 811 */
813 if (arg != VT_ACKACQ) 812 if (arg != VT_ACKACQ) {
813 release_console_sem();
814 return -EINVAL; 814 return -EINVAL;
815 }
815 } 816 }
817 release_console_sem();
816 818
817 return 0; 819 return 0;
818 820
@@ -1208,15 +1210,18 @@ void change_console(struct vc_data *new_vc)
1208 /* 1210 /*
1209 * Send the signal as privileged - kill_pid() will 1211 * Send the signal as privileged - kill_pid() will
1210 * tell us if the process has gone or something else 1212 * tell us if the process has gone or something else
1211 * is awry 1213 * is awry.
1214 *
1215 * We need to set vt_newvt *before* sending the signal or we
1216 * have a race.
1212 */ 1217 */
1218 vc->vt_newvt = new_vc->vc_num;
1213 if (kill_pid(vc->vt_pid, vc->vt_mode.relsig, 1) == 0) { 1219 if (kill_pid(vc->vt_pid, vc->vt_mode.relsig, 1) == 0) {
1214 /* 1220 /*
1215 * It worked. Mark the vt to switch to and 1221 * It worked. Mark the vt to switch to and
1216 * return. The process needs to send us a 1222 * return. The process needs to send us a
1217 * VT_RELDISP ioctl to complete the switch. 1223 * VT_RELDISP ioctl to complete the switch.
1218 */ 1224 */
1219 vc->vt_newvt = new_vc->vc_num;
1220 return; 1225 return;
1221 } 1226 }
1222 1227
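The vt_ioctl.c hunk above is largely about ordering: vc->vt_newvt must be published before kill_pid() notifies the controlling process, otherwise that process can react to the signal and still find the field unset. A small user-space analogue of the publish-then-notify rule, using a condition variable instead of a signal (hypothetical names, build with -pthread; this is not the console code):

/* Publish state before waking the consumer (user-space analogue only). */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int newvt = -1;                  /* -1: no switch has been requested */

static void *waiter(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (newvt < 0)               /* sleep until a target is published */
		pthread_cond_wait(&cond, &lock);
	printf("completing switch to vt %d\n", newvt);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);

	pthread_mutex_lock(&lock);
	newvt = 2;                      /* publish the state first ...       */
	pthread_cond_signal(&cond);     /* ... and only then send the wakeup */
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}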
diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
index e2abe18e575d..7c662ee594a3 100644
--- a/drivers/input/joystick/Kconfig
+++ b/drivers/input/joystick/Kconfig
@@ -277,7 +277,7 @@ config JOYSTICK_XPAD_FF
277 277
278config JOYSTICK_XPAD_LEDS 278config JOYSTICK_XPAD_LEDS
279 bool "LED Support for Xbox360 controller 'BigX' LED" 279 bool "LED Support for Xbox360 controller 'BigX' LED"
280 depends on LEDS_CLASS && JOYSTICK_XPAD 280 depends on JOYSTICK_XPAD && (LEDS_CLASS=y || LEDS_CLASS=JOYSTICK_XPAD)
281 ---help--- 281 ---help---
282 This option enables support for the LED which surrounds the Big X on 282 This option enables support for the LED which surrounds the Big X on
283 XBox 360 controller. 283 XBox 360 controller.
diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c
index 2bea1b2c631c..a1804bfdbb8c 100644
--- a/drivers/input/mouse/appletouch.c
+++ b/drivers/input/mouse/appletouch.c
@@ -328,6 +328,7 @@ static void atp_complete(struct urb* urb)
328{ 328{
329 int x, y, x_z, y_z, x_f, y_f; 329 int x, y, x_z, y_z, x_f, y_f;
330 int retval, i, j; 330 int retval, i, j;
331 int key;
331 struct atp *dev = urb->context; 332 struct atp *dev = urb->context;
332 333
333 switch (urb->status) { 334 switch (urb->status) {
@@ -468,6 +469,7 @@ static void atp_complete(struct urb* urb)
468 ATP_XFACT, &x_z, &x_f); 469 ATP_XFACT, &x_z, &x_f);
469 y = atp_calculate_abs(dev->xy_acc + ATP_XSENSORS, ATP_YSENSORS, 470 y = atp_calculate_abs(dev->xy_acc + ATP_XSENSORS, ATP_YSENSORS,
470 ATP_YFACT, &y_z, &y_f); 471 ATP_YFACT, &y_z, &y_f);
472 key = dev->data[dev->datalen - 1] & 1;
471 473
472 if (x && y) { 474 if (x && y) {
473 if (dev->x_old != -1) { 475 if (dev->x_old != -1) {
@@ -505,7 +507,7 @@ static void atp_complete(struct urb* urb)
505 the first touch unless reinitialised. Do so if it's been 507 the first touch unless reinitialised. Do so if it's been
506 idle for a while in order to avoid waking the kernel up 508 idle for a while in order to avoid waking the kernel up
507 several hundred times a second */ 509 several hundred times a second */
508 if (atp_is_geyser_3(dev)) { 510 if (!key && atp_is_geyser_3(dev)) {
509 dev->idlecount++; 511 dev->idlecount++;
510 if (dev->idlecount == 10) { 512 if (dev->idlecount == 10) {
511 dev->valid = 0; 513 dev->valid = 0;
@@ -514,7 +516,7 @@ static void atp_complete(struct urb* urb)
514 } 516 }
515 } 517 }
516 518
517 input_report_key(dev->input, BTN_LEFT, dev->data[dev->datalen - 1] & 1); 519 input_report_key(dev->input, BTN_LEFT, key);
518 input_sync(dev->input); 520 input_sync(dev->input);
519 521
520exit: 522exit:
diff --git a/drivers/lguest/lguest_asm.S b/drivers/lguest/lguest_asm.S
index f182c6a36209..1ddcd5cd20f6 100644
--- a/drivers/lguest/lguest_asm.S
+++ b/drivers/lguest/lguest_asm.S
@@ -22,8 +22,9 @@
22 jmp lguest_init 22 jmp lguest_init
23 23
24/*G:055 We create a macro which puts the assembler code between lgstart_ and 24/*G:055 We create a macro which puts the assembler code between lgstart_ and
25 * lgend_ markers. These templates end up in the .init.text section, so they 25 * lgend_ markers. These templates are put in the .text section: they can't be
26 * are discarded after boot. */ 26 * discarded after boot as we may need to patch modules, too. */
27.text
27#define LGUEST_PATCH(name, insns...) \ 28#define LGUEST_PATCH(name, insns...) \
28 lgstart_##name: insns; lgend_##name:; \ 29 lgstart_##name: insns; lgend_##name:; \
29 .globl lgstart_##name; .globl lgend_##name 30 .globl lgstart_##name; .globl lgend_##name
@@ -34,7 +35,6 @@ LGUEST_PATCH(popf, movl %eax, lguest_data+LGUEST_DATA_irq_enabled)
34LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax) 35LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax)
35/*:*/ 36/*:*/
36 37
37.text
38/* These demark the EIP range where host should never deliver interrupts. */ 38/* These demark the EIP range where host should never deliver interrupts. */
39.global lguest_noirq_start 39.global lguest_noirq_start
40.global lguest_noirq_end 40.global lguest_noirq_end
diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c
index 0285c4a830eb..66ea3cbc369c 100644
--- a/drivers/media/video/ivtv/ivtv-fileops.c
+++ b/drivers/media/video/ivtv/ivtv-fileops.c
@@ -754,9 +754,11 @@ static void ivtv_stop_decoding(struct ivtv_open_id *id, int flags, u64 pts)
754 ivtv_yuv_close(itv); 754 ivtv_yuv_close(itv);
755 } 755 }
756 if (s->type == IVTV_DEC_STREAM_TYPE_YUV && itv->output_mode == OUT_YUV) 756 if (s->type == IVTV_DEC_STREAM_TYPE_YUV && itv->output_mode == OUT_YUV)
757 itv->output_mode = OUT_NONE; 757 itv->output_mode = OUT_NONE;
758 else if (s->type == IVTV_DEC_STREAM_TYPE_YUV && itv->output_mode == OUT_UDMA_YUV)
759 itv->output_mode = OUT_NONE;
758 else if (s->type == IVTV_DEC_STREAM_TYPE_MPG && itv->output_mode == OUT_MPG) 760 else if (s->type == IVTV_DEC_STREAM_TYPE_MPG && itv->output_mode == OUT_MPG)
759 itv->output_mode = OUT_NONE; 761 itv->output_mode = OUT_NONE;
760 762
761 itv->speed = 0; 763 itv->speed = 0;
762 clear_bit(IVTV_F_I_DEC_PAUSED, &itv->i_flags); 764 clear_bit(IVTV_F_I_DEC_PAUSED, &itv->i_flags);
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index e3371f972240..0cb006f2943d 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -1387,7 +1387,6 @@ static const struct file_operations usbvision_fops = {
1387 .ioctl = video_ioctl2, 1387 .ioctl = video_ioctl2,
1388 .llseek = no_llseek, 1388 .llseek = no_llseek,
1389/* .poll = video_poll, */ 1389/* .poll = video_poll, */
1390 .mmap = usbvision_v4l2_mmap,
1391 .compat_ioctl = v4l_compat_ioctl32, 1390 .compat_ioctl = v4l_compat_ioctl32,
1392}; 1391};
1393static struct video_device usbvision_video_template = { 1392static struct video_device usbvision_video_template = {
@@ -1413,7 +1412,7 @@ static struct video_device usbvision_video_template = {
1413 .vidioc_s_input = vidioc_s_input, 1412 .vidioc_s_input = vidioc_s_input,
1414 .vidioc_queryctrl = vidioc_queryctrl, 1413 .vidioc_queryctrl = vidioc_queryctrl,
1415 .vidioc_g_audio = vidioc_g_audio, 1414 .vidioc_g_audio = vidioc_g_audio,
1416 .vidioc_g_audio = vidioc_s_audio, 1415 .vidioc_s_audio = vidioc_s_audio,
1417 .vidioc_g_ctrl = vidioc_g_ctrl, 1416 .vidioc_g_ctrl = vidioc_g_ctrl,
1418 .vidioc_s_ctrl = vidioc_s_ctrl, 1417 .vidioc_s_ctrl = vidioc_s_ctrl,
1419 .vidioc_streamon = vidioc_streamon, 1418 .vidioc_streamon = vidioc_streamon,
@@ -1459,7 +1458,7 @@ static struct video_device usbvision_radio_template=
1459 .vidioc_s_input = vidioc_s_input, 1458 .vidioc_s_input = vidioc_s_input,
1460 .vidioc_queryctrl = vidioc_queryctrl, 1459 .vidioc_queryctrl = vidioc_queryctrl,
1461 .vidioc_g_audio = vidioc_g_audio, 1460 .vidioc_g_audio = vidioc_g_audio,
1462 .vidioc_g_audio = vidioc_s_audio, 1461 .vidioc_s_audio = vidioc_s_audio,
1463 .vidioc_g_ctrl = vidioc_g_ctrl, 1462 .vidioc_g_ctrl = vidioc_g_ctrl,
1464 .vidioc_s_ctrl = vidioc_s_ctrl, 1463 .vidioc_s_ctrl = vidioc_s_ctrl,
1465 .vidioc_g_tuner = vidioc_g_tuner, 1464 .vidioc_g_tuner = vidioc_g_tuner,
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 4c3785c9d4b8..9ecc3adcf6c1 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -1726,6 +1726,7 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter, struct ethtool_wol
1726 case E1000_DEV_ID_82571EB_QUAD_COPPER: 1726 case E1000_DEV_ID_82571EB_QUAD_COPPER:
1727 case E1000_DEV_ID_82571EB_QUAD_FIBER: 1727 case E1000_DEV_ID_82571EB_QUAD_FIBER:
1728 case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE: 1728 case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
1729 case E1000_DEV_ID_82571PT_QUAD_COPPER:
1729 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: 1730 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1730 /* quad port adapters only support WoL on port A */ 1731 /* quad port adapters only support WoL on port A */
1731 if (!adapter->quad_port_a) { 1732 if (!adapter->quad_port_a) {
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index ba120f7fb0be..8604adbe351c 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -387,6 +387,7 @@ e1000_set_mac_type(struct e1000_hw *hw)
387 case E1000_DEV_ID_82571EB_SERDES_DUAL: 387 case E1000_DEV_ID_82571EB_SERDES_DUAL:
388 case E1000_DEV_ID_82571EB_SERDES_QUAD: 388 case E1000_DEV_ID_82571EB_SERDES_QUAD:
389 case E1000_DEV_ID_82571EB_QUAD_COPPER: 389 case E1000_DEV_ID_82571EB_QUAD_COPPER:
390 case E1000_DEV_ID_82571PT_QUAD_COPPER:
390 case E1000_DEV_ID_82571EB_QUAD_FIBER: 391 case E1000_DEV_ID_82571EB_QUAD_FIBER:
391 case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE: 392 case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
392 hw->mac_type = e1000_82571; 393 hw->mac_type = e1000_82571;
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index fe8714655c90..07f0ea73676e 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -475,6 +475,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
475#define E1000_DEV_ID_82571EB_FIBER 0x105F 475#define E1000_DEV_ID_82571EB_FIBER 0x105F
476#define E1000_DEV_ID_82571EB_SERDES 0x1060 476#define E1000_DEV_ID_82571EB_SERDES 0x1060
477#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4 477#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
478#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5
478#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5 479#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5
479#define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE 0x10BC 480#define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE 0x10BC
480#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9 481#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 4a225950fb43..e7c8951f47fa 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -108,6 +108,7 @@ static struct pci_device_id e1000_pci_tbl[] = {
108 INTEL_E1000_ETHERNET_DEVICE(0x10BC), 108 INTEL_E1000_ETHERNET_DEVICE(0x10BC),
109 INTEL_E1000_ETHERNET_DEVICE(0x10C4), 109 INTEL_E1000_ETHERNET_DEVICE(0x10C4),
110 INTEL_E1000_ETHERNET_DEVICE(0x10C5), 110 INTEL_E1000_ETHERNET_DEVICE(0x10C5),
111 INTEL_E1000_ETHERNET_DEVICE(0x10D5),
111 INTEL_E1000_ETHERNET_DEVICE(0x10D9), 112 INTEL_E1000_ETHERNET_DEVICE(0x10D9),
112 INTEL_E1000_ETHERNET_DEVICE(0x10DA), 113 INTEL_E1000_ETHERNET_DEVICE(0x10DA),
113 /* required last entry */ 114 /* required last entry */
@@ -1101,6 +1102,7 @@ e1000_probe(struct pci_dev *pdev,
1101 case E1000_DEV_ID_82571EB_QUAD_COPPER: 1102 case E1000_DEV_ID_82571EB_QUAD_COPPER:
1102 case E1000_DEV_ID_82571EB_QUAD_FIBER: 1103 case E1000_DEV_ID_82571EB_QUAD_FIBER:
1103 case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE: 1104 case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
1105 case E1000_DEV_ID_82571PT_QUAD_COPPER:
1104 /* if quad port adapter, disable WoL on all but port A */ 1106 /* if quad port adapter, disable WoL on all but port A */
1105 if (global_quad_port_a != 0) 1107 if (global_quad_port_a != 0)
1106 adapter->eeprom_wol = 0; 1108 adapter->eeprom_wol = 0;
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 6a117e9968cb..34288fe038c3 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -534,7 +534,7 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
534 } 534 }
535 535
536 /* PHY status changed */ 536 /* PHY status changed */
537 if (eth_int_cause_ext & ETH_INT_CAUSE_PHY) { 537 if (eth_int_cause_ext & (ETH_INT_CAUSE_PHY | ETH_INT_CAUSE_STATE)) {
538 struct ethtool_cmd cmd; 538 struct ethtool_cmd cmd;
539 539
540 if (mii_link_ok(&mp->mii)) { 540 if (mii_link_ok(&mp->mii)) {
@@ -2768,8 +2768,6 @@ static const struct ethtool_ops mv643xx_ethtool_ops = {
2768 .get_stats_count = mv643xx_get_stats_count, 2768 .get_stats_count = mv643xx_get_stats_count,
2769 .get_ethtool_stats = mv643xx_get_ethtool_stats, 2769 .get_ethtool_stats = mv643xx_get_ethtool_stats,
2770 .get_strings = mv643xx_get_strings, 2770 .get_strings = mv643xx_get_strings,
2771 .get_stats_count = mv643xx_get_stats_count,
2772 .get_ethtool_stats = mv643xx_get_ethtool_stats,
2773 .nway_reset = mv643xx_eth_nway_restart, 2771 .nway_reset = mv643xx_eth_nway_restart,
2774}; 2772};
2775 2773
diff --git a/drivers/net/mv643xx_eth.h b/drivers/net/mv643xx_eth.h
index 82f8c0cbfb64..565b96696aca 100644
--- a/drivers/net/mv643xx_eth.h
+++ b/drivers/net/mv643xx_eth.h
@@ -64,7 +64,9 @@
64#define ETH_INT_CAUSE_TX_ERROR (ETH_TX_QUEUES_ENABLED << 8) 64#define ETH_INT_CAUSE_TX_ERROR (ETH_TX_QUEUES_ENABLED << 8)
65#define ETH_INT_CAUSE_TX (ETH_INT_CAUSE_TX_DONE | ETH_INT_CAUSE_TX_ERROR) 65#define ETH_INT_CAUSE_TX (ETH_INT_CAUSE_TX_DONE | ETH_INT_CAUSE_TX_ERROR)
66#define ETH_INT_CAUSE_PHY 0x00010000 66#define ETH_INT_CAUSE_PHY 0x00010000
67#define ETH_INT_UNMASK_ALL_EXT (ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY) 67#define ETH_INT_CAUSE_STATE 0x00100000
68#define ETH_INT_UNMASK_ALL_EXT (ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY | \
69 ETH_INT_CAUSE_STATE)
68 70
69#define ETH_INT_MASK_ALL 0x00000000 71#define ETH_INT_MASK_ALL 0x00000000
70#define ETH_INT_MASK_ALL_EXT 0x00000000 72#define ETH_INT_MASK_ALL_EXT 0x00000000
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index c06cae3f0b56..503f2685fb73 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -116,7 +116,7 @@ struct el3_private {
116 spinlock_t lock; 116 spinlock_t lock;
117}; 117};
118 118
119static const char *if_names[] = { "auto", "10base2", "10baseT", "AUI" }; 119static const char *if_names[] = { "auto", "10baseT", "10base2", "AUI" };
120 120
121/*====================================================================*/ 121/*====================================================================*/
122 122
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c
index f79cf87a2bff..c0b6d19d1457 100644
--- a/drivers/net/ppp_mppe.c
+++ b/drivers/net/ppp_mppe.c
@@ -136,7 +136,7 @@ struct ppp_mppe_state {
136 * Key Derivation, from RFC 3078, RFC 3079. 136 * Key Derivation, from RFC 3078, RFC 3079.
137 * Equivalent to Get_Key() for MS-CHAP as described in RFC 3079. 137 * Equivalent to Get_Key() for MS-CHAP as described in RFC 3079.
138 */ 138 */
139static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *InterimKey) 139static void get_new_key_from_sha(struct ppp_mppe_state * state)
140{ 140{
141 struct hash_desc desc; 141 struct hash_desc desc;
142 struct scatterlist sg[4]; 142 struct scatterlist sg[4];
@@ -153,8 +153,6 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *I
153 desc.flags = 0; 153 desc.flags = 0;
154 154
155 crypto_hash_digest(&desc, sg, nbytes, state->sha1_digest); 155 crypto_hash_digest(&desc, sg, nbytes, state->sha1_digest);
156
157 memcpy(InterimKey, state->sha1_digest, state->keylen);
158} 156}
159 157
160/* 158/*
@@ -163,21 +161,21 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *I
163 */ 161 */
164static void mppe_rekey(struct ppp_mppe_state * state, int initial_key) 162static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
165{ 163{
166 unsigned char InterimKey[MPPE_MAX_KEY_LEN];
167 struct scatterlist sg_in[1], sg_out[1]; 164 struct scatterlist sg_in[1], sg_out[1];
168 struct blkcipher_desc desc = { .tfm = state->arc4 }; 165 struct blkcipher_desc desc = { .tfm = state->arc4 };
169 166
170 get_new_key_from_sha(state, InterimKey); 167 get_new_key_from_sha(state);
171 if (!initial_key) { 168 if (!initial_key) {
172 crypto_blkcipher_setkey(state->arc4, InterimKey, state->keylen); 169 crypto_blkcipher_setkey(state->arc4, state->sha1_digest,
173 setup_sg(sg_in, InterimKey, state->keylen); 170 state->keylen);
171 setup_sg(sg_in, state->sha1_digest, state->keylen);
174 setup_sg(sg_out, state->session_key, state->keylen); 172 setup_sg(sg_out, state->session_key, state->keylen);
175 if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, 173 if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
176 state->keylen) != 0) { 174 state->keylen) != 0) {
177 printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n"); 175 printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n");
178 } 176 }
179 } else { 177 } else {
180 memcpy(state->session_key, InterimKey, state->keylen); 178 memcpy(state->session_key, state->sha1_digest, state->keylen);
181 } 179 }
182 if (state->keylen == 8) { 180 if (state->keylen == 8) {
183 /* See RFC 3078 */ 181 /* See RFC 3078 */
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index b85ab4a8f2a3..c921ec32c232 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1228,7 +1228,10 @@ static void rtl8169_hw_phy_config(struct net_device *dev)
1228 return; 1228 return;
1229 } 1229 }
1230 1230
1231 /* phy config for RTL8169s mac_version C chip */ 1231 if ((tp->mac_version != RTL_GIGA_MAC_VER_02) &&
1232 (tp->mac_version != RTL_GIGA_MAC_VER_03))
1233 return;
1234
1232 mdio_write(ioaddr, 31, 0x0001); //w 31 2 0 1 1235 mdio_write(ioaddr, 31, 0x0001); //w 31 2 0 1
1233 mdio_write(ioaddr, 21, 0x1000); //w 21 15 0 1000 1236 mdio_write(ioaddr, 21, 0x1000); //w 21 15 0 1000
1234 mdio_write(ioaddr, 24, 0x65c7); //w 24 15 0 65c7 1237 mdio_write(ioaddr, 24, 0x65c7); //w 24 15 0 65c7
@@ -2567,6 +2570,15 @@ static void rtl8169_tx_interrupt(struct net_device *dev,
2567 (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) { 2570 (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
2568 netif_wake_queue(dev); 2571 netif_wake_queue(dev);
2569 } 2572 }
2573 /*
2574 * 8168 hack: TxPoll requests are lost when the Tx packets are
2575 * too close. Let's kick an extra TxPoll request when a burst
2576 * of start_xmit activity is detected (if it is not detected,
2577 * it is slow enough). -- FR
2578 */
2579 smp_rmb();
2580 if (tp->cur_tx != dirty_tx)
2581 RTL_W8(TxPoll, NPQ);
2570 } 2582 }
2571} 2583}
2572 2584
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index eaffe551d1d8..162489b9f599 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -338,6 +338,16 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
338 if (!(hw->flags & SKY2_HW_GIGABIT)) { 338 if (!(hw->flags & SKY2_HW_GIGABIT)) {
339 /* enable automatic crossover */ 339 /* enable automatic crossover */
340 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1; 340 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;
341
342 if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
343 hw->chip_rev == CHIP_REV_YU_FE2_A0) {
344 u16 spec;
345
346 /* Enable Class A driver for FE+ A0 */
347 spec = gm_phy_read(hw, port, PHY_MARV_FE_SPEC_2);
348 spec |= PHY_M_FESC_SEL_CL_A;
349 gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec);
350 }
341 } else { 351 } else {
342 /* disable energy detect */ 352 /* disable energy detect */
343 ctrl &= ~PHY_M_PC_EN_DET_MSK; 353 ctrl &= ~PHY_M_PC_EN_DET_MSK;
@@ -816,7 +826,8 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
816 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); 826 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
817 sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); 827 sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
818 828
819 if (!(hw->flags & SKY2_HW_RAMBUFFER)) { 829 /* On chips without ram buffer, pause is controled by MAC level */
830 if (sky2_read8(hw, B2_E_0) == 0) {
820 sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8); 831 sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
821 sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8); 832 sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
822 833
@@ -899,6 +910,20 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
899 return le; 910 return le;
900} 911}
901 912
913static void tx_init(struct sky2_port *sky2)
914{
915 struct sky2_tx_le *le;
916
917 sky2->tx_prod = sky2->tx_cons = 0;
918 sky2->tx_tcpsum = 0;
919 sky2->tx_last_mss = 0;
920
921 le = get_tx_le(sky2);
922 le->addr = 0;
923 le->opcode = OP_ADDR64 | HW_OWNER;
924 sky2->tx_addr64 = 0;
925}
926
902static inline struct tx_ring_info *tx_le_re(struct sky2_port *sky2, 927static inline struct tx_ring_info *tx_le_re(struct sky2_port *sky2,
903 struct sky2_tx_le *le) 928 struct sky2_tx_le *le)
904{ 929{
@@ -1271,7 +1296,7 @@ static int sky2_up(struct net_device *dev)
1271 struct sky2_port *sky2 = netdev_priv(dev); 1296 struct sky2_port *sky2 = netdev_priv(dev);
1272 struct sky2_hw *hw = sky2->hw; 1297 struct sky2_hw *hw = sky2->hw;
1273 unsigned port = sky2->port; 1298 unsigned port = sky2->port;
1274 u32 imask; 1299 u32 imask, ramsize;
1275 int cap, err = -ENOMEM; 1300 int cap, err = -ENOMEM;
1276 struct net_device *otherdev = hw->dev[sky2->port^1]; 1301 struct net_device *otherdev = hw->dev[sky2->port^1];
1277 1302
@@ -1309,7 +1334,8 @@ static int sky2_up(struct net_device *dev)
1309 GFP_KERNEL); 1334 GFP_KERNEL);
1310 if (!sky2->tx_ring) 1335 if (!sky2->tx_ring)
1311 goto err_out; 1336 goto err_out;
1312 sky2->tx_prod = sky2->tx_cons = 0; 1337
1338 tx_init(sky2);
1313 1339
1314 sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES, 1340 sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES,
1315 &sky2->rx_le_map); 1341 &sky2->rx_le_map);
@@ -1326,13 +1352,12 @@ static int sky2_up(struct net_device *dev)
1326 1352
1327 sky2_mac_init(hw, port); 1353 sky2_mac_init(hw, port);
1328 1354
1329 if (hw->flags & SKY2_HW_RAMBUFFER) { 1355 /* Register is number of 4K blocks on internal RAM buffer. */
1330 /* Register is number of 4K blocks on internal RAM buffer. */ 1356 ramsize = sky2_read8(hw, B2_E_0) * 4;
1331 u32 ramsize = sky2_read8(hw, B2_E_0) * 4; 1357 if (ramsize > 0) {
1332 u32 rxspace; 1358 u32 rxspace;
1333 1359
1334 printk(KERN_DEBUG PFX "%s: ram buffer %dK\n", dev->name, ramsize); 1360 pr_debug(PFX "%s: ram buffer %dK\n", dev->name, ramsize);
1335
1336 if (ramsize < 16) 1361 if (ramsize < 16)
1337 rxspace = ramsize / 2; 1362 rxspace = ramsize / 2;
1338 else 1363 else
@@ -1995,7 +2020,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
1995 2020
1996 synchronize_irq(hw->pdev->irq); 2021 synchronize_irq(hw->pdev->irq);
1997 2022
1998 if (!(hw->flags & SKY2_HW_RAMBUFFER)) 2023 if (sky2_read8(hw, B2_E_0) == 0)
1999 sky2_set_tx_stfwd(hw, port); 2024 sky2_set_tx_stfwd(hw, port);
2000 2025
2001 ctl = gma_read16(hw, port, GM_GP_CTRL); 2026 ctl = gma_read16(hw, port, GM_GP_CTRL);
@@ -2138,6 +2163,18 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
2138 sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending; 2163 sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
2139 prefetch(sky2->rx_ring + sky2->rx_next); 2164 prefetch(sky2->rx_ring + sky2->rx_next);
2140 2165
2166 if (length < ETH_ZLEN || length > sky2->rx_data_size)
2167 goto len_error;
2168
2169 /* This chip has hardware problems that generates bogus status.
2170 * So do only marginal checking and expect higher level protocols
2171 * to handle crap frames.
2172 */
2173 if (sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
2174 sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0 &&
2175 length != count)
2176 goto okay;
2177
2141 if (status & GMR_FS_ANY_ERR) 2178 if (status & GMR_FS_ANY_ERR)
2142 goto error; 2179 goto error;
2143 2180
@@ -2146,8 +2183,9 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
2146 2183
2147 /* if length reported by DMA does not match PHY, packet was truncated */ 2184 /* if length reported by DMA does not match PHY, packet was truncated */
2148 if (length != count) 2185 if (length != count)
2149 goto len_mismatch; 2186 goto len_error;
2150 2187
2188okay:
2151 if (length < copybreak) 2189 if (length < copybreak)
2152 skb = receive_copy(sky2, re, length); 2190 skb = receive_copy(sky2, re, length);
2153 else 2191 else
@@ -2157,13 +2195,13 @@ resubmit:
2157 2195
2158 return skb; 2196 return skb;
2159 2197
2160len_mismatch: 2198len_error:
2161 /* Truncation of overlength packets 2199 /* Truncation of overlength packets
2162 causes PHY length to not match MAC length */ 2200 causes PHY length to not match MAC length */
2163 ++sky2->net_stats.rx_length_errors; 2201 ++sky2->net_stats.rx_length_errors;
2164 if (netif_msg_rx_err(sky2) && net_ratelimit()) 2202 if (netif_msg_rx_err(sky2) && net_ratelimit())
2165 pr_info(PFX "%s: rx length mismatch: length %d status %#x\n", 2203 pr_info(PFX "%s: rx length error: status %#x length %d\n",
2166 dev->name, length, status); 2204 dev->name, status, length);
2167 goto resubmit; 2205 goto resubmit;
2168 2206
2169error: 2207error:
@@ -2526,7 +2564,7 @@ static void sky2_watchdog(unsigned long arg)
2526 ++active; 2564 ++active;
2527 2565
2528 /* For chips with Rx FIFO, check if stuck */ 2566 /* For chips with Rx FIFO, check if stuck */
2529 if ((hw->flags & SKY2_HW_RAMBUFFER) && 2567 if ((hw->flags & SKY2_HW_FIFO_HANG_CHECK) &&
2530 sky2_rx_hung(dev)) { 2568 sky2_rx_hung(dev)) {
2531 pr_info(PFX "%s: receiver hang detected\n", 2569 pr_info(PFX "%s: receiver hang detected\n",
2532 dev->name); 2570 dev->name);
@@ -2684,8 +2722,10 @@ static int __devinit sky2_init(struct sky2_hw *hw)
2684 switch(hw->chip_id) { 2722 switch(hw->chip_id) {
2685 case CHIP_ID_YUKON_XL: 2723 case CHIP_ID_YUKON_XL:
2686 hw->flags = SKY2_HW_GIGABIT 2724 hw->flags = SKY2_HW_GIGABIT
2687 | SKY2_HW_NEWER_PHY 2725 | SKY2_HW_NEWER_PHY;
2688 | SKY2_HW_RAMBUFFER; 2726 if (hw->chip_rev < 3)
2727 hw->flags |= SKY2_HW_FIFO_HANG_CHECK;
2728
2689 break; 2729 break;
2690 2730
2691 case CHIP_ID_YUKON_EC_U: 2731 case CHIP_ID_YUKON_EC_U:
@@ -2711,11 +2751,10 @@ static int __devinit sky2_init(struct sky2_hw *hw)
2711 dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n"); 2751 dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n");
2712 return -EOPNOTSUPP; 2752 return -EOPNOTSUPP;
2713 } 2753 }
2714 hw->flags = SKY2_HW_GIGABIT | SKY2_HW_RAMBUFFER; 2754 hw->flags = SKY2_HW_GIGABIT | SKY2_HW_FIFO_HANG_CHECK;
2715 break; 2755 break;
2716 2756
2717 case CHIP_ID_YUKON_FE: 2757 case CHIP_ID_YUKON_FE:
2718 hw->flags = SKY2_HW_RAMBUFFER;
2719 break; 2758 break;
2720 2759
2721 case CHIP_ID_YUKON_FE_P: 2760 case CHIP_ID_YUKON_FE_P:
@@ -3923,13 +3962,6 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
3923 sky2->hw = hw; 3962 sky2->hw = hw;
3924 sky2->msg_enable = netif_msg_init(debug, default_msg); 3963 sky2->msg_enable = netif_msg_init(debug, default_msg);
3925 3964
3926 /* This chip has hardware problems that generates
3927 * bogus PHY receive status so by default shut up the message.
3928 */
3929 if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
3930 hw->chip_rev == CHIP_REV_YU_FE2_A0)
3931 sky2->msg_enable &= ~NETIF_MSG_RX_ERR;
3932
3933 /* Auto speed and flow control */ 3965 /* Auto speed and flow control */
3934 sky2->autoneg = AUTONEG_ENABLE; 3966 sky2->autoneg = AUTONEG_ENABLE;
3935 sky2->flow_mode = FC_BOTH; 3967 sky2->flow_mode = FC_BOTH;
@@ -3953,8 +3985,12 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
3953 dev->features |= NETIF_F_HIGHDMA; 3985 dev->features |= NETIF_F_HIGHDMA;
3954 3986
3955#ifdef SKY2_VLAN_TAG_USED 3987#ifdef SKY2_VLAN_TAG_USED
3956 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 3988 /* The workaround for FE+ status conflicts with VLAN tag detection. */
3957 dev->vlan_rx_register = sky2_vlan_rx_register; 3989 if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
3990 sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0)) {
3991 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3992 dev->vlan_rx_register = sky2_vlan_rx_register;
3993 }
3958#endif 3994#endif
3959 3995
3960 /* read the mac address */ 3996 /* read the mac address */
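The sky2 receive path above now rejects frames outside the valid length range up front, accepts frames as-is on Yukon FE+ rev A0 (whose hardware reports bogus receive status), and only then applies the usual status and length-mismatch checks; VLAN offload is likewise left disabled on that revision because the workaround conflicts with hardware VLAN tag detection. A minimal sketch of that decision order, assuming the constants from sky2.h and a hypothetical helper name rx_frame_ok():

    /* Sketch only: condenses the goto-based flow of sky2_receive() above.
     * Returns 1 = accept, 0 = length error, -1 = hardware error. */
    static int rx_frame_ok(u8 chip_id, u8 chip_rev, u16 length, u16 count,
                           u32 status, u16 rx_data_size)
    {
            if (length < ETH_ZLEN || length > rx_data_size)
                    return 0;               /* len_error */
            if (chip_id == CHIP_ID_YUKON_FE_P &&
                chip_rev == CHIP_REV_YU_FE2_A0 && length != count)
                    return 1;               /* FE+ A0: status is unreliable, accept */
            if (status & GMR_FS_ANY_ERR)
                    return -1;              /* error */
            if (length != count)
                    return 0;               /* truncated: PHY/MAC lengths disagree */
            return 1;                       /* okay */
    }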
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 69cd98400fe6..8bc5c54e3efa 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2063,7 +2063,7 @@ struct sky2_hw {
2063#define SKY2_HW_FIBRE_PHY 0x00000002 2063#define SKY2_HW_FIBRE_PHY 0x00000002
2064#define SKY2_HW_GIGABIT 0x00000004 2064#define SKY2_HW_GIGABIT 0x00000004
2065#define SKY2_HW_NEWER_PHY 0x00000008 2065#define SKY2_HW_NEWER_PHY 0x00000008
2066#define SKY2_HW_RAMBUFFER 0x00000010 /* chip has RAM FIFO */ 2066#define SKY2_HW_FIFO_HANG_CHECK 0x00000010
2067#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */ 2067#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */
2068#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ 2068#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */
2069#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ 2069#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 7dcaa09b3c20..50f2dd9e1bb2 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1444,7 +1444,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, quirk_netmos);
1444static void __devinit quirk_e100_interrupt(struct pci_dev *dev) 1444static void __devinit quirk_e100_interrupt(struct pci_dev *dev)
1445{ 1445{
1446 u16 command; 1446 u16 command;
1447 u32 bar;
1448 u8 __iomem *csr; 1447 u8 __iomem *csr;
1449 u8 cmd_hi; 1448 u8 cmd_hi;
1450 1449
@@ -1476,12 +1475,12 @@ static void __devinit quirk_e100_interrupt(struct pci_dev *dev)
1476 * re-enable them when it's ready. 1475 * re-enable them when it's ready.
1477 */ 1476 */
1478 pci_read_config_word(dev, PCI_COMMAND, &command); 1477 pci_read_config_word(dev, PCI_COMMAND, &command);
1479 pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &bar);
1480 1478
1481 if (!(command & PCI_COMMAND_MEMORY) || !bar) 1479 if (!(command & PCI_COMMAND_MEMORY) || !pci_resource_start(dev, 0))
1482 return; 1480 return;
1483 1481
1484 csr = ioremap(bar, 8); 1482 /* Convert from PCI bus to resource space. */
1483 csr = ioremap(pci_resource_start(dev, 0), 8);
1485 if (!csr) { 1484 if (!csr) {
1486 printk(KERN_WARNING "PCI: Can't map %s e100 registers\n", 1485 printk(KERN_WARNING "PCI: Can't map %s e100 registers\n",
1487 pci_name(dev)); 1486 pci_name(dev));
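The e100 interrupt quirk above stops reading BAR 0 straight out of config space and instead maps the address the PCI core already resolved, so any bus-to-resource translation done by the host bridge is honoured. A minimal sketch of the pattern, assuming the quirk's struct pci_dev *dev:

    /* Sketch only: map a small register window via the resource, not the raw BAR. */
    unsigned long start = pci_resource_start(dev, 0);

    if (!(command & PCI_COMMAND_MEMORY) || !start)
            return;                         /* memory decoding off or no BAR 0 */
    csr = ioremap(start, 8);                /* 8 bytes cover the CSR bytes probed here */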
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 77b06a983fa7..95cf7b6cd622 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -2314,6 +2314,7 @@ int __devinit scsi_esp_register(struct esp *esp, struct device *dev)
2314 esp->host->transportt = esp_transport_template; 2314 esp->host->transportt = esp_transport_template;
2315 esp->host->max_lun = ESP_MAX_LUN; 2315 esp->host->max_lun = ESP_MAX_LUN;
2316 esp->host->cmd_per_lun = 2; 2316 esp->host->cmd_per_lun = 2;
2317 esp->host->unique_id = instance;
2317 2318
2318 esp_set_clock_params(esp); 2319 esp_set_clock_params(esp);
2319 2320
@@ -2337,7 +2338,7 @@ int __devinit scsi_esp_register(struct esp *esp, struct device *dev)
2337 if (err) 2338 if (err)
2338 return err; 2339 return err;
2339 2340
2340 esp->host->unique_id = instance++; 2341 instance++;
2341 2342
2342 scsi_scan_host(esp->host); 2343 scsi_scan_host(esp->host);
2343 2344
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 6f56f8750635..4df21c92ff1e 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -787,10 +787,12 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
787 struct scsi_target *starget = sdev->sdev_target; 787 struct scsi_target *starget = sdev->sdev_target;
788 struct Scsi_Host *shost = sdev->host; 788 struct Scsi_Host *shost = sdev->host;
789 int len = sdev->inquiry_len; 789 int len = sdev->inquiry_len;
790 int min_period = spi_min_period(starget);
791 int max_width = spi_max_width(starget);
790 /* first set us up for narrow async */ 792 /* first set us up for narrow async */
791 DV_SET(offset, 0); 793 DV_SET(offset, 0);
792 DV_SET(width, 0); 794 DV_SET(width, 0);
793 795
794 if (spi_dv_device_compare_inquiry(sdev, buffer, buffer, DV_LOOPS) 796 if (spi_dv_device_compare_inquiry(sdev, buffer, buffer, DV_LOOPS)
795 != SPI_COMPARE_SUCCESS) { 797 != SPI_COMPARE_SUCCESS) {
796 starget_printk(KERN_ERR, starget, "Domain Validation Initial Inquiry Failed\n"); 798 starget_printk(KERN_ERR, starget, "Domain Validation Initial Inquiry Failed\n");
@@ -798,9 +800,13 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
798 return; 800 return;
799 } 801 }
800 802
803 if (!scsi_device_wide(sdev)) {
804 spi_max_width(starget) = 0;
805 max_width = 0;
806 }
807
801 /* test width */ 808 /* test width */
802 if (i->f->set_width && spi_max_width(starget) && 809 if (i->f->set_width && max_width) {
803 scsi_device_wide(sdev)) {
804 i->f->set_width(starget, 1); 810 i->f->set_width(starget, 1);
805 811
806 if (spi_dv_device_compare_inquiry(sdev, buffer, 812 if (spi_dv_device_compare_inquiry(sdev, buffer,
@@ -809,6 +815,11 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
809 != SPI_COMPARE_SUCCESS) { 815 != SPI_COMPARE_SUCCESS) {
810 starget_printk(KERN_ERR, starget, "Wide Transfers Fail\n"); 816 starget_printk(KERN_ERR, starget, "Wide Transfers Fail\n");
811 i->f->set_width(starget, 0); 817 i->f->set_width(starget, 0);
818 /* Make sure we don't force wide back on by asking
819 * for a transfer period that requires it */
820 max_width = 0;
821 if (min_period < 10)
822 min_period = 10;
812 } 823 }
813 } 824 }
814 825
@@ -828,7 +839,8 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
828 839
829 /* now set up to the maximum */ 840 /* now set up to the maximum */
830 DV_SET(offset, spi_max_offset(starget)); 841 DV_SET(offset, spi_max_offset(starget));
831 DV_SET(period, spi_min_period(starget)); 842 DV_SET(period, min_period);
843
832 /* try QAS requests; this should be harmless to set if the 844 /* try QAS requests; this should be harmless to set if the
833 * target supports it */ 845 * target supports it */
834 if (scsi_device_qas(sdev)) { 846 if (scsi_device_qas(sdev)) {
@@ -837,14 +849,14 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
837 DV_SET(qas, 0); 849 DV_SET(qas, 0);
838 } 850 }
839 851
840 if (scsi_device_ius(sdev) && spi_min_period(starget) < 9) { 852 if (scsi_device_ius(sdev) && min_period < 9) {
841 /* This u320 (or u640). Set IU transfers */ 853 /* This u320 (or u640). Set IU transfers */
842 DV_SET(iu, 1); 854 DV_SET(iu, 1);
843 /* Then set the optional parameters */ 855 /* Then set the optional parameters */
844 DV_SET(rd_strm, 1); 856 DV_SET(rd_strm, 1);
845 DV_SET(wr_flow, 1); 857 DV_SET(wr_flow, 1);
846 DV_SET(rti, 1); 858 DV_SET(rti, 1);
847 if (spi_min_period(starget) == 8) 859 if (min_period == 8)
848 DV_SET(pcomp_en, 1); 860 DV_SET(pcomp_en, 1);
849 } else { 861 } else {
850 DV_SET(iu, 0); 862 DV_SET(iu, 0);
@@ -862,6 +874,10 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
862 } else { 874 } else {
863 DV_SET(dt, 1); 875 DV_SET(dt, 1);
864 } 876 }
877 /* set width last because it will pull all the other
878 * parameters down to required values */
879 DV_SET(width, max_width);
880
865 /* Do the read only INQUIRY tests */ 881 /* Do the read only INQUIRY tests */
866 spi_dv_retrain(sdev, buffer, buffer + sdev->inquiry_len, 882 spi_dv_retrain(sdev, buffer, buffer + sdev->inquiry_len,
867 spi_dv_device_compare_inquiry); 883 spi_dv_device_compare_inquiry);
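The domain-validation changes above cache the target's limits in min_period and max_width, clear the wide limit (and floor min_period at 10) when wide transfers fail to verify, and apply the width setting last so it can pull the other negotiated parameters back down to values a narrow bus supports. A minimal sketch of the revised ordering, assuming the file's DV_SET() macro and a hypothetical wide_failed flag holding the inquiry result:

    /* Sketch only: negotiate against cached limits, set width last. */
    int min_period = spi_min_period(starget);
    int max_width  = spi_max_width(starget);

    if (wide_failed) {                      /* wide inquiry comparison failed */
            max_width = 0;
            if (min_period < 10)            /* period factors below 10 would force wide back on */
                    min_period = 10;
    }
    DV_SET(offset, spi_max_offset(starget));
    DV_SET(period, min_period);
    /* ... QAS, IU and DT negotiation keyed off min_period ... */
    DV_SET(width, max_width);               /* last: clamps the rest if the bus is narrow */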
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm1.h b/drivers/serial/cpm_uart/cpm_uart_cpm1.h
index a99e45e2b6d8..2a6477834c3e 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm1.h
+++ b/drivers/serial/cpm_uart/cpm_uart_cpm1.h
@@ -37,6 +37,6 @@ static inline void cpm_set_smc_fcr(volatile smc_uart_t * up)
37 up->smc_tfcr = SMC_EB; 37 up->smc_tfcr = SMC_EB;
38} 38}
39 39
40#define DPRAM_BASE ((unsigned char *)&cpmp->cp_dpmem[0]) 40#define DPRAM_BASE ((unsigned char *)cpm_dpram_addr(0))
41 41
42#endif 42#endif
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index e348ba684050..ff610c23314b 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -38,7 +38,7 @@
38#include <asm/prom.h> 38#include <asm/prom.h>
39#include <asm/of_device.h> 39#include <asm/of_device.h>
40 40
41#if defined(CONFIG_SERIAL_SUNZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) 41#if defined(CONFIG_SERIAL_SUNSAB_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
42#define SUPPORT_SYSRQ 42#define SUPPORT_SYSRQ
43#endif 43#endif
44 44
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 8d7ab74170d5..a593f900eff4 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -431,6 +431,7 @@ static int w1_uevent(struct device *dev, char **envp, int num_envp,
431 err = add_uevent_var(envp, num_envp, &cur_index, buffer, buffer_size, 431 err = add_uevent_var(envp, num_envp, &cur_index, buffer, buffer_size,
432 &cur_len, "W1_SLAVE_ID=%024LX", 432 &cur_len, "W1_SLAVE_ID=%024LX",
433 (unsigned long long)sl->reg_num.id); 433 (unsigned long long)sl->reg_num.id);
434 envp[cur_index] = NULL;
434 if (err) 435 if (err)
435 return err; 436 return err;
436 437
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 5a5b7116cefb..37310b0e8107 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -3190,6 +3190,8 @@ COMPATIBLE_IOCTL(SIOCSIWRETRY)
3190COMPATIBLE_IOCTL(SIOCGIWRETRY) 3190COMPATIBLE_IOCTL(SIOCGIWRETRY)
3191COMPATIBLE_IOCTL(SIOCSIWPOWER) 3191COMPATIBLE_IOCTL(SIOCSIWPOWER)
3192COMPATIBLE_IOCTL(SIOCGIWPOWER) 3192COMPATIBLE_IOCTL(SIOCGIWPOWER)
3193COMPATIBLE_IOCTL(SIOCSIWAUTH)
3194COMPATIBLE_IOCTL(SIOCGIWAUTH)
3193/* hiddev */ 3195/* hiddev */
3194COMPATIBLE_IOCTL(HIDIOCGVERSION) 3196COMPATIBLE_IOCTL(HIDIOCGVERSION)
3195COMPATIBLE_IOCTL(HIDIOCAPPLICATION) 3197COMPATIBLE_IOCTL(HIDIOCAPPLICATION)
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index a21e4bc5444b..d098c7af0d22 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -171,19 +171,14 @@ found:
171 * GRANTED_RES message by cookie, without having to rely on the client's IP 171 * GRANTED_RES message by cookie, without having to rely on the client's IP
172 * address. --okir 172 * address. --okir
173 */ 173 */
174static inline struct nlm_block * 174static struct nlm_block *
175nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_file *file, 175nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
176 struct nlm_lock *lock, struct nlm_cookie *cookie) 176 struct nlm_file *file, struct nlm_lock *lock,
177 struct nlm_cookie *cookie)
177{ 178{
178 struct nlm_block *block; 179 struct nlm_block *block;
179 struct nlm_host *host;
180 struct nlm_rqst *call = NULL; 180 struct nlm_rqst *call = NULL;
181 181
182 /* Create host handle for callback */
183 host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len);
184 if (host == NULL)
185 return NULL;
186
187 call = nlm_alloc_call(host); 182 call = nlm_alloc_call(host);
188 if (call == NULL) 183 if (call == NULL)
189 return NULL; 184 return NULL;
@@ -366,6 +361,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
366 struct nlm_lock *lock, int wait, struct nlm_cookie *cookie) 361 struct nlm_lock *lock, int wait, struct nlm_cookie *cookie)
367{ 362{
368 struct nlm_block *block = NULL; 363 struct nlm_block *block = NULL;
364 struct nlm_host *host;
369 int error; 365 int error;
370 __be32 ret; 366 __be32 ret;
371 367
@@ -377,6 +373,10 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
377 (long long)lock->fl.fl_end, 373 (long long)lock->fl.fl_end,
378 wait); 374 wait);
379 375
376 /* Create host handle for callback */
377 host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len);
378 if (host == NULL)
379 return nlm_lck_denied_nolocks;
380 380
381 /* Lock file against concurrent access */ 381 /* Lock file against concurrent access */
382 mutex_lock(&file->f_mutex); 382 mutex_lock(&file->f_mutex);
@@ -385,7 +385,8 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
385 */ 385 */
386 block = nlmsvc_lookup_block(file, lock); 386 block = nlmsvc_lookup_block(file, lock);
387 if (block == NULL) { 387 if (block == NULL) {
388 block = nlmsvc_create_block(rqstp, file, lock, cookie); 388 block = nlmsvc_create_block(rqstp, nlm_get_host(host), file,
389 lock, cookie);
389 ret = nlm_lck_denied_nolocks; 390 ret = nlm_lck_denied_nolocks;
390 if (block == NULL) 391 if (block == NULL)
391 goto out; 392 goto out;
@@ -449,6 +450,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
449out: 450out:
450 mutex_unlock(&file->f_mutex); 451 mutex_unlock(&file->f_mutex);
451 nlmsvc_release_block(block); 452 nlmsvc_release_block(block);
453 nlm_release_host(host);
452 dprintk("lockd: nlmsvc_lock returned %u\n", ret); 454 dprintk("lockd: nlmsvc_lock returned %u\n", ret);
453 return ret; 455 return ret;
454} 456}
@@ -477,10 +479,15 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
477 479
478 if (block == NULL) { 480 if (block == NULL) {
479 struct file_lock *conf = kzalloc(sizeof(*conf), GFP_KERNEL); 481 struct file_lock *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
482 struct nlm_host *host;
480 483
481 if (conf == NULL) 484 if (conf == NULL)
482 return nlm_granted; 485 return nlm_granted;
483 block = nlmsvc_create_block(rqstp, file, lock, cookie); 486 /* Create host handle for callback */
487 host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len);
488 if (host == NULL)
489 return nlm_lck_denied_nolocks;
490 block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
484 if (block == NULL) { 491 if (block == NULL) {
485 kfree(conf); 492 kfree(conf);
486 return nlm_granted; 493 return nlm_granted;
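In lockd above, nlmsvc_create_block() no longer looks up the nlm_host itself; each caller resolves the host, hands a reference to the block (nlm_get_host() in the locking path), and drops its own reference when done, so the host refcount stays balanced on every exit path. A minimal sketch of the caller-side ownership, with error handling trimmed:

    /* Sketch only: the caller owns the host reference around block creation. */
    host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len);
    if (host == NULL)
            return nlm_lck_denied_nolocks;

    block = nlmsvc_lookup_block(file, lock);
    if (block == NULL)
            block = nlmsvc_create_block(rqstp, nlm_get_host(host), file,
                                        lock, cookie);
    /* ... grant or block the lock ... */
    nlmsvc_release_block(block);
    nlm_release_host(host);                 /* balances nlmsvc_lookup_host() */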
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index a49f9feff776..a204484072f3 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -588,16 +588,6 @@ static int nfs_init_server(struct nfs_server *server, const struct nfs_mount_dat
588 server->namelen = data->namlen; 588 server->namelen = data->namlen;
589 /* Create a client RPC handle for the NFSv3 ACL management interface */ 589 /* Create a client RPC handle for the NFSv3 ACL management interface */
590 nfs_init_server_aclclient(server); 590 nfs_init_server_aclclient(server);
591 if (clp->cl_nfsversion == 3) {
592 if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN)
593 server->namelen = NFS3_MAXNAMLEN;
594 if (!(data->flags & NFS_MOUNT_NORDIRPLUS))
595 server->caps |= NFS_CAP_READDIRPLUS;
596 } else {
597 if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN)
598 server->namelen = NFS2_MAXNAMLEN;
599 }
600
601 dprintk("<-- nfs_init_server() = 0 [new %p]\n", clp); 591 dprintk("<-- nfs_init_server() = 0 [new %p]\n", clp);
602 return 0; 592 return 0;
603 593
@@ -794,6 +784,16 @@ struct nfs_server *nfs_create_server(const struct nfs_mount_data *data,
794 error = nfs_probe_fsinfo(server, mntfh, &fattr); 784 error = nfs_probe_fsinfo(server, mntfh, &fattr);
795 if (error < 0) 785 if (error < 0)
796 goto error; 786 goto error;
787 if (server->nfs_client->rpc_ops->version == 3) {
788 if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN)
789 server->namelen = NFS3_MAXNAMLEN;
790 if (!(data->flags & NFS_MOUNT_NORDIRPLUS))
791 server->caps |= NFS_CAP_READDIRPLUS;
792 } else {
793 if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN)
794 server->namelen = NFS2_MAXNAMLEN;
795 }
796
797 if (!(fattr.valid & NFS_ATTR_FATTR)) { 797 if (!(fattr.valid & NFS_ATTR_FATTR)) {
798 error = server->nfs_client->rpc_ops->getattr(server, mntfh, &fattr); 798 error = server->nfs_client->rpc_ops->getattr(server, mntfh, &fattr);
799 if (error < 0) { 799 if (error < 0) {
@@ -984,6 +984,9 @@ struct nfs_server *nfs4_create_server(const struct nfs4_mount_data *data,
984 if (error < 0) 984 if (error < 0)
985 goto error; 985 goto error;
986 986
987 if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
988 server->namelen = NFS4_MAXNAMLEN;
989
987 BUG_ON(!server->nfs_client); 990 BUG_ON(!server->nfs_client);
988 BUG_ON(!server->nfs_client->rpc_ops); 991 BUG_ON(!server->nfs_client->rpc_ops);
989 BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops); 992 BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
@@ -1056,6 +1059,9 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
1056 if (error < 0) 1059 if (error < 0)
1057 goto error; 1060 goto error;
1058 1061
1062 if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
1063 server->namelen = NFS4_MAXNAMLEN;
1064
1059 dprintk("Referral FSID: %llx:%llx\n", 1065 dprintk("Referral FSID: %llx:%llx\n",
1060 (unsigned long long) server->fsid.major, 1066 (unsigned long long) server->fsid.major,
1061 (unsigned long long) server->fsid.minor); 1067 (unsigned long long) server->fsid.minor);
@@ -1115,6 +1121,9 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
1115 if (error < 0) 1121 if (error < 0)
1116 goto out_free_server; 1122 goto out_free_server;
1117 1123
1124 if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
1125 server->namelen = NFS4_MAXNAMLEN;
1126
1118 dprintk("Cloned FSID: %llx:%llx\n", 1127 dprintk("Cloned FSID: %llx:%llx\n",
1119 (unsigned long long) server->fsid.major, 1128 (unsigned long long) server->fsid.major,
1120 (unsigned long long) server->fsid.minor); 1129 (unsigned long long) server->fsid.minor);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index ea97408e423e..e4a04d16b8b0 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1162,6 +1162,8 @@ static struct dentry *nfs_readdir_lookup(nfs_readdir_descriptor_t *desc)
1162 } 1162 }
1163 if (!desc->plus || !(entry->fattr->valid & NFS_ATTR_FATTR)) 1163 if (!desc->plus || !(entry->fattr->valid & NFS_ATTR_FATTR))
1164 return NULL; 1164 return NULL;
1165 if (name.len > NFS_SERVER(dir)->namelen)
1166 return NULL;
1165 /* Note: caller is already holding the dir->i_mutex! */ 1167 /* Note: caller is already holding the dir->i_mutex! */
1166 dentry = d_alloc(parent, &name); 1168 dentry = d_alloc(parent, &name);
1167 if (dentry == NULL) 1169 if (dentry == NULL)
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index d1cbf0a0fbb2..522e5ad4d8ad 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -175,6 +175,9 @@ next_component:
175 path++; 175 path++;
176 name.len = path - (const char *) name.name; 176 name.len = path - (const char *) name.name;
177 177
178 if (name.len > NFS4_MAXNAMLEN)
179 return -ENAMETOOLONG;
180
178eat_dot_dir: 181eat_dot_dir:
179 while (*path == '/') 182 while (*path == '/')
180 path++; 183 path++;
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 73402c5eeb8a..38eb0b7a1f3d 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -894,7 +894,7 @@ magic_found:
894 goto again; 894 goto again;
895 } 895 }
896 896
897 897 sbi->s_flags = flags;/*after that line some functions use s_flags*/
898 ufs_print_super_stuff(sb, usb1, usb2, usb3); 898 ufs_print_super_stuff(sb, usb1, usb2, usb3);
899 899
900 /* 900 /*
@@ -1025,8 +1025,6 @@ magic_found:
1025 UFS_MOUNT_UFSTYPE_44BSD) 1025 UFS_MOUNT_UFSTYPE_44BSD)
1026 uspi->s_maxsymlinklen = 1026 uspi->s_maxsymlinklen =
1027 fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen); 1027 fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen);
1028
1029 sbi->s_flags = flags;
1030 1028
1031 inode = iget(sb, UFS_ROOTINO); 1029 inode = iget(sb, UFS_ROOTINO);
1032 if (!inode || is_bad_inode(inode)) 1030 if (!inode || is_bad_inode(inode))
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index fa25b7dcc6c3..d7e136143066 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -52,11 +52,6 @@ typedef struct xfs_buf_log_format_t {
52#define XFS_BLI_UDQUOT_BUF 0x4 52#define XFS_BLI_UDQUOT_BUF 0x4
53#define XFS_BLI_PDQUOT_BUF 0x8 53#define XFS_BLI_PDQUOT_BUF 0x8
54#define XFS_BLI_GDQUOT_BUF 0x10 54#define XFS_BLI_GDQUOT_BUF 0x10
55/*
56 * This flag indicates that the buffer contains newly allocated
57 * inodes.
58 */
59#define XFS_BLI_INODE_NEW_BUF 0x20
60 55
61#define XFS_BLI_CHUNK 128 56#define XFS_BLI_CHUNK 128
62#define XFS_BLI_SHIFT 7 57#define XFS_BLI_SHIFT 7
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 7174991f4bef..8ae6e8e5f3db 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1874,7 +1874,6 @@ xlog_recover_do_inode_buffer(
1874/*ARGSUSED*/ 1874/*ARGSUSED*/
1875STATIC void 1875STATIC void
1876xlog_recover_do_reg_buffer( 1876xlog_recover_do_reg_buffer(
1877 xfs_mount_t *mp,
1878 xlog_recover_item_t *item, 1877 xlog_recover_item_t *item,
1879 xfs_buf_t *bp, 1878 xfs_buf_t *bp,
1880 xfs_buf_log_format_t *buf_f) 1879 xfs_buf_log_format_t *buf_f)
@@ -1885,50 +1884,6 @@ xlog_recover_do_reg_buffer(
1885 unsigned int *data_map = NULL; 1884 unsigned int *data_map = NULL;
1886 unsigned int map_size = 0; 1885 unsigned int map_size = 0;
1887 int error; 1886 int error;
1888 int stale_buf = 1;
1889
1890 /*
1891 * Scan through the on-disk inode buffer and attempt to
1892 * determine if it has been written to since it was logged.
1893 *
1894 * - If any of the magic numbers are incorrect then the buffer is stale
1895 * - If any of the modes are non-zero then the buffer is not stale
1896 * - If all of the modes are zero and at least one of the generation
1897 * counts is non-zero then the buffer is stale
1898 *
1899 * If the end result is a stale buffer then the log buffer is replayed
1900 * otherwise it is skipped.
1901 *
1902 * This heuristic is not perfect. It can be improved by scanning the
1903 * entire inode chunk for evidence that any of the inode clusters have
1904 * been updated. To fix this problem completely we will need a major
1905 * architectural change to the logging system.
1906 */
1907 if (buf_f->blf_flags & XFS_BLI_INODE_NEW_BUF) {
1908 xfs_dinode_t *dip;
1909 int inodes_per_buf;
1910 int mode_count = 0;
1911 int gen_count = 0;
1912
1913 stale_buf = 0;
1914 inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog;
1915 for (i = 0; i < inodes_per_buf; i++) {
1916 dip = (xfs_dinode_t *)xfs_buf_offset(bp,
1917 i * mp->m_sb.sb_inodesize);
1918 if (be16_to_cpu(dip->di_core.di_magic) !=
1919 XFS_DINODE_MAGIC) {
1920 stale_buf = 1;
1921 break;
1922 }
1923 if (dip->di_core.di_mode)
1924 mode_count++;
1925 if (dip->di_core.di_gen)
1926 gen_count++;
1927 }
1928
1929 if (!mode_count && gen_count)
1930 stale_buf = 1;
1931 }
1932 1887
1933 switch (buf_f->blf_type) { 1888 switch (buf_f->blf_type) {
1934 case XFS_LI_BUF: 1889 case XFS_LI_BUF:
@@ -1962,7 +1917,7 @@ xlog_recover_do_reg_buffer(
1962 -1, 0, XFS_QMOPT_DOWARN, 1917 -1, 0, XFS_QMOPT_DOWARN,
1963 "dquot_buf_recover"); 1918 "dquot_buf_recover");
1964 } 1919 }
1965 if (!error && stale_buf) 1920 if (!error)
1966 memcpy(xfs_buf_offset(bp, 1921 memcpy(xfs_buf_offset(bp,
1967 (uint)bit << XFS_BLI_SHIFT), /* dest */ 1922 (uint)bit << XFS_BLI_SHIFT), /* dest */
1968 item->ri_buf[i].i_addr, /* source */ 1923 item->ri_buf[i].i_addr, /* source */
@@ -2134,7 +2089,7 @@ xlog_recover_do_dquot_buffer(
2134 if (log->l_quotaoffs_flag & type) 2089 if (log->l_quotaoffs_flag & type)
2135 return; 2090 return;
2136 2091
2137 xlog_recover_do_reg_buffer(mp, item, bp, buf_f); 2092 xlog_recover_do_reg_buffer(item, bp, buf_f);
2138} 2093}
2139 2094
2140/* 2095/*
@@ -2235,7 +2190,7 @@ xlog_recover_do_buffer_trans(
2235 (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) { 2190 (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
2236 xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f); 2191 xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2237 } else { 2192 } else {
2238 xlog_recover_do_reg_buffer(mp, item, bp, buf_f); 2193 xlog_recover_do_reg_buffer(item, bp, buf_f);
2239 } 2194 }
2240 if (error) 2195 if (error)
2241 return XFS_ERROR(error); 2196 return XFS_ERROR(error);
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 95fff6872a2f..60b6b898022b 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -966,7 +966,6 @@ xfs_trans_inode_alloc_buf(
966 ASSERT(atomic_read(&bip->bli_refcount) > 0); 966 ASSERT(atomic_read(&bip->bli_refcount) > 0);
967 967
968 bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF; 968 bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
969 bip->bli_format.blf_flags |= XFS_BLI_INODE_NEW_BUF;
970} 969}
971 970
972 971
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 202acb9ff4d0..f85f77a538aa 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -147,10 +147,6 @@ static inline void unregister_hotplug_dock_device(acpi_handle handle)
147/*-------------------------------------------------------------------------- 147/*--------------------------------------------------------------------------
148 Suspend/Resume 148 Suspend/Resume
149 -------------------------------------------------------------------------- */ 149 -------------------------------------------------------------------------- */
150#ifdef CONFIG_ACPI_SLEEP
151extern int acpi_sleep_init(void); 150extern int acpi_sleep_init(void);
152#else
153static inline int acpi_sleep_init(void) { return 0; }
154#endif
155 151
156#endif /*__ACPI_DRIVERS_H__*/ 152#endif /*__ACPI_DRIVERS_H__*/
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 609756c61676..d69ba937e092 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -214,11 +214,6 @@ static inline unsigned long get_limit(unsigned long segment)
214 */ 214 */
215 215
216 216
217/*
218 * Actually only lfence would be needed for mb() because all stores done
219 * by the kernel should be already ordered. But keep a full barrier for now.
220 */
221
222#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) 217#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
223#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) 218#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
224 219
diff --git a/include/asm-mips/page.h b/include/asm-mips/page.h
index b92dd8c760da..e3301e54d559 100644
--- a/include/asm-mips/page.h
+++ b/include/asm-mips/page.h
@@ -142,7 +142,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
142/* 142/*
143 * __pa()/__va() should be used only during mem init. 143 * __pa()/__va() should be used only during mem init.
144 */ 144 */
145#if defined(CONFIG_64BIT) && !defined(CONFIG_BUILD_ELF64) 145#ifdef CONFIG_64BIT
146#define __pa(x) \ 146#define __pa(x) \
147({ \ 147({ \
148 unsigned long __x = (unsigned long)(x); \ 148 unsigned long __x = (unsigned long)(x); \
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 963051a967d6..3ec6e7ff5fbd 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -32,15 +32,7 @@
32 * CPUFREQ NOTIFIER INTERFACE * 32 * CPUFREQ NOTIFIER INTERFACE *
33 *********************************************************************/ 33 *********************************************************************/
34 34
35#ifdef CONFIG_CPU_FREQ
36int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list); 35int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
37#else
38static inline int cpufreq_register_notifier(struct notifier_block *nb,
39 unsigned int list)
40{
41 return 0;
42}
43#endif
44int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list); 36int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
45 37
46#define CPUFREQ_TRANSITION_NOTIFIER (0) 38#define CPUFREQ_TRANSITION_NOTIFIER (0)
@@ -268,22 +260,17 @@ struct freq_attr {
268int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); 260int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
269int cpufreq_update_policy(unsigned int cpu); 261int cpufreq_update_policy(unsigned int cpu);
270 262
263/* query the current CPU frequency (in kHz). If zero, cpufreq couldn't detect it */
264unsigned int cpufreq_get(unsigned int cpu);
271 265
272/* 266/* query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it */
273 * query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it
274 */
275#ifdef CONFIG_CPU_FREQ 267#ifdef CONFIG_CPU_FREQ
276unsigned int cpufreq_quick_get(unsigned int cpu); 268unsigned int cpufreq_quick_get(unsigned int cpu);
277unsigned int cpufreq_get(unsigned int cpu);
278#else 269#else
279static inline unsigned int cpufreq_quick_get(unsigned int cpu) 270static inline unsigned int cpufreq_quick_get(unsigned int cpu)
280{ 271{
281 return 0; 272 return 0;
282} 273}
283static inline unsigned int cpufreq_get(unsigned int cpu)
284{
285 return 0;
286}
287#endif 274#endif
288 275
289 276
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 991c85bb9e36..e8e3a64eb322 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -114,7 +114,6 @@ sctp_state_fn_t sctp_sf_do_4_C;
114sctp_state_fn_t sctp_sf_eat_data_6_2; 114sctp_state_fn_t sctp_sf_eat_data_6_2;
115sctp_state_fn_t sctp_sf_eat_data_fast_4_4; 115sctp_state_fn_t sctp_sf_eat_data_fast_4_4;
116sctp_state_fn_t sctp_sf_eat_sack_6_2; 116sctp_state_fn_t sctp_sf_eat_sack_6_2;
117sctp_state_fn_t sctp_sf_tabort_8_4_8;
118sctp_state_fn_t sctp_sf_operr_notify; 117sctp_state_fn_t sctp_sf_operr_notify;
119sctp_state_fn_t sctp_sf_t1_init_timer_expire; 118sctp_state_fn_t sctp_sf_t1_init_timer_expire;
120sctp_state_fn_t sctp_sf_t1_cookie_timer_expire; 119sctp_state_fn_t sctp_sf_t1_cookie_timer_expire;
@@ -247,6 +246,9 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *,
247 int, __be16); 246 int, __be16);
248struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc, 247struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
249 union sctp_addr *addr); 248 union sctp_addr *addr);
249int sctp_verify_asconf(const struct sctp_association *asoc,
250 struct sctp_paramhdr *param_hdr, void *chunk_end,
251 struct sctp_paramhdr **errp);
250struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, 252struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
251 struct sctp_chunk *asconf); 253 struct sctp_chunk *asconf);
252int sctp_process_asconf_ack(struct sctp_association *asoc, 254int sctp_process_asconf_ack(struct sctp_association *asoc,
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index c2fe2dcc9afc..baff49dfcdbd 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -421,6 +421,7 @@ struct sctp_signed_cookie {
421 * internally. 421 * internally.
422 */ 422 */
423union sctp_addr_param { 423union sctp_addr_param {
424 struct sctp_paramhdr p;
424 struct sctp_ipv4addr_param v4; 425 struct sctp_ipv4addr_param v4;
425 struct sctp_ipv6addr_param v6; 426 struct sctp_ipv6addr_param v6;
426}; 427};
@@ -1156,7 +1157,7 @@ int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
1156int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *, 1157int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *,
1157 __u8 use_as_src, gfp_t gfp); 1158 __u8 use_as_src, gfp_t gfp);
1158int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *, 1159int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *,
1159 void (*rcu_call)(struct rcu_head *, 1160 void fastcall (*rcu_call)(struct rcu_head *,
1160 void (*func)(struct rcu_head *))); 1161 void (*func)(struct rcu_head *)));
1161int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *, 1162int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *,
1162 struct sctp_sock *); 1163 struct sctp_sock *);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 185c7ecce4cc..54053de0bdd7 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1059,14 +1059,12 @@ struct tcp_md5sig_key {
1059}; 1059};
1060 1060
1061struct tcp4_md5sig_key { 1061struct tcp4_md5sig_key {
1062 u8 *key; 1062 struct tcp_md5sig_key base;
1063 u16 keylen;
1064 __be32 addr; 1063 __be32 addr;
1065}; 1064};
1066 1065
1067struct tcp6_md5sig_key { 1066struct tcp6_md5sig_key {
1068 u8 *key; 1067 struct tcp_md5sig_key base;
1069 u16 keylen;
1070#if 0 1068#if 0
1071 u32 scope_id; /* XXX */ 1069 u32 scope_id; /* XXX */
1072#endif 1070#endif
diff --git a/kernel/futex.c b/kernel/futex.c
index e8935b195e88..fcc94e7b4086 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1943,9 +1943,10 @@ static inline int fetch_robust_entry(struct robust_list __user **entry,
1943void exit_robust_list(struct task_struct *curr) 1943void exit_robust_list(struct task_struct *curr)
1944{ 1944{
1945 struct robust_list_head __user *head = curr->robust_list; 1945 struct robust_list_head __user *head = curr->robust_list;
1946 struct robust_list __user *entry, *pending; 1946 struct robust_list __user *entry, *next_entry, *pending;
1947 unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; 1947 unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
1948 unsigned long futex_offset; 1948 unsigned long futex_offset;
1949 int rc;
1949 1950
1950 /* 1951 /*
1951 * Fetch the list head (which was registered earlier, via 1952 * Fetch the list head (which was registered earlier, via
@@ -1965,12 +1966,14 @@ void exit_robust_list(struct task_struct *curr)
1965 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip)) 1966 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
1966 return; 1967 return;
1967 1968
1968 if (pending) 1969 next_entry = NULL; /* avoid warning with gcc */
1969 handle_futex_death((void __user *)pending + futex_offset,
1970 curr, pip);
1971
1972 while (entry != &head->list) { 1970 while (entry != &head->list) {
1973 /* 1971 /*
1972 * Fetch the next entry in the list before calling
1973 * handle_futex_death:
1974 */
1975 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
1976 /*
1974 * A pending lock might already be on the list, so 1977 * A pending lock might already be on the list, so
1975 * don't process it twice: 1978 * don't process it twice:
1976 */ 1979 */
@@ -1978,11 +1981,10 @@ void exit_robust_list(struct task_struct *curr)
1978 if (handle_futex_death((void __user *)entry + futex_offset, 1981 if (handle_futex_death((void __user *)entry + futex_offset,
1979 curr, pi)) 1982 curr, pi))
1980 return; 1983 return;
1981 /* 1984 if (rc)
1982 * Fetch the next entry in the list:
1983 */
1984 if (fetch_robust_entry(&entry, &entry->next, &pi))
1985 return; 1985 return;
1986 entry = next_entry;
1987 pi = next_pi;
1986 /* 1988 /*
1987 * Avoid excessively long or circular lists: 1989 * Avoid excessively long or circular lists:
1988 */ 1990 */
@@ -1991,6 +1993,10 @@ void exit_robust_list(struct task_struct *curr)
1991 1993
1992 cond_resched(); 1994 cond_resched();
1993 } 1995 }
1996
1997 if (pending)
1998 handle_futex_death((void __user *)pending + futex_offset,
1999 curr, pip);
1994} 2000}
1995 2001
1996long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, 2002long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
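The futex change above reorders the robust-list walk: the next entry is fetched from user space before handle_futex_death() is called on the current one, since that call can wake another task that may reuse or unmap the node, and the list_op_pending entry is handled after the loop for the same reason. A minimal sketch of the resulting walk, with the compat variant omitted:

    /* Sketch only: read entry->next before acting on entry. */
    while (entry != &head->list) {
            rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
            if (entry != pending &&
                handle_futex_death((void __user *)entry + futex_offset, curr, pi))
                    return;
            if (rc)
                    return;                 /* user-space list became unreadable */
            entry = next_entry;
            pi = next_pi;
            if (!--limit)
                    break;                  /* cap walk length, guards circular lists */
            cond_resched();
    }
    if (pending)
            handle_futex_death((void __user *)pending + futex_offset, curr, pip);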
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 7e52eb051f22..2c2e2954b713 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -38,10 +38,11 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
38void compat_exit_robust_list(struct task_struct *curr) 38void compat_exit_robust_list(struct task_struct *curr)
39{ 39{
40 struct compat_robust_list_head __user *head = curr->compat_robust_list; 40 struct compat_robust_list_head __user *head = curr->compat_robust_list;
41 struct robust_list __user *entry, *pending; 41 struct robust_list __user *entry, *next_entry, *pending;
42 unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; 42 unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
43 compat_uptr_t uentry, upending; 43 compat_uptr_t uentry, next_uentry, upending;
44 compat_long_t futex_offset; 44 compat_long_t futex_offset;
45 int rc;
45 46
46 /* 47 /*
47 * Fetch the list head (which was registered earlier, via 48 * Fetch the list head (which was registered earlier, via
@@ -61,11 +62,16 @@ void compat_exit_robust_list(struct task_struct *curr)
61 if (fetch_robust_entry(&upending, &pending, 62 if (fetch_robust_entry(&upending, &pending,
62 &head->list_op_pending, &pip)) 63 &head->list_op_pending, &pip))
63 return; 64 return;
64 if (pending)
65 handle_futex_death((void __user *)pending + futex_offset, curr, pip);
66 65
66 next_entry = NULL; /* avoid warning with gcc */
67 while (entry != (struct robust_list __user *) &head->list) { 67 while (entry != (struct robust_list __user *) &head->list) {
68 /* 68 /*
69 * Fetch the next entry in the list before calling
70 * handle_futex_death:
71 */
72 rc = fetch_robust_entry(&next_uentry, &next_entry,
73 (compat_uptr_t __user *)&entry->next, &next_pi);
74 /*
69 * A pending lock might already be on the list, so 75 * A pending lock might already be on the list, so
70 * dont process it twice: 76 * dont process it twice:
71 */ 77 */
@@ -74,12 +80,11 @@ void compat_exit_robust_list(struct task_struct *curr)
74 curr, pi)) 80 curr, pi))
75 return; 81 return;
76 82
77 /* 83 if (rc)
78 * Fetch the next entry in the list:
79 */
80 if (fetch_robust_entry(&uentry, &entry,
81 (compat_uptr_t __user *)&entry->next, &pi))
82 return; 84 return;
85 uentry = next_uentry;
86 entry = next_entry;
87 pi = next_pi;
83 /* 88 /*
84 * Avoid excessively long or circular lists: 89 * Avoid excessively long or circular lists:
85 */ 90 */
@@ -88,6 +93,9 @@ void compat_exit_robust_list(struct task_struct *curr)
88 93
89 cond_resched(); 94 cond_resched();
90 } 95 }
96 if (pending)
97 handle_futex_death((void __user *)pending + futex_offset,
98 curr, pip);
91} 99}
92 100
93asmlinkage long 101asmlinkage long
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index c8580a1e6873..14b0e10dc95c 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -110,7 +110,7 @@ config SUSPEND
110 110
111config HIBERNATION_UP_POSSIBLE 111config HIBERNATION_UP_POSSIBLE
112 bool 112 bool
113 depends on X86 || PPC64_SWSUSP || FRV || PPC32 113 depends on X86 || PPC64_SWSUSP || PPC32
114 depends on !SMP 114 depends on !SMP
115 default y 115 default y
116 116
diff --git a/kernel/sys.c b/kernel/sys.c
index 1b33b05d346b..8ae2e636eb1b 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -32,6 +32,7 @@
32#include <linux/getcpu.h> 32#include <linux/getcpu.h>
33#include <linux/task_io_accounting_ops.h> 33#include <linux/task_io_accounting_ops.h>
34#include <linux/seccomp.h> 34#include <linux/seccomp.h>
35#include <linux/cpu.h>
35 36
36#include <linux/compat.h> 37#include <linux/compat.h>
37#include <linux/syscalls.h> 38#include <linux/syscalls.h>
@@ -878,6 +879,7 @@ void kernel_power_off(void)
878 kernel_shutdown_prepare(SYSTEM_POWER_OFF); 879 kernel_shutdown_prepare(SYSTEM_POWER_OFF);
879 if (pm_power_off_prepare) 880 if (pm_power_off_prepare)
880 pm_power_off_prepare(); 881 pm_power_off_prepare();
882 disable_nonboot_cpus();
881 sysdev_shutdown(); 883 sysdev_shutdown();
882 printk(KERN_EMERG "Power down.\n"); 884 printk(KERN_EMERG "Power down.\n");
883 machine_power_off(); 885 machine_power_off();
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 50a94eee4d92..495863a500cd 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -284,7 +284,7 @@ config LOCKDEP
284 select KALLSYMS_ALL 284 select KALLSYMS_ALL
285 285
286config LOCK_STAT 286config LOCK_STAT
287 bool "Lock usage statisitics" 287 bool "Lock usage statistics"
288 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT 288 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
289 select LOCKDEP 289 select LOCKDEP
290 select DEBUG_SPINLOCK 290 select DEBUG_SPINLOCK
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 84c795ee2d65..eab8c428cc93 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -42,7 +42,7 @@ static void clear_huge_page(struct page *page, unsigned long addr)
42 might_sleep(); 42 might_sleep();
43 for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) { 43 for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
44 cond_resched(); 44 cond_resched();
45 clear_user_highpage(page + i, addr); 45 clear_user_highpage(page + i, addr + i * PAGE_SIZE);
46 } 46 }
47} 47}
48 48
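The hugetlb fix above passes each subpage its own user virtual address instead of the huge page's base address, which matters on architectures where clear_user_highpage() uses the virtual address (for cache aliasing, for example). The resulting loop, as applied:

    /* Subpage i of the huge page lives at addr + i * PAGE_SIZE. */
    for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
            cond_resched();
            clear_user_highpage(page + i, addr + i * PAGE_SIZE);
    }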
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c
index afb6c6698b27..e475f2e1be13 100644
--- a/net/ieee80211/softmac/ieee80211softmac_assoc.c
+++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c
@@ -273,8 +273,6 @@ ieee80211softmac_assoc_work(struct work_struct *work)
273 ieee80211softmac_notify(mac->dev, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, ieee80211softmac_assoc_notify_scan, NULL); 273 ieee80211softmac_notify(mac->dev, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, ieee80211softmac_assoc_notify_scan, NULL);
274 if (ieee80211softmac_start_scan(mac)) { 274 if (ieee80211softmac_start_scan(mac)) {
275 dprintk(KERN_INFO PFX "Associate: failed to initiate scan. Is device up?\n"); 275 dprintk(KERN_INFO PFX "Associate: failed to initiate scan. Is device up?\n");
276 mac->associnfo.associating = 0;
277 mac->associnfo.associated = 0;
278 } 276 }
279 goto out; 277 goto out;
280 } else { 278 } else {
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
index d054e9224b3e..442b9875f3fb 100644
--- a/net/ieee80211/softmac/ieee80211softmac_wx.c
+++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
@@ -70,44 +70,30 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev,
70 char *extra) 70 char *extra)
71{ 71{
72 struct ieee80211softmac_device *sm = ieee80211_priv(net_dev); 72 struct ieee80211softmac_device *sm = ieee80211_priv(net_dev);
73 struct ieee80211softmac_network *n;
74 struct ieee80211softmac_auth_queue_item *authptr; 73 struct ieee80211softmac_auth_queue_item *authptr;
75 int length = 0; 74 int length = 0;
76 75
77check_assoc_again: 76check_assoc_again:
78 mutex_lock(&sm->associnfo.mutex); 77 mutex_lock(&sm->associnfo.mutex);
79 /* Check if we're already associating to this or another network
80 * If it's another network, cancel and start over with our new network
81 * If it's our network, ignore the change, we're already doing it!
82 */
83 if((sm->associnfo.associating || sm->associnfo.associated) && 78 if((sm->associnfo.associating || sm->associnfo.associated) &&
84 (data->essid.flags && data->essid.length)) { 79 (data->essid.flags && data->essid.length)) {
85 /* Get the associating network */ 80 dprintk(KERN_INFO PFX "Canceling existing associate request!\n");
86 n = ieee80211softmac_get_network_by_bssid(sm, sm->associnfo.bssid); 81 /* Cancel assoc work */
87 if(n && n->essid.len == data->essid.length && 82 cancel_delayed_work(&sm->associnfo.work);
88 !memcmp(n->essid.data, extra, n->essid.len)) { 83 /* We don't have to do this, but it's a little cleaner */
89 dprintk(KERN_INFO PFX "Already associating or associated to "MAC_FMT"\n", 84 list_for_each_entry(authptr, &sm->auth_queue, list)
90 MAC_ARG(sm->associnfo.bssid)); 85 cancel_delayed_work(&authptr->work);
91 goto out; 86 sm->associnfo.bssvalid = 0;
92 } else { 87 sm->associnfo.bssfixed = 0;
93 dprintk(KERN_INFO PFX "Canceling existing associate request!\n"); 88 sm->associnfo.associating = 0;
94 /* Cancel assoc work */ 89 sm->associnfo.associated = 0;
95 cancel_delayed_work(&sm->associnfo.work); 90 /* We must unlock to avoid deadlocks with the assoc workqueue
96 /* We don't have to do this, but it's a little cleaner */ 91 * on the associnfo.mutex */
97 list_for_each_entry(authptr, &sm->auth_queue, list) 92 mutex_unlock(&sm->associnfo.mutex);
98 cancel_delayed_work(&authptr->work); 93 flush_scheduled_work();
99 sm->associnfo.bssvalid = 0; 94 /* Avoid race! Check assoc status again. Maybe someone started an
100 sm->associnfo.bssfixed = 0; 95 * association while we flushed. */
101 sm->associnfo.associating = 0; 96 goto check_assoc_again;
102 sm->associnfo.associated = 0;
103 /* We must unlock to avoid deadlocks with the assoc workqueue
104 * on the associnfo.mutex */
105 mutex_unlock(&sm->associnfo.mutex);
106 flush_scheduled_work();
107 /* Avoid race! Check assoc status again. Maybe someone started an
108 * association while we flushed. */
109 goto check_assoc_again;
110 }
111 } 97 }
112 98
113 sm->associnfo.static_essid = 0; 99 sm->associnfo.static_essid = 0;
@@ -153,13 +139,13 @@ ieee80211softmac_wx_get_essid(struct net_device *net_dev,
153 data->essid.length = sm->associnfo.req_essid.len; 139 data->essid.length = sm->associnfo.req_essid.len;
154 data->essid.flags = 1; /* active */ 140 data->essid.flags = 1; /* active */
155 memcpy(extra, sm->associnfo.req_essid.data, sm->associnfo.req_essid.len); 141 memcpy(extra, sm->associnfo.req_essid.data, sm->associnfo.req_essid.len);
156 } 142 dprintk(KERN_INFO PFX "Getting essid from req_essid\n");
157 143 } else if (sm->associnfo.associated || sm->associnfo.associating) {
158 /* If we're associating/associated, return that */ 144 /* If we're associating/associated, return that */
159 if (sm->associnfo.associated || sm->associnfo.associating) {
160 data->essid.length = sm->associnfo.associate_essid.len; 145 data->essid.length = sm->associnfo.associate_essid.len;
161 data->essid.flags = 1; /* active */ 146 data->essid.flags = 1; /* active */
162 memcpy(extra, sm->associnfo.associate_essid.data, sm->associnfo.associate_essid.len); 147 memcpy(extra, sm->associnfo.associate_essid.data, sm->associnfo.associate_essid.len);
148 dprintk(KERN_INFO PFX "Getting essid from associate_essid\n");
163 } 149 }
164 mutex_unlock(&sm->associnfo.mutex); 150 mutex_unlock(&sm->associnfo.mutex);
165 151
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9c94627c8c7e..e089a978e128 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -833,8 +833,7 @@ static struct tcp_md5sig_key *
833 return NULL; 833 return NULL;
834 for (i = 0; i < tp->md5sig_info->entries4; i++) { 834 for (i = 0; i < tp->md5sig_info->entries4; i++) {
835 if (tp->md5sig_info->keys4[i].addr == addr) 835 if (tp->md5sig_info->keys4[i].addr == addr)
836 return (struct tcp_md5sig_key *) 836 return &tp->md5sig_info->keys4[i].base;
837 &tp->md5sig_info->keys4[i];
838 } 837 }
839 return NULL; 838 return NULL;
840} 839}
@@ -865,9 +864,9 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
865 key = (struct tcp4_md5sig_key *)tcp_v4_md5_do_lookup(sk, addr); 864 key = (struct tcp4_md5sig_key *)tcp_v4_md5_do_lookup(sk, addr);
866 if (key) { 865 if (key) {
867 /* Pre-existing entry - just update that one. */ 866 /* Pre-existing entry - just update that one. */
868 kfree(key->key); 867 kfree(key->base.key);
869 key->key = newkey; 868 key->base.key = newkey;
870 key->keylen = newkeylen; 869 key->base.keylen = newkeylen;
871 } else { 870 } else {
872 struct tcp_md5sig_info *md5sig; 871 struct tcp_md5sig_info *md5sig;
873 872
@@ -906,9 +905,9 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
906 md5sig->alloced4++; 905 md5sig->alloced4++;
907 } 906 }
908 md5sig->entries4++; 907 md5sig->entries4++;
909 md5sig->keys4[md5sig->entries4 - 1].addr = addr; 908 md5sig->keys4[md5sig->entries4 - 1].addr = addr;
910 md5sig->keys4[md5sig->entries4 - 1].key = newkey; 909 md5sig->keys4[md5sig->entries4 - 1].base.key = newkey;
911 md5sig->keys4[md5sig->entries4 - 1].keylen = newkeylen; 910 md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
912 } 911 }
913 return 0; 912 return 0;
914} 913}
@@ -930,7 +929,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
930 for (i = 0; i < tp->md5sig_info->entries4; i++) { 929 for (i = 0; i < tp->md5sig_info->entries4; i++) {
931 if (tp->md5sig_info->keys4[i].addr == addr) { 930 if (tp->md5sig_info->keys4[i].addr == addr) {
932 /* Free the key */ 931 /* Free the key */
933 kfree(tp->md5sig_info->keys4[i].key); 932 kfree(tp->md5sig_info->keys4[i].base.key);
934 tp->md5sig_info->entries4--; 933 tp->md5sig_info->entries4--;
935 934
936 if (tp->md5sig_info->entries4 == 0) { 935 if (tp->md5sig_info->entries4 == 0) {
@@ -964,7 +963,7 @@ static void tcp_v4_clear_md5_list(struct sock *sk)
964 if (tp->md5sig_info->entries4) { 963 if (tp->md5sig_info->entries4) {
965 int i; 964 int i;
966 for (i = 0; i < tp->md5sig_info->entries4; i++) 965 for (i = 0; i < tp->md5sig_info->entries4; i++)
967 kfree(tp->md5sig_info->keys4[i].key); 966 kfree(tp->md5sig_info->keys4[i].base.key);
968 tp->md5sig_info->entries4 = 0; 967 tp->md5sig_info->entries4 = 0;
969 tcp_free_md5sig_pool(); 968 tcp_free_md5sig_pool();
970 } 969 }
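The TCP MD5 changes above embed the generic struct tcp_md5sig_key as the first member (base) of the per-family key structures, so a pointer to keys4[i].base is a valid struct tcp_md5sig_key * without casting, and all key/keylen accesses go through .base. A minimal sketch of the IPv4 layout, with the generic struct's fields elided (see the tcp.h hunk above):

    struct tcp4_md5sig_key {
            struct tcp_md5sig_key   base;   /* generic key; must stay first */
            __be32                  addr;   /* peer IPv4 address */
    };

    /* lookup now returns the embedded generic key rather than casting: */
    return &tp->md5sig_info->keys4[i].base;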
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 0f7defb482e9..3e06799b37a6 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -539,7 +539,7 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
539 539
540 for (i = 0; i < tp->md5sig_info->entries6; i++) { 540 for (i = 0; i < tp->md5sig_info->entries6; i++) {
541 if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0) 541 if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0)
542 return (struct tcp_md5sig_key *)&tp->md5sig_info->keys6[i]; 542 return &tp->md5sig_info->keys6[i].base;
543 } 543 }
544 return NULL; 544 return NULL;
545} 545}
@@ -567,9 +567,9 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
567 key = (struct tcp6_md5sig_key*) tcp_v6_md5_do_lookup(sk, peer); 567 key = (struct tcp6_md5sig_key*) tcp_v6_md5_do_lookup(sk, peer);
568 if (key) { 568 if (key) {
569 /* modify existing entry - just update that one */ 569 /* modify existing entry - just update that one */
570 kfree(key->key); 570 kfree(key->base.key);
571 key->key = newkey; 571 key->base.key = newkey;
572 key->keylen = newkeylen; 572 key->base.keylen = newkeylen;
573 } else { 573 } else {
574 /* reallocate new list if current one is full. */ 574 /* reallocate new list if current one is full. */
575 if (!tp->md5sig_info) { 575 if (!tp->md5sig_info) {
@@ -603,8 +603,8 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
603 603
604 ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr, 604 ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
605 peer); 605 peer);
606 tp->md5sig_info->keys6[tp->md5sig_info->entries6].key = newkey; 606 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
607 tp->md5sig_info->keys6[tp->md5sig_info->entries6].keylen = newkeylen; 607 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
608 608
609 tp->md5sig_info->entries6++; 609 tp->md5sig_info->entries6++;
610 } 610 }
@@ -626,7 +626,7 @@ static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
626 for (i = 0; i < tp->md5sig_info->entries6; i++) { 626 for (i = 0; i < tp->md5sig_info->entries6; i++) {
627 if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) { 627 if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) {
628 /* Free the key */ 628 /* Free the key */
629 kfree(tp->md5sig_info->keys6[i].key); 629 kfree(tp->md5sig_info->keys6[i].base.key);
630 tp->md5sig_info->entries6--; 630 tp->md5sig_info->entries6--;
631 631
632 if (tp->md5sig_info->entries6 == 0) { 632 if (tp->md5sig_info->entries6 == 0) {
@@ -657,7 +657,7 @@ static void tcp_v6_clear_md5_list (struct sock *sk)
657 657
658 if (tp->md5sig_info->entries6) { 658 if (tp->md5sig_info->entries6) {
659 for (i = 0; i < tp->md5sig_info->entries6; i++) 659 for (i = 0; i < tp->md5sig_info->entries6; i++)
660 kfree(tp->md5sig_info->keys6[i].key); 660 kfree(tp->md5sig_info->keys6[i].base.key);
661 tp->md5sig_info->entries6 = 0; 661 tp->md5sig_info->entries6 = 0;
662 tcp_free_md5sig_pool(); 662 tcp_free_md5sig_pool();
663 } 663 }
@@ -668,7 +668,7 @@ static void tcp_v6_clear_md5_list (struct sock *sk)
668 668
669 if (tp->md5sig_info->entries4) { 669 if (tp->md5sig_info->entries4) {
670 for (i = 0; i < tp->md5sig_info->entries4; i++) 670 for (i = 0; i < tp->md5sig_info->entries4; i++)
671 kfree(tp->md5sig_info->keys4[i].key); 671 kfree(tp->md5sig_info->keys4[i].base.key);
672 tp->md5sig_info->entries4 = 0; 672 tp->md5sig_info->entries4 = 0;
673 tcp_free_md5sig_pool(); 673 tcp_free_md5sig_pool();
674 } 674 }
diff --git a/net/mac80211/ieee80211.c b/net/mac80211/ieee80211.c
index 7286c389a4d0..ff2172ffd861 100644
--- a/net/mac80211/ieee80211.c
+++ b/net/mac80211/ieee80211.c
@@ -5259,7 +5259,7 @@ static void __exit ieee80211_exit(void)
5259} 5259}
5260 5260
5261 5261
5262module_init(ieee80211_init); 5262subsys_initcall(ieee80211_init);
5263module_exit(ieee80211_exit); 5263module_exit(ieee80211_exit);
5264 5264
5265MODULE_DESCRIPTION("IEEE 802.11 subsystem"); 5265MODULE_DESCRIPTION("IEEE 802.11 subsystem");
diff --git a/net/mac80211/rc80211_simple.c b/net/mac80211/rc80211_simple.c
index f6780d63b342..17b9f46bbf2b 100644
--- a/net/mac80211/rc80211_simple.c
+++ b/net/mac80211/rc80211_simple.c
@@ -431,7 +431,7 @@ static void __exit rate_control_simple_exit(void)
431} 431}
432 432
433 433
434module_init(rate_control_simple_init); 434subsys_initcall(rate_control_simple_init);
435module_exit(rate_control_simple_exit); 435module_exit(rate_control_simple_exit);
436 436
437MODULE_DESCRIPTION("Simple rate control algorithm for ieee80211"); 437MODULE_DESCRIPTION("Simple rate control algorithm for ieee80211");
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 89ce81529694..7ab82b376e1b 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -424,7 +424,7 @@ static int wme_qdiscop_init(struct Qdisc *qd, struct rtattr *opt)
424 skb_queue_head_init(&q->requeued[i]); 424 skb_queue_head_init(&q->requeued[i]);
425 q->queues[i] = qdisc_create_dflt(qd->dev, &pfifo_qdisc_ops, 425 q->queues[i] = qdisc_create_dflt(qd->dev, &pfifo_qdisc_ops,
426 qd->handle); 426 qd->handle);
427 if (q->queues[i] == 0) { 427 if (!q->queues[i]) {
428 q->queues[i] = &noop_qdisc; 428 q->queues[i] = &noop_qdisc;
429 printk(KERN_ERR "%s child qdisc %i creation failed", dev->name, i); 429 printk(KERN_ERR "%s child qdisc %i creation failed", dev->name, i);
430 } 430 }
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index d35cbf5aae33..dfffa94fb9f6 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -181,7 +181,7 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
181 * structure. 181 * structure.
182 */ 182 */
183int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr, 183int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr,
184 void (*rcu_call)(struct rcu_head *head, 184 void fastcall (*rcu_call)(struct rcu_head *head,
185 void (*func)(struct rcu_head *head))) 185 void (*func)(struct rcu_head *head)))
186{ 186{
187 struct sctp_sockaddr_entry *addr, *temp; 187 struct sctp_sockaddr_entry *addr, *temp;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 47e56017f4ce..f9a0c9276e3b 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -622,6 +622,14 @@ static int sctp_rcv_ootb(struct sk_buff *skb)
622 if (SCTP_CID_SHUTDOWN_COMPLETE == ch->type) 622 if (SCTP_CID_SHUTDOWN_COMPLETE == ch->type)
623 goto discard; 623 goto discard;
624 624
625 /* RFC 4460, 2.11.2
626 * This will discard packets with INIT chunk bundled as
627 * subsequent chunks in the packet. When INIT is first,
628 * the normal INIT processing will discard the chunk.
629 */
630 if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data)
631 goto discard;
632
625 /* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR 633 /* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR
626 * or a COOKIE ACK the SCTP Packet should be silently 634 * or a COOKIE ACK the SCTP Packet should be silently
627 * discarded. 635 * discarded.
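
The new check relies on the first chunk of an SCTP packet starting exactly at skb->data, so an INIT found at any other offset must have been bundled behind something else and is dropped per RFC 4460 2.11.2. A standalone sketch of the same position test over a raw packet buffer (simplified types, no kernel dependencies):

#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>

#define CID_INIT 1	/* SCTP INIT chunk type */

struct chunkhdr {
	uint8_t  type;
	uint8_t  flags;
	uint16_t length;	/* network byte order, includes this header */
};

/* Return 0 (discard) if an INIT chunk appears anywhere except as the very
 * first chunk of the packet; 'data'/'len' stand in for skb->data and the
 * packet payload length. */
static int ootb_init_position_ok(const uint8_t *data, size_t len)
{
	size_t off = 0;

	while (off + sizeof(struct chunkhdr) <= len) {
		const struct chunkhdr *ch = (const void *)(data + off);
		size_t clen = ntohs(ch->length);

		if (clen < sizeof(struct chunkhdr) || clen > len - off)
			break;				/* malformed; stop walking */
		if (ch->type == CID_INIT && off != 0)
			return 0;			/* bundled INIT: discard */
		off += (clen + 3) & ~(size_t)3;		/* WORD_ROUND() equivalent */
	}
	return 1;
}
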
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 88aa22407549..e4ea7fdf36ed 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -130,6 +130,14 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
130 /* Force chunk->skb->data to chunk->chunk_end. */ 130 /* Force chunk->skb->data to chunk->chunk_end. */
131 skb_pull(chunk->skb, 131 skb_pull(chunk->skb,
132 chunk->chunk_end - chunk->skb->data); 132 chunk->chunk_end - chunk->skb->data);
133
134 /* Verify that we have at least chunk headers
135 * worth of buffer left.
136 */
137 if (skb_headlen(chunk->skb) < sizeof(sctp_chunkhdr_t)) {
138 sctp_chunk_free(chunk);
139 chunk = queue->in_progress = NULL;
140 }
133 } 141 }
134 } 142 }
135 143
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 2e34220d94cd..23ae37ec8711 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2499,6 +2499,52 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
2499 return SCTP_ERROR_NO_ERROR; 2499 return SCTP_ERROR_NO_ERROR;
2500} 2500}
2501 2501
2502/* Verify the ASCONF packet before we process it. */
2503int sctp_verify_asconf(const struct sctp_association *asoc,
2504 struct sctp_paramhdr *param_hdr, void *chunk_end,
2505 struct sctp_paramhdr **errp) {
2506 sctp_addip_param_t *asconf_param;
2507 union sctp_params param;
2508 int length, plen;
2509
2510 param.v = (sctp_paramhdr_t *) param_hdr;
2511 while (param.v <= chunk_end - sizeof(sctp_paramhdr_t)) {
2512 length = ntohs(param.p->length);
2513 *errp = param.p;
2514
2515 if (param.v > chunk_end - length ||
2516 length < sizeof(sctp_paramhdr_t))
2517 return 0;
2518
2519 switch (param.p->type) {
2520 case SCTP_PARAM_ADD_IP:
2521 case SCTP_PARAM_DEL_IP:
2522 case SCTP_PARAM_SET_PRIMARY:
2523 asconf_param = (sctp_addip_param_t *)param.v;
2524 plen = ntohs(asconf_param->param_hdr.length);
2525 if (plen < sizeof(sctp_addip_param_t) +
2526 sizeof(sctp_paramhdr_t))
2527 return 0;
2528 break;
2529 case SCTP_PARAM_SUCCESS_REPORT:
2530 case SCTP_PARAM_ADAPTATION_LAYER_IND:
2531 if (length != sizeof(sctp_addip_param_t))
2532 return 0;
2533
2534 break;
2535 default:
2536 break;
2537 }
2538
2539 param.v += WORD_ROUND(length);
2540 }
2541
2542 if (param.v != chunk_end)
2543 return 0;
2544
2545 return 1;
2546}
2547
2502/* Process an incoming ASCONF chunk with the next expected serial no. and 2548/* Process an incoming ASCONF chunk with the next expected serial no. and
2503 * return an ASCONF_ACK chunk to be sent in response. 2549 * return an ASCONF_ACK chunk to be sent in response.
2504 */ 2550 */
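
sctp_verify_asconf() is a bounds check over the chunk's parameter TLVs: every declared length must be at least a parameter header, must not run past the chunk end, certain parameter types get larger minimums, and the walk must land exactly on the chunk end. A simplified, self-contained model of that loop (generic names, without the per-type minimums):

#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>

struct paramhdr {
	uint16_t type;		/* network byte order */
	uint16_t length;	/* network byte order, includes this header */
};

/* Walk a TLV parameter region and confirm every declared length stays inside
 * the region and is at least a header.  On failure, *errp points at the
 * offending parameter, like the 'errp' out-argument above. */
static int verify_params(const uint8_t *params, size_t region_len,
			 const struct paramhdr **errp)
{
	size_t off = 0;

	while (off + sizeof(struct paramhdr) <= region_len) {
		const struct paramhdr *ph = (const void *)(params + off);
		size_t len = ntohs(ph->length);

		*errp = ph;
		if (len < sizeof(struct paramhdr) || len > region_len - off)
			return 0;
		off += (len + 3) & ~(size_t)3;	/* parameters are 32-bit aligned */
	}
	return off == region_len;		/* no trailing garbage allowed */
}
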
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 177528ed3e1b..a583d67cab63 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -90,6 +90,11 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
90 const sctp_subtype_t type, 90 const sctp_subtype_t type,
91 void *arg, 91 void *arg,
92 sctp_cmd_seq_t *commands); 92 sctp_cmd_seq_t *commands);
93static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
94 const struct sctp_association *asoc,
95 const sctp_subtype_t type,
96 void *arg,
97 sctp_cmd_seq_t *commands);
93static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk); 98static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
94 99
95static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands, 100static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
@@ -98,6 +103,7 @@ static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
98 struct sctp_transport *transport); 103 struct sctp_transport *transport);
99 104
100static sctp_disposition_t sctp_sf_abort_violation( 105static sctp_disposition_t sctp_sf_abort_violation(
106 const struct sctp_endpoint *ep,
101 const struct sctp_association *asoc, 107 const struct sctp_association *asoc,
102 void *arg, 108 void *arg,
103 sctp_cmd_seq_t *commands, 109 sctp_cmd_seq_t *commands,
@@ -111,6 +117,13 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
111 void *arg, 117 void *arg,
112 sctp_cmd_seq_t *commands); 118 sctp_cmd_seq_t *commands);
113 119
120static sctp_disposition_t sctp_sf_violation_paramlen(
121 const struct sctp_endpoint *ep,
122 const struct sctp_association *asoc,
123 const sctp_subtype_t type,
124 void *arg,
125 sctp_cmd_seq_t *commands);
126
114static sctp_disposition_t sctp_sf_violation_ctsn( 127static sctp_disposition_t sctp_sf_violation_ctsn(
115 const struct sctp_endpoint *ep, 128 const struct sctp_endpoint *ep,
116 const struct sctp_association *asoc, 129 const struct sctp_association *asoc,
@@ -118,6 +131,13 @@ static sctp_disposition_t sctp_sf_violation_ctsn(
118 void *arg, 131 void *arg,
119 sctp_cmd_seq_t *commands); 132 sctp_cmd_seq_t *commands);
120 133
134static sctp_disposition_t sctp_sf_violation_chunk(
135 const struct sctp_endpoint *ep,
136 const struct sctp_association *asoc,
137 const sctp_subtype_t type,
138 void *arg,
139 sctp_cmd_seq_t *commands);
140
121/* Small helper function that checks if the chunk length 141/* Small helper function that checks if the chunk length
122 * is of the appropriate length. The 'required_length' argument 142 * is of the appropriate length. The 'required_length' argument
123 * is set to be the size of a specific chunk we are testing. 143 * is set to be the size of a specific chunk we are testing.
@@ -181,16 +201,21 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
181 struct sctp_chunk *chunk = arg; 201 struct sctp_chunk *chunk = arg;
182 struct sctp_ulpevent *ev; 202 struct sctp_ulpevent *ev;
183 203
204 if (!sctp_vtag_verify_either(chunk, asoc))
205 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
206
184 /* RFC 2960 6.10 Bundling 207 /* RFC 2960 6.10 Bundling
185 * 208 *
186 * An endpoint MUST NOT bundle INIT, INIT ACK or 209 * An endpoint MUST NOT bundle INIT, INIT ACK or
187 * SHUTDOWN COMPLETE with any other chunks. 210 * SHUTDOWN COMPLETE with any other chunks.
188 */ 211 */
189 if (!chunk->singleton) 212 if (!chunk->singleton)
190 return SCTP_DISPOSITION_VIOLATION; 213 return sctp_sf_violation_chunk(ep, asoc, type, arg, commands);
191 214
192 if (!sctp_vtag_verify_either(chunk, asoc)) 215 /* Make sure that the SHUTDOWN_COMPLETE chunk has a valid length. */
193 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 216 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
217 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
218 commands);
194 219
195 /* RFC 2960 10.2 SCTP-to-ULP 220 /* RFC 2960 10.2 SCTP-to-ULP
196 * 221 *
@@ -450,17 +475,17 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
450 if (!sctp_vtag_verify(chunk, asoc)) 475 if (!sctp_vtag_verify(chunk, asoc))
451 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 476 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
452 477
453 /* Make sure that the INIT-ACK chunk has a valid length */
454 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t)))
455 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
456 commands);
457 /* 6.10 Bundling 478 /* 6.10 Bundling
458 * An endpoint MUST NOT bundle INIT, INIT ACK or 479 * An endpoint MUST NOT bundle INIT, INIT ACK or
459 * SHUTDOWN COMPLETE with any other chunks. 480 * SHUTDOWN COMPLETE with any other chunks.
460 */ 481 */
461 if (!chunk->singleton) 482 if (!chunk->singleton)
462 return SCTP_DISPOSITION_VIOLATION; 483 return sctp_sf_violation_chunk(ep, asoc, type, arg, commands);
463 484
485 /* Make sure that the INIT-ACK chunk has a valid length */
486 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t)))
487 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
488 commands);
464 /* Grab the INIT header. */ 489 /* Grab the INIT header. */
465 chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data; 490 chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data;
466 491
@@ -585,7 +610,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
585 * control endpoint, respond with an ABORT. 610 * control endpoint, respond with an ABORT.
586 */ 611 */
587 if (ep == sctp_sk((sctp_get_ctl_sock()))->ep) 612 if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
588 return sctp_sf_ootb(ep, asoc, type, arg, commands); 613 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
589 614
590 /* Make sure that the COOKIE_ECHO chunk has a valid length. 615 /* Make sure that the COOKIE_ECHO chunk has a valid length.
591 * In this case, we check that we have enough for at least a 616 * In this case, we check that we have enough for at least a
@@ -2496,6 +2521,11 @@ sctp_disposition_t sctp_sf_do_9_2_reshutack(const struct sctp_endpoint *ep,
2496 struct sctp_chunk *chunk = (struct sctp_chunk *) arg; 2521 struct sctp_chunk *chunk = (struct sctp_chunk *) arg;
2497 struct sctp_chunk *reply; 2522 struct sctp_chunk *reply;
2498 2523
2524 /* Make sure that the chunk has a valid length */
2525 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
2526 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
2527 commands);
2528
2499 /* Since we are not going to really process this INIT, there 2529 /* Since we are not going to really process this INIT, there
2500 * is no point in verifying chunk boundries. Just generate 2530 * is no point in verifying chunk boundries. Just generate
2501 * the SHUTDOWN ACK. 2531 * the SHUTDOWN ACK.
@@ -2929,7 +2959,7 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep,
2929 * 2959 *
2930 * The return value is the disposition of the chunk. 2960 * The return value is the disposition of the chunk.
2931*/ 2961*/
2932sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep, 2962static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
2933 const struct sctp_association *asoc, 2963 const struct sctp_association *asoc,
2934 const sctp_subtype_t type, 2964 const sctp_subtype_t type,
2935 void *arg, 2965 void *arg,
@@ -2965,6 +2995,7 @@ sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
2965 2995
2966 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); 2996 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
2967 2997
2998 sctp_sf_pdiscard(ep, asoc, type, arg, commands);
2968 return SCTP_DISPOSITION_CONSUME; 2999 return SCTP_DISPOSITION_CONSUME;
2969 } 3000 }
2970 3001
@@ -3125,14 +3156,14 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
3125 3156
3126 ch = (sctp_chunkhdr_t *) chunk->chunk_hdr; 3157 ch = (sctp_chunkhdr_t *) chunk->chunk_hdr;
3127 do { 3158 do {
3128 /* Break out if chunk length is less than minimal. */ 3159 /* Report violation if the chunk is less than minimal */
3129 if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t)) 3160 if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
3130 break; 3161 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
3131 3162 commands);
3132 ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
3133 if (ch_end > skb_tail_pointer(skb))
3134 break;
3135 3163
3164 /* Now that we know we at least have a chunk header,
3165 * do things that are type appropriate.
3166 */
3136 if (SCTP_CID_SHUTDOWN_ACK == ch->type) 3167 if (SCTP_CID_SHUTDOWN_ACK == ch->type)
3137 ootb_shut_ack = 1; 3168 ootb_shut_ack = 1;
3138 3169
@@ -3144,15 +3175,19 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
3144 if (SCTP_CID_ABORT == ch->type) 3175 if (SCTP_CID_ABORT == ch->type)
3145 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3176 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
3146 3177
3178 /* Report violation if chunk len overflows */
3179 ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
3180 if (ch_end > skb_tail_pointer(skb))
3181 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
3182 commands);
3183
3147 ch = (sctp_chunkhdr_t *) ch_end; 3184 ch = (sctp_chunkhdr_t *) ch_end;
3148 } while (ch_end < skb_tail_pointer(skb)); 3185 } while (ch_end < skb_tail_pointer(skb));
3149 3186
3150 if (ootb_shut_ack) 3187 if (ootb_shut_ack)
3151 sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands); 3188 return sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands);
3152 else 3189 else
3153 sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); 3190 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
3154
3155 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
3156} 3191}
3157 3192
3158/* 3193/*
@@ -3218,7 +3253,11 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
3218 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) 3253 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
3219 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3254 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
3220 3255
3221 return SCTP_DISPOSITION_CONSUME; 3256 /* We need to discard the rest of the packet to prevent
3257 * potential bombing attacks from additional bundled chunks.
3258 * This is documented in SCTP Threats ID.
3259 */
3260 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
3222 } 3261 }
3223 3262
3224 return SCTP_DISPOSITION_NOMEM; 3263 return SCTP_DISPOSITION_NOMEM;
@@ -3241,6 +3280,13 @@ sctp_disposition_t sctp_sf_do_8_5_1_E_sa(const struct sctp_endpoint *ep,
3241 void *arg, 3280 void *arg,
3242 sctp_cmd_seq_t *commands) 3281 sctp_cmd_seq_t *commands)
3243{ 3282{
3283 struct sctp_chunk *chunk = arg;
3284
3285 /* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
3286 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
3287 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
3288 commands);
3289
3244 /* Although we do have an association in this case, it corresponds 3290 /* Although we do have an association in this case, it corresponds
3245 * to a restarted association. So the packet is treated as an OOTB 3291 * to a restarted association. So the packet is treated as an OOTB
3246 * packet and the state function that handles OOTB SHUTDOWN_ACK is 3292 * packet and the state function that handles OOTB SHUTDOWN_ACK is
@@ -3257,8 +3303,11 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
3257{ 3303{
3258 struct sctp_chunk *chunk = arg; 3304 struct sctp_chunk *chunk = arg;
3259 struct sctp_chunk *asconf_ack = NULL; 3305 struct sctp_chunk *asconf_ack = NULL;
3306 struct sctp_paramhdr *err_param = NULL;
3260 sctp_addiphdr_t *hdr; 3307 sctp_addiphdr_t *hdr;
3308 union sctp_addr_param *addr_param;
3261 __u32 serial; 3309 __u32 serial;
3310 int length;
3262 3311
3263 if (!sctp_vtag_verify(chunk, asoc)) { 3312 if (!sctp_vtag_verify(chunk, asoc)) {
3264 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, 3313 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
@@ -3274,6 +3323,20 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
3274 hdr = (sctp_addiphdr_t *)chunk->skb->data; 3323 hdr = (sctp_addiphdr_t *)chunk->skb->data;
3275 serial = ntohl(hdr->serial); 3324 serial = ntohl(hdr->serial);
3276 3325
3326 addr_param = (union sctp_addr_param *)hdr->params;
3327 length = ntohs(addr_param->p.length);
3328 if (length < sizeof(sctp_paramhdr_t))
3329 return sctp_sf_violation_paramlen(ep, asoc, type,
3330 (void *)addr_param, commands);
3331
3332 /* Verify the ASCONF chunk before processing it. */
3333 if (!sctp_verify_asconf(asoc,
3334 (sctp_paramhdr_t *)((void *)addr_param + length),
3335 (void *)chunk->chunk_end,
3336 &err_param))
3337 return sctp_sf_violation_paramlen(ep, asoc, type,
3338 (void *)&err_param, commands);
3339
3277 /* ADDIP 4.2 C1) Compare the value of the serial number to the value 3340 /* ADDIP 4.2 C1) Compare the value of the serial number to the value
3278 * the endpoint stored in a new association variable 3341 * the endpoint stored in a new association variable
3279 * 'Peer-Serial-Number'. 3342 * 'Peer-Serial-Number'.
@@ -3328,6 +3391,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
3328 struct sctp_chunk *asconf_ack = arg; 3391 struct sctp_chunk *asconf_ack = arg;
3329 struct sctp_chunk *last_asconf = asoc->addip_last_asconf; 3392 struct sctp_chunk *last_asconf = asoc->addip_last_asconf;
3330 struct sctp_chunk *abort; 3393 struct sctp_chunk *abort;
3394 struct sctp_paramhdr *err_param = NULL;
3331 sctp_addiphdr_t *addip_hdr; 3395 sctp_addiphdr_t *addip_hdr;
3332 __u32 sent_serial, rcvd_serial; 3396 __u32 sent_serial, rcvd_serial;
3333 3397
@@ -3345,6 +3409,14 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
3345 addip_hdr = (sctp_addiphdr_t *)asconf_ack->skb->data; 3409 addip_hdr = (sctp_addiphdr_t *)asconf_ack->skb->data;
3346 rcvd_serial = ntohl(addip_hdr->serial); 3410 rcvd_serial = ntohl(addip_hdr->serial);
3347 3411
3412 /* Verify the ASCONF-ACK chunk before processing it. */
3413 if (!sctp_verify_asconf(asoc,
3414 (sctp_paramhdr_t *)addip_hdr->params,
3415 (void *)asconf_ack->chunk_end,
3416 &err_param))
3417 return sctp_sf_violation_paramlen(ep, asoc, type,
3418 (void *)&err_param, commands);
3419
3348 if (last_asconf) { 3420 if (last_asconf) {
3349 addip_hdr = (sctp_addiphdr_t *)last_asconf->subh.addip_hdr; 3421 addip_hdr = (sctp_addiphdr_t *)last_asconf->subh.addip_hdr;
3350 sent_serial = ntohl(addip_hdr->serial); 3422 sent_serial = ntohl(addip_hdr->serial);
@@ -3655,6 +3727,16 @@ sctp_disposition_t sctp_sf_discard_chunk(const struct sctp_endpoint *ep,
3655 void *arg, 3727 void *arg,
3656 sctp_cmd_seq_t *commands) 3728 sctp_cmd_seq_t *commands)
3657{ 3729{
3730 struct sctp_chunk *chunk = arg;
3731
3732 /* Make sure that the chunk has a valid length.
3733 * Since we don't know the chunk type, we use a general
3734 * chunkhdr structure to make a comparison.
3735 */
3736 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
3737 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
3738 commands);
3739
3658 SCTP_DEBUG_PRINTK("Chunk %d is discarded\n", type.chunk); 3740 SCTP_DEBUG_PRINTK("Chunk %d is discarded\n", type.chunk);
3659 return SCTP_DISPOSITION_DISCARD; 3741 return SCTP_DISPOSITION_DISCARD;
3660} 3742}
@@ -3710,6 +3792,13 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
3710 void *arg, 3792 void *arg,
3711 sctp_cmd_seq_t *commands) 3793 sctp_cmd_seq_t *commands)
3712{ 3794{
3795 struct sctp_chunk *chunk = arg;
3796
3797 /* Make sure that the chunk has a valid length. */
3798 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
3799 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
3800 commands);
3801
3713 return SCTP_DISPOSITION_VIOLATION; 3802 return SCTP_DISPOSITION_VIOLATION;
3714} 3803}
3715 3804
@@ -3717,12 +3806,14 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
3717 * Common function to handle a protocol violation. 3806 * Common function to handle a protocol violation.
3718 */ 3807 */
3719static sctp_disposition_t sctp_sf_abort_violation( 3808static sctp_disposition_t sctp_sf_abort_violation(
3809 const struct sctp_endpoint *ep,
3720 const struct sctp_association *asoc, 3810 const struct sctp_association *asoc,
3721 void *arg, 3811 void *arg,
3722 sctp_cmd_seq_t *commands, 3812 sctp_cmd_seq_t *commands,
3723 const __u8 *payload, 3813 const __u8 *payload,
3724 const size_t paylen) 3814 const size_t paylen)
3725{ 3815{
3816 struct sctp_packet *packet = NULL;
3726 struct sctp_chunk *chunk = arg; 3817 struct sctp_chunk *chunk = arg;
3727 struct sctp_chunk *abort = NULL; 3818 struct sctp_chunk *abort = NULL;
3728 3819
@@ -3731,30 +3822,51 @@ static sctp_disposition_t sctp_sf_abort_violation(
3731 if (!abort) 3822 if (!abort)
3732 goto nomem; 3823 goto nomem;
3733 3824
3734 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); 3825 if (asoc) {
3735 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); 3826 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
3827 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
3736 3828
3737 if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) { 3829 if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) {
3738 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, 3830 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
3739 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); 3831 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
3740 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, 3832 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
3741 SCTP_ERROR(ECONNREFUSED)); 3833 SCTP_ERROR(ECONNREFUSED));
3742 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, 3834 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
3743 SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION)); 3835 SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
3836 } else {
3837 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
3838 SCTP_ERROR(ECONNABORTED));
3839 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
3840 SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
3841 SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
3842 }
3744 } else { 3843 } else {
3745 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, 3844 packet = sctp_ootb_pkt_new(asoc, chunk);
3746 SCTP_ERROR(ECONNABORTED)); 3845
3747 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 3846 if (!packet)
3748 SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION)); 3847 goto nomem_pkt;
3749 SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); 3848
3849 if (sctp_test_T_bit(abort))
3850 packet->vtag = ntohl(chunk->sctp_hdr->vtag);
3851
3852 abort->skb->sk = ep->base.sk;
3853
3854 sctp_packet_append_chunk(packet, abort);
3855
3856 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
3857 SCTP_PACKET(packet));
3858
3859 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
3750 } 3860 }
3751 3861
3752 sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL()); 3862 sctp_sf_pdiscard(ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
3753 3863
3754 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); 3864 SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
3755 3865
3756 return SCTP_DISPOSITION_ABORT; 3866 return SCTP_DISPOSITION_ABORT;
3757 3867
3868nomem_pkt:
3869 sctp_chunk_free(abort);
3758nomem: 3870nomem:
3759 return SCTP_DISPOSITION_NOMEM; 3871 return SCTP_DISPOSITION_NOMEM;
3760} 3872}
@@ -3787,7 +3899,24 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
3787{ 3899{
3788 char err_str[]="The following chunk had invalid length:"; 3900 char err_str[]="The following chunk had invalid length:";
3789 3901
3790 return sctp_sf_abort_violation(asoc, arg, commands, err_str, 3902 return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
3903 sizeof(err_str));
3904}
3905
3906/*
3907 * Handle a protocol violation when the parameter length is invalid.
3908 * "Invalid" length is identified as smaller then the minimal length a
3909 * given parameter can be.
3910 */
3911static sctp_disposition_t sctp_sf_violation_paramlen(
3912 const struct sctp_endpoint *ep,
3913 const struct sctp_association *asoc,
3914 const sctp_subtype_t type,
3915 void *arg,
3916 sctp_cmd_seq_t *commands) {
3917 char err_str[] = "The following parameter had invalid length:";
3918
3919 return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
3791 sizeof(err_str)); 3920 sizeof(err_str));
3792} 3921}
3793 3922
@@ -3806,10 +3935,31 @@ static sctp_disposition_t sctp_sf_violation_ctsn(
3806{ 3935{
3807 char err_str[]="The cumulative tsn ack beyond the max tsn currently sent:"; 3936 char err_str[]="The cumulative tsn ack beyond the max tsn currently sent:";
3808 3937
3809 return sctp_sf_abort_violation(asoc, arg, commands, err_str, 3938 return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
3810 sizeof(err_str)); 3939 sizeof(err_str));
3811} 3940}
3812 3941
3942/* Handle protocol violation of an invalid chunk bundling. For example,
3943 * when we have an association and we receive bundled INIT-ACK, or
3944 * SHUTDOWN-COMPLETE, our peer is clearly violating the "MUST NOT bundle"
3945 * statement from the specs. Additionally, there might be an attacker
3946 * on the path and we may not want to continue this communication.
3947 */
3948static sctp_disposition_t sctp_sf_violation_chunk(
3949 const struct sctp_endpoint *ep,
3950 const struct sctp_association *asoc,
3951 const sctp_subtype_t type,
3952 void *arg,
3953 sctp_cmd_seq_t *commands)
3954{
3955 char err_str[]="The following chunk violates protocol:";
3956
3957 if (!asoc)
3958 return sctp_sf_violation(ep, asoc, type, arg, commands);
3959
3960 return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
3961 sizeof(err_str));
3962}
3813/*************************************************************************** 3963/***************************************************************************
3814 * These are the state functions for handling primitive (Section 10) events. 3964 * These are the state functions for handling primitive (Section 10) events.
3815 ***************************************************************************/ 3965 ***************************************************************************/
@@ -5176,7 +5326,22 @@ static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc
5176 * association exists, otherwise, use the peer's vtag. 5326 * association exists, otherwise, use the peer's vtag.
5177 */ 5327 */
5178 if (asoc) { 5328 if (asoc) {
5179 vtag = asoc->peer.i.init_tag; 5329 /* Special case the INIT-ACK as there is no peer's vtag
5330 * yet.
5331 */
5332 switch(chunk->chunk_hdr->type) {
5333 case SCTP_CID_INIT_ACK:
5334 {
5335 sctp_initack_chunk_t *initack;
5336
5337 initack = (sctp_initack_chunk_t *)chunk->chunk_hdr;
5338 vtag = ntohl(initack->init_hdr.init_tag);
5339 break;
5340 }
5341 default:
5342 vtag = asoc->peer.i.init_tag;
5343 break;
5344 }
5180 } else { 5345 } else {
5181 /* Special case the INIT and stale COOKIE_ECHO as there is no 5346 /* Special case the INIT and stale COOKIE_ECHO as there is no
5182 * vtag yet. 5347 * vtag yet.
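
The added switch covers the one case where an association exists but the stored peer vtag is not yet usable: replying to an INIT-ACK. There the verification tag for the out-of-the-blue reply is taken from the INIT-ACK's own initiate tag. A small stand-alone illustration of the tag selection (simplified types):

#include <stdint.h>
#include <arpa/inet.h>

/* Minimal stand-in for the initiate-tag field at the start of an
 * INIT/INIT-ACK chunk body (network byte order on the wire). */
struct init_hdr {
	uint32_t init_tag;
};

/* Pick the vtag for the reply packet: normally the peer tag stored in the
 * association, but for an INIT-ACK use the tag the chunk itself carries,
 * mirroring the switch added above. */
static uint32_t reply_vtag(int is_init_ack, uint32_t stored_peer_tag,
			   const struct init_hdr *ih)
{
	return is_init_ack ? ntohl(ih->init_tag) : stored_peer_tag;
}
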
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 70a91ece3c49..ddb0ba3974b0 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -110,7 +110,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
110 /* SCTP_STATE_EMPTY */ \ 110 /* SCTP_STATE_EMPTY */ \
111 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 111 TYPE_SCTP_FUNC(sctp_sf_ootb), \
112 /* SCTP_STATE_CLOSED */ \ 112 /* SCTP_STATE_CLOSED */ \
113 TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ 113 TYPE_SCTP_FUNC(sctp_sf_ootb), \
114 /* SCTP_STATE_COOKIE_WAIT */ \ 114 /* SCTP_STATE_COOKIE_WAIT */ \
115 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 115 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
116 /* SCTP_STATE_COOKIE_ECHOED */ \ 116 /* SCTP_STATE_COOKIE_ECHOED */ \
@@ -173,7 +173,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
173 /* SCTP_STATE_EMPTY */ \ 173 /* SCTP_STATE_EMPTY */ \
174 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 174 TYPE_SCTP_FUNC(sctp_sf_ootb), \
175 /* SCTP_STATE_CLOSED */ \ 175 /* SCTP_STATE_CLOSED */ \
176 TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ 176 TYPE_SCTP_FUNC(sctp_sf_ootb), \
177 /* SCTP_STATE_COOKIE_WAIT */ \ 177 /* SCTP_STATE_COOKIE_WAIT */ \
178 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 178 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
179 /* SCTP_STATE_COOKIE_ECHOED */ \ 179 /* SCTP_STATE_COOKIE_ECHOED */ \
@@ -194,7 +194,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
194 /* SCTP_STATE_EMPTY */ \ 194 /* SCTP_STATE_EMPTY */ \
195 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 195 TYPE_SCTP_FUNC(sctp_sf_ootb), \
196 /* SCTP_STATE_CLOSED */ \ 196 /* SCTP_STATE_CLOSED */ \
197 TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ 197 TYPE_SCTP_FUNC(sctp_sf_ootb), \
198 /* SCTP_STATE_COOKIE_WAIT */ \ 198 /* SCTP_STATE_COOKIE_WAIT */ \
199 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 199 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
200 /* SCTP_STATE_COOKIE_ECHOED */ \ 200 /* SCTP_STATE_COOKIE_ECHOED */ \
@@ -216,7 +216,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
216 /* SCTP_STATE_EMPTY */ \ 216 /* SCTP_STATE_EMPTY */ \
217 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 217 TYPE_SCTP_FUNC(sctp_sf_ootb), \
218 /* SCTP_STATE_CLOSED */ \ 218 /* SCTP_STATE_CLOSED */ \
219 TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ 219 TYPE_SCTP_FUNC(sctp_sf_ootb), \
220 /* SCTP_STATE_COOKIE_WAIT */ \ 220 /* SCTP_STATE_COOKIE_WAIT */ \
221 TYPE_SCTP_FUNC(sctp_sf_violation), \ 221 TYPE_SCTP_FUNC(sctp_sf_violation), \
222 /* SCTP_STATE_COOKIE_ECHOED */ \ 222 /* SCTP_STATE_COOKIE_ECHOED */ \
@@ -258,7 +258,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
258 /* SCTP_STATE_EMPTY */ \ 258 /* SCTP_STATE_EMPTY */ \
259 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 259 TYPE_SCTP_FUNC(sctp_sf_ootb), \
260 /* SCTP_STATE_CLOSED */ \ 260 /* SCTP_STATE_CLOSED */ \
261 TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ 261 TYPE_SCTP_FUNC(sctp_sf_ootb), \
262 /* SCTP_STATE_COOKIE_WAIT */ \ 262 /* SCTP_STATE_COOKIE_WAIT */ \
263 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 263 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
264 /* SCTP_STATE_COOKIE_ECHOED */ \ 264 /* SCTP_STATE_COOKIE_ECHOED */ \
@@ -300,7 +300,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
300 /* SCTP_STATE_EMPTY */ \ 300 /* SCTP_STATE_EMPTY */ \
301 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 301 TYPE_SCTP_FUNC(sctp_sf_ootb), \
302 /* SCTP_STATE_CLOSED */ \ 302 /* SCTP_STATE_CLOSED */ \
303 TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ 303 TYPE_SCTP_FUNC(sctp_sf_ootb), \
304 /* SCTP_STATE_COOKIE_WAIT */ \ 304 /* SCTP_STATE_COOKIE_WAIT */ \
305 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 305 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
306 /* SCTP_STATE_COOKIE_ECHOED */ \ 306 /* SCTP_STATE_COOKIE_ECHOED */ \
@@ -499,7 +499,7 @@ static const sctp_sm_table_entry_t addip_chunk_event_table[SCTP_NUM_ADDIP_CHUNK_
499 /* SCTP_STATE_EMPTY */ \ 499 /* SCTP_STATE_EMPTY */ \
500 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 500 TYPE_SCTP_FUNC(sctp_sf_ootb), \
501 /* SCTP_STATE_CLOSED */ \ 501 /* SCTP_STATE_CLOSED */ \
502 TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ 502 TYPE_SCTP_FUNC(sctp_sf_ootb), \
503 /* SCTP_STATE_COOKIE_WAIT */ \ 503 /* SCTP_STATE_COOKIE_WAIT */ \
504 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 504 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
505 /* SCTP_STATE_COOKIE_ECHOED */ \ 505 /* SCTP_STATE_COOKIE_ECHOED */ \
@@ -528,7 +528,7 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
528 /* SCTP_STATE_EMPTY */ 528 /* SCTP_STATE_EMPTY */
529 TYPE_SCTP_FUNC(sctp_sf_ootb), 529 TYPE_SCTP_FUNC(sctp_sf_ootb),
530 /* SCTP_STATE_CLOSED */ 530 /* SCTP_STATE_CLOSED */
531 TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), 531 TYPE_SCTP_FUNC(sctp_sf_ootb),
532 /* SCTP_STATE_COOKIE_WAIT */ 532 /* SCTP_STATE_COOKIE_WAIT */
533 TYPE_SCTP_FUNC(sctp_sf_unk_chunk), 533 TYPE_SCTP_FUNC(sctp_sf_unk_chunk),
534 /* SCTP_STATE_COOKIE_ECHOED */ 534 /* SCTP_STATE_COOKIE_ECHOED */
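
All the hunks in this file make the same one-entry change: in the chunk-by-state dispatch tables, the SCTP_STATE_CLOSED row now points at sctp_sf_ootb() rather than calling the transport-abort helper directly (which has become static). A reduced model of that kind of table-driven dispatch, with illustrative names:

#include <stddef.h>

typedef int (*state_handler_t)(void *arg);

static int handle_ootb(void *arg)    { (void)arg; return 0; }	/* out of the blue */
static int handle_discard(void *arg) { (void)arg; return 0; }

enum conn_state { ST_CLOSED, ST_COOKIE_WAIT, ST_ESTABLISHED, ST_MAX };

/* One row per state for a single chunk type; changing the behaviour in
 * CLOSED is a matter of swapping one function pointer, exactly as the
 * hunks above do. */
static const state_handler_t shutdown_ack_handlers[ST_MAX] = {
	[ST_CLOSED]      = handle_ootb,		/* was: the t-abort helper */
	[ST_COOKIE_WAIT] = handle_discard,
	[ST_ESTABLISHED] = handle_discard,
};

static int dispatch(enum conn_state s, void *arg)
{
	return shutdown_ack_handlers[s](arg);
}
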
diff --git a/net/socket.c b/net/socket.c
index 7d44453dfae1..b09eb9036a17 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -777,9 +777,6 @@ static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
777 if (pos != 0) 777 if (pos != 0)
778 return -ESPIPE; 778 return -ESPIPE;
779 779
780 if (iocb->ki_left == 0) /* Match SYS5 behaviour */
781 return 0;
782
783 x = alloc_sock_iocb(iocb, &siocb); 780 x = alloc_sock_iocb(iocb, &siocb);
784 if (!x) 781 if (!x)
785 return -ENOMEM; 782 return -ENOMEM;
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 7eabd55417a5..9771451eae21 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -213,7 +213,7 @@ out_fail_notifier:
213out_fail_sysfs: 213out_fail_sysfs:
214 return err; 214 return err;
215} 215}
216module_init(cfg80211_init); 216subsys_initcall(cfg80211_init);
217 217
218static void cfg80211_exit(void) 218static void cfg80211_exit(void)
219{ 219{
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 88aaacd9f822..2d5d2255a27c 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -52,12 +52,14 @@ static void wiphy_dev_release(struct device *dev)
52 cfg80211_dev_free(rdev); 52 cfg80211_dev_free(rdev);
53} 53}
54 54
55#ifdef CONFIG_HOTPLUG
55static int wiphy_uevent(struct device *dev, char **envp, 56static int wiphy_uevent(struct device *dev, char **envp,
56 int num_envp, char *buf, int size) 57 int num_envp, char *buf, int size)
57{ 58{
58 /* TODO, we probably need stuff here */ 59 /* TODO, we probably need stuff here */
59 return 0; 60 return 0;
60} 61}
62#endif
61 63
62struct class ieee80211_class = { 64struct class ieee80211_class = {
63 .name = "ieee80211", 65 .name = "ieee80211",
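
The #ifdef keeps wiphy_uevent() from being defined but unused when hotplug support is compiled out; the matching member initializer in ieee80211_class (just past the end of this hunk) is presumably wrapped the same way. A hedged sketch of the pattern; the .dev_uevent member name is an assumption about this kernel version's struct class:

#include <linux/device.h>

#ifdef CONFIG_HOTPLUG
/* Only built when hotplug events can actually be delivered. */
static int example_uevent(struct device *dev, char **envp,
			  int num_envp, char *buf, int size)
{
	return 0;
}
#endif

static struct class example_class = {
	.name = "example",
#ifdef CONFIG_HOTPLUG
	.dev_uevent = example_uevent,	/* assumed member name; see lead-in */
#endif
};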