 Documentation/filesystems/proc.txt | 1
 MAINTAINERS | 10
 arch/arm/mach-msm/timer.c | 14
 arch/x86/include/asm/memblock.h | 2
 arch/x86/include/asm/pvclock.h | 9
 arch/x86/kvm/mmu.c | 2
 arch/x86/kvm/paging_tmpl.h | 2
 arch/x86/kvm/vmx.c | 3
 arch/x86/mm/memblock.c | 4
 arch/x86/platform/efi/efi.c | 29
 arch/x86/xen/enlighten.c | 9
 arch/x86/xen/mmu.c | 12
 arch/x86/xen/setup.c | 10
 arch/x86/xen/smp.c | 7
 drivers/bluetooth/btmrvl_debugfs.c | 12
 drivers/input/evdev.c | 3
 drivers/input/input.c | 2
 drivers/input/keyboard/omap-keypad.c | 1
 drivers/input/keyboard/sh_keysc.c | 2
 drivers/input/mousedev.c | 4
 drivers/isdn/gigaset/interface.c | 4
 drivers/misc/sgi-xp/xpnet.c | 6
 drivers/net/3c503.c | 3
 drivers/net/bfin_mac.c | 20
 drivers/net/bonding/bond_main.c | 1
 drivers/net/fs_enet/mac-fcc.c | 2
 drivers/net/hp100.c | 4
 drivers/net/hplance.c | 2
 drivers/net/netxen/netxen_nic_main.c | 4
 drivers/net/phy/Kconfig | 1
 drivers/net/phy/dp83640.c | 24
 drivers/net/ppp_async.c | 4
 drivers/net/pxa168_eth.c | 2
 drivers/net/r8169.c | 10
 drivers/net/tun.c | 24
 drivers/net/usb/Kconfig | 10
 drivers/net/usb/Makefile | 1
 drivers/net/usb/kalmia.c | 384
 drivers/net/wan/farsync.c | 4
 drivers/net/wireless/mwifiex/cfg80211.c | 2
 drivers/net/wireless/mwl8k.c | 4
 drivers/xen/events.c | 2
 fs/bad_inode.c | 3
 fs/btrfs/ctree.h | 15
 fs/btrfs/delayed-inode.c | 32
 fs/btrfs/delayed-inode.h | 5
 fs/btrfs/disk-io.c | 12
 fs/btrfs/extent-tree.c | 4
 fs/btrfs/inode.c | 1
 fs/btrfs/ioctl.c | 2
 fs/btrfs/relocation.c | 30
 fs/btrfs/sysfs.c | 146
 fs/btrfs/transaction.c | 114
 fs/btrfs/tree-log.c | 2
 fs/cifs/cifsfs.c | 3
 fs/coda/pioctl.c | 2
 fs/logfs/dir.c | 8
 fs/namei.c | 6
 fs/nfsd/Kconfig | 1
 fs/nfsd/nfsctl.c | 19
 fs/nfsd/vfs.c | 19
 fs/nilfs2/inode.c | 7
 fs/proc/base.c | 6
 fs/proc/proc_sysctl.c | 3
 fs/reiserfs/xattr.c | 2
 fs/timerfd.c | 5
 fs/ubifs/super.c | 1
 include/linux/clocksource.h | 1
 include/linux/device_cgroup.h | 10
 include/linux/input/sh_keysc.h | 2
 include/linux/interrupt.h | 1
 include/linux/smp.h | 5
 include/linux/sunrpc/gss_krb5_enctypes.h | 4
 include/net/netfilter/nf_conntrack.h | 6
 include/trace/events/irq.h | 3
 init/main.c | 1
 kernel/rcutree.c | 398
 kernel/rcutree.h | 12
 kernel/rcutree_plugin.h | 419
 kernel/rcutree_trace.c | 32
 kernel/smp.c | 5
 kernel/softirq.c | 2
 kernel/time/clocksource.c | 24
 kernel/trace/trace_printk.c | 5
 net/8021q/vlan.c | 2
 net/bluetooth/hci_event.c | 18
 net/bluetooth/l2cap_sock.c | 1
 net/bluetooth/rfcomm/sock.c | 1
 net/bluetooth/sco.c | 13
 net/bridge/br_device.c | 1
 net/bridge/br_multicast.c | 4
 net/caif/cfmuxl.c | 2
 net/ieee802154/nl-phy.c | 3
 net/ipv4/af_inet.c | 1
 net/ipv4/inet_diag.c | 14
 net/ipv4/netfilter/ip_queue.c | 3
 net/ipv4/netfilter/ip_tables.c | 2
 net/ipv4/netfilter/ipt_ecn.c | 7
 net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 4
 net/ipv4/ping.c | 1
 net/ipv4/route.c | 4
 net/ipv4/tcp_ipv4.c | 1
 net/ipv6/netfilter/ip6_queue.c | 3
 net/ipv6/tcp_ipv6.c | 1
 net/netfilter/ipvs/ip_vs_conn.c | 10
 net/netfilter/ipvs/ip_vs_core.c | 1
 net/netfilter/nfnetlink_log.c | 3
 net/netfilter/nfnetlink_queue.c | 3
 net/sunrpc/auth_gss/gss_krb5_mech.c | 3
 security/device_cgroup.c | 8
 tools/perf/Makefile | 2
 tools/perf/util/trace-event-parse.c | 1
 112 files changed, 1366 insertions(+), 785 deletions(-)
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index f48178024067..db3b1aba32a3 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -843,6 +843,7 @@ Provides counts of softirq handlers serviced since boot time, for each cpu.
    TASKLET:          0          0          0        290
      SCHED:      27035      26983      26971      26746
    HRTIMER:          0          0          0          0
+       RCU:       1678       1769       2178       2250
 
 
 1.3  IDE devices in /proc/ide
diff --git a/MAINTAINERS b/MAINTAINERS
index 502f2dd761eb..f0358cd91de3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2291,8 +2291,7 @@ F: drivers/scsi/eata_pio.*
 
 EBTABLES
 M:	Bart De Schuymer <bart.de.schuymer@pandora.be>
-L:	ebtables-user@lists.sourceforge.net
-L:	ebtables-devel@lists.sourceforge.net
+L:	netfilter-devel@vger.kernel.org
 W:	http://ebtables.sourceforge.net/
 S:	Maintained
 F:	include/linux/netfilter_bridge/ebt_*.h
@@ -7007,6 +7006,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.
 S:	Maintained
 F:	drivers/platform/x86
 
+X86 MCE INFRASTRUCTURE
+M:	Tony Luck <tony.luck@intel.com>
+M:	Borislav Petkov <bp@amd64.org>
+L:	linux-edac@vger.kernel.org
+S:	Maintained
+F:	arch/x86/kernel/cpu/mcheck/*
+
 XEN HYPERVISOR INTERFACE
 M:	Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 38b95e949d13..63621f152c98 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -23,6 +23,8 @@
 #include <linux/io.h>
 
 #include <asm/mach/time.h>
+#include <asm/hardware/gic.h>
+
 #include <mach/msm_iomap.h>
 #include <mach/cpu.h>
 
@@ -55,10 +57,12 @@ enum timer_location {
 #if defined(CONFIG_ARCH_QSD8X50)
 #define DGT_HZ (19200000 / 4) /* 19.2 MHz / 4 by default */
 #define MSM_DGT_SHIFT (0)
-#elif defined(CONFIG_ARCH_MSM7X30) || defined(CONFIG_ARCH_MSM8X60) || \
-	defined(CONFIG_ARCH_MSM8960)
+#elif defined(CONFIG_ARCH_MSM7X30)
 #define DGT_HZ (24576000 / 4) /* 24.576 MHz (LPXO) / 4 by default */
 #define MSM_DGT_SHIFT (0)
+#elif defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960)
+#define DGT_HZ (27000000 / 4) /* 27 MHz (PXO) / 4 by default */
+#define MSM_DGT_SHIFT (0)
 #else
 #define DGT_HZ 19200000 /* 19.2 MHz or 600 KHz after shift */
 #define MSM_DGT_SHIFT (5)
@@ -100,7 +104,11 @@ static cycle_t msm_read_timer_count(struct clocksource *cs)
 {
 	struct msm_clock *clk = container_of(cs, struct msm_clock, clocksource);
 
-	return readl(clk->global_counter);
+	/*
+	 * Shift timer count down by a constant due to unreliable lower bits
+	 * on some targets.
+	 */
+	return readl(clk->global_counter) >> clk->shift;
 }
 
 static struct msm_clock *clockevent_to_clock(struct clock_event_device *evt)
diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h
index 19ae14ba6978..0cd3800f33b9 100644
--- a/arch/x86/include/asm/memblock.h
+++ b/arch/x86/include/asm/memblock.h
@@ -4,7 +4,6 @@
 #define ARCH_DISCARD_MEMBLOCK
 
 u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align);
-void memblock_x86_to_bootmem(u64 start, u64 end);
 
 void memblock_x86_reserve_range(u64 start, u64 end, char *name);
 void memblock_x86_free_range(u64 start, u64 end);
@@ -19,5 +18,6 @@ u64 memblock_x86_hole_size(u64 start, u64 end);
 u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align);
 u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit);
 u64 memblock_x86_memory_in_range(u64 addr, u64 limit);
+bool memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align);
 
 #endif
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 31d84acc1512..a518c0a45044 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -22,6 +22,8 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
 	u64 product;
 #ifdef __i386__
 	u32 tmp1, tmp2;
+#else
+	ulong tmp;
 #endif
 
 	if (shift < 0)
@@ -42,8 +44,11 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
 		: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
 #elif defined(__x86_64__)
 	__asm__ (
-		"mul %%rdx ; shrd $32,%%rdx,%%rax"
-		: "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
+		"mul %[mul_frac] ; shrd $32, %[hi], %[lo]"
+		: [lo]"=a"(product),
+		  [hi]"=d"(tmp)
+		: "0"(delta),
+		  [mul_frac]"rm"((u64)mul_frac));
 #else
 #error implement me!
 #endif
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index bd14bb4c8594..aee38623b768 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -565,7 +565,7 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
 
 static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
 {
-	return gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
+	return !gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
 }
 
 static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 6c4dc010c4cb..9d03ad4dd5ec 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -121,7 +121,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 				    gva_t addr, u32 access)
 {
 	pt_element_t pte;
-	pt_element_t __user *ptep_user;
+	pt_element_t __user *uninitialized_var(ptep_user);
 	gfn_t table_gfn;
 	unsigned index, pt_access, uninitialized_var(pte_access);
 	gpa_t pte_gpa;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4c3fa0f67469..d48ec60ea421 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2047,7 +2047,8 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
 					unsigned long cr0,
 					struct kvm_vcpu *vcpu)
 {
-	vmx_decache_cr3(vcpu);
+	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
+		vmx_decache_cr3(vcpu);
 	if (!(cr0 & X86_CR0_PG)) {
 		/* From paging/starting to nonpaging */
 		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c
index aa1169392b83..992da5ec5a64 100644
--- a/arch/x86/mm/memblock.c
+++ b/arch/x86/mm/memblock.c
@@ -8,7 +8,7 @@
 #include <linux/range.h>
 
 /* Check for already reserved areas */
-static bool __init check_with_memblock_reserved_size(u64 *addrp, u64 *sizep, u64 align)
+bool __init memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align)
 {
 	struct memblock_region *r;
 	u64 addr = *addrp, last;
@@ -59,7 +59,7 @@ u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
 		if (addr >= ei_last)
 			continue;
 		*sizep = ei_last - addr;
-		while (check_with_memblock_reserved_size(&addr, sizep, align))
+		while (memblock_x86_check_reserved_size(&addr, sizep, align))
 			;
 
 		if (*sizep)
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 0d3a4fa34560..474356b98ede 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -310,14 +310,31 @@ void __init efi_reserve_boot_services(void)
 
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
 		efi_memory_desc_t *md = p;
-		unsigned long long start = md->phys_addr;
-		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+		u64 start = md->phys_addr;
+		u64 size = md->num_pages << EFI_PAGE_SHIFT;
 
 		if (md->type != EFI_BOOT_SERVICES_CODE &&
 		    md->type != EFI_BOOT_SERVICES_DATA)
 			continue;
-
-		memblock_x86_reserve_range(start, start + size, "EFI Boot");
+		/* Only reserve where possible:
+		 *  - Not within any already allocated areas
+		 *  - Not over any memory area (really needed, if above?)
+		 *  - Not within any part of the kernel
+		 *  - Not the bios reserved area
+		 */
+		if ((start+size >= virt_to_phys(_text)
+				&& start <= virt_to_phys(_end)) ||
+			!e820_all_mapped(start, start+size, E820_RAM) ||
+			memblock_x86_check_reserved_size(&start, &size,
+						1<<EFI_PAGE_SHIFT)) {
+			/* Could not reserve, skip it */
+			md->num_pages = 0;
+			memblock_dbg(PFX "Could not reserve boot range "
+					"[0x%010llx-0x%010llx]\n",
+						start, start+size-1);
+		} else
+			memblock_x86_reserve_range(start, start+size,
+						"EFI Boot");
 	}
 }
 
@@ -334,6 +351,10 @@ static void __init efi_free_boot_services(void)
 		    md->type != EFI_BOOT_SERVICES_DATA)
 			continue;
 
+		/* Could not reserve boot area */
+		if (!size)
+			continue;
+
 		free_bootmem_late(start, size);
 	}
 }
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index dd7b88f2ec7a..5525163a0398 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1033,6 +1033,13 @@ static void xen_machine_halt(void)
 	xen_reboot(SHUTDOWN_poweroff);
 }
 
+static void xen_machine_power_off(void)
+{
+	if (pm_power_off)
+		pm_power_off();
+	xen_reboot(SHUTDOWN_poweroff);
+}
+
 static void xen_crash_shutdown(struct pt_regs *regs)
 {
 	xen_reboot(SHUTDOWN_crash);
@@ -1058,7 +1065,7 @@ int xen_panic_handler_init(void)
 static const struct machine_ops xen_machine_ops __initconst = {
 	.restart = xen_restart,
 	.halt = xen_machine_halt,
-	.power_off = xen_machine_halt,
+	.power_off = xen_machine_power_off,
 	.shutdown = xen_machine_halt,
 	.crash_shutdown = xen_crash_shutdown,
 	.emergency_restart = xen_emergency_restart,
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index dc708dcc62f1..673e968df3cf 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -59,6 +59,7 @@
 #include <asm/page.h>
 #include <asm/init.h>
 #include <asm/pat.h>
+#include <asm/smp.h>
 
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
@@ -1231,7 +1232,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
 {
 	struct {
 		struct mmuext_op op;
-		DECLARE_BITMAP(mask, NR_CPUS);
+		DECLARE_BITMAP(mask, num_processors);
 	} *args;
 	struct multicall_space mcs;
 
@@ -1599,6 +1600,11 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
 	for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
 		pte_t pte;
 
+#ifdef CONFIG_X86_32
+		if (pfn > max_pfn_mapped)
+			max_pfn_mapped = pfn;
+#endif
+
 		if (!pte_none(pte_page[pteidx]))
 			continue;
 
@@ -1766,7 +1772,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
 	initial_kernel_pmd =
 		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
 
-	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
+	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
+				  xen_start_info->nr_pt_frames * PAGE_SIZE +
+				  512*1024);
 
 	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
 	memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index be1a464f6d66..60aeeb56948f 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -227,11 +227,7 @@ char * __init xen_memory_setup(void)
 
 	memcpy(map_raw, map, sizeof(map));
 	e820.nr_map = 0;
-#ifdef CONFIG_X86_32
 	xen_extra_mem_start = mem_end;
-#else
-	xen_extra_mem_start = max((1ULL << 32), mem_end);
-#endif
 	for (i = 0; i < memmap.nr_entries; i++) {
 		unsigned long long end;
 
@@ -266,6 +262,12 @@ char * __init xen_memory_setup(void)
 		if (map[i].size > 0)
 			e820_add_region(map[i].addr, map[i].size, map[i].type);
 	}
+	/* Align the balloon area so that max_low_pfn does not get set
+	 * to be at the _end_ of the PCI gap at the far end (fee01000).
+	 * Note that xen_extra_mem_start gets set in the loop above to be
+	 * past the last E820 region. */
+	if (xen_initial_domain() && (xen_extra_mem_start < (1ULL<<32)))
+		xen_extra_mem_start = (1ULL<<32);
 
 	/*
 	 * In domU, the ISA region is normal, usable memory, but we
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 41038c01de40..b4533a86d7e4 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -205,11 +205,18 @@ static void __init xen_smp_prepare_boot_cpu(void)
 static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 {
 	unsigned cpu;
+	unsigned int i;
 
 	xen_init_lock_cpu(0);
 
 	smp_store_cpu_info(0);
 	cpu_data(0).x86_max_cores = 1;
+
+	for_each_possible_cpu(i) {
+		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
+		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
+	}
 	set_cpu_sibling_map(0);
 
 	if (xen_smp_intr_init(0))
diff --git a/drivers/bluetooth/btmrvl_debugfs.c b/drivers/bluetooth/btmrvl_debugfs.c
index fd6305bf953e..8ecf4c6c2874 100644
--- a/drivers/bluetooth/btmrvl_debugfs.c
+++ b/drivers/bluetooth/btmrvl_debugfs.c
@@ -64,6 +64,8 @@ static ssize_t btmrvl_hscfgcmd_write(struct file *file,
 		return -EFAULT;
 
 	ret = strict_strtol(buf, 10, &result);
+	if (ret)
+		return ret;
 
 	priv->btmrvl_dev.hscfgcmd = result;
 
@@ -108,6 +110,8 @@ static ssize_t btmrvl_psmode_write(struct file *file, const char __user *ubuf,
 		return -EFAULT;
 
 	ret = strict_strtol(buf, 10, &result);
+	if (ret)
+		return ret;
 
 	priv->btmrvl_dev.psmode = result;
 
@@ -147,6 +151,8 @@ static ssize_t btmrvl_pscmd_write(struct file *file, const char __user *ubuf,
 		return -EFAULT;
 
 	ret = strict_strtol(buf, 10, &result);
+	if (ret)
+		return ret;
 
 	priv->btmrvl_dev.pscmd = result;
 
@@ -191,6 +197,8 @@ static ssize_t btmrvl_gpiogap_write(struct file *file, const char __user *ubuf,
 		return -EFAULT;
 
 	ret = strict_strtol(buf, 16, &result);
+	if (ret)
+		return ret;
 
 	priv->btmrvl_dev.gpio_gap = result;
 
@@ -230,6 +238,8 @@ static ssize_t btmrvl_hscmd_write(struct file *file, const char __user *ubuf,
 		return -EFAULT;
 
 	ret = strict_strtol(buf, 10, &result);
+	if (ret)
+		return ret;
 
 	priv->btmrvl_dev.hscmd = result;
 	if (priv->btmrvl_dev.hscmd) {
@@ -272,6 +282,8 @@ static ssize_t btmrvl_hsmode_write(struct file *file, const char __user *ubuf,
 		return -EFAULT;
 
 	ret = strict_strtol(buf, 10, &result);
+	if (ret)
+		return ret;
 
 	priv->btmrvl_dev.hsmode = result;
 
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index be0921ef6b52..4cf25347b015 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -111,7 +111,8 @@ static void evdev_event(struct input_handle *handle,
 
 	rcu_read_unlock();
 
-	wake_up_interruptible(&evdev->wait);
+	if (type == EV_SYN && code == SYN_REPORT)
+		wake_up_interruptible(&evdev->wait);
 }
 
 static int evdev_fasync(int fd, struct file *file, int on)
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 75e11c7b70fd..da38d97a51b1 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1756,7 +1756,7 @@ static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
 	} else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
 		mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
 			   dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1,
-			   clamp(mt_slots, 2, 32);
+		mt_slots = clamp(mt_slots, 2, 32);
 	} else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
 		mt_slots = 2;
 	} else {
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index f23a743817db..33d0bdc837c0 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -209,6 +209,7 @@ static void omap_kp_tasklet(unsigned long data)
 #endif
 		}
 	}
+	input_sync(omap_kp_data->input);
 	memcpy(keypad_state, new_state, sizeof(keypad_state));
 
 	if (key_down) {
diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c
index 834cf98e7efb..6876700a4469 100644
--- a/drivers/input/keyboard/sh_keysc.c
+++ b/drivers/input/keyboard/sh_keysc.c
@@ -32,7 +32,7 @@ static const struct {
 	[SH_KEYSC_MODE_3] = { 2, 4, 7 },
 	[SH_KEYSC_MODE_4] = { 3, 6, 6 },
 	[SH_KEYSC_MODE_5] = { 4, 6, 7 },
-	[SH_KEYSC_MODE_6] = { 5, 7, 7 },
+	[SH_KEYSC_MODE_6] = { 5, 8, 8 },
 };
 
 struct sh_keysc_priv {
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 257e033986e4..0110b5a3a167 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -187,7 +187,7 @@ static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev,
 		if (size == 0)
 			size = xres ? : 1;
 
-		clamp(value, min, max);
+		value = clamp(value, min, max);
 
 		mousedev->packet.x = ((value - min) * xres) / size;
 		mousedev->packet.abs_event = 1;
@@ -201,7 +201,7 @@ static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev,
 		if (size == 0)
 			size = yres ? : 1;
 
-		clamp(value, min, max);
+		value = clamp(value, min, max);
 
 		mousedev->packet.y = yres - ((value - min) * yres) / size;
 		mousedev->packet.abs_event = 1;
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index 59de638225fe..e35058bcd7b9 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -156,8 +156,10 @@ static int if_open(struct tty_struct *tty, struct file *filp)
 	if (!cs || !try_module_get(cs->driver->owner))
 		return -ENODEV;
 
-	if (mutex_lock_interruptible(&cs->mutex))
+	if (mutex_lock_interruptible(&cs->mutex)) {
+		module_put(cs->driver->owner);
 		return -ERESTARTSYS;
+	}
 	tty->driver_data = cs;
 
 	++cs->open_count;
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index ee5109a3cd98..42f067347bc7 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -495,14 +495,14 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += skb->len;
+
 	if (atomic_dec_return(&queued_msg->use_count) == 0) {
 		dev_kfree_skb(skb);
 		kfree(queued_msg);
 	}
 
-	dev->stats.tx_packets++;
-	dev->stats.tx_bytes += skb->len;
-
 	return NETDEV_TX_OK;
 }
 
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index d84f6e8903a5..5b732988d493 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -412,7 +412,7 @@ el2_open(struct net_device *dev)
 		outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR);
 		outb_p(0x00, E33G_IDCFR);
 		msleep(1);
-		free_irq(*irqp, el2_probe_interrupt);
+		free_irq(*irqp, &seen);
 		if (!seen)
 			continue;
 
@@ -422,6 +422,7 @@ el2_open(struct net_device *dev)
 			continue;
 		if (retval < 0)
 			goto err_disable;
+		break;
 	} while (*++irqp);
 
 	if (*irqp == 0) {
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 68d45ba2d9b9..6c019e148546 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -52,13 +52,13 @@ MODULE_DESCRIPTION(DRV_DESC);
 MODULE_ALIAS("platform:bfin_mac");
 
 #if defined(CONFIG_BFIN_MAC_USE_L1)
-# define bfin_mac_alloc(dma_handle, size) l1_data_sram_zalloc(size)
-# define bfin_mac_free(dma_handle, ptr) l1_data_sram_free(ptr)
+# define bfin_mac_alloc(dma_handle, size, num) l1_data_sram_zalloc(size*num)
+# define bfin_mac_free(dma_handle, ptr, num) l1_data_sram_free(ptr)
 #else
-# define bfin_mac_alloc(dma_handle, size) \
-	dma_alloc_coherent(NULL, size, dma_handle, GFP_KERNEL)
-# define bfin_mac_free(dma_handle, ptr) \
-	dma_free_coherent(NULL, sizeof(*ptr), ptr, dma_handle)
+# define bfin_mac_alloc(dma_handle, size, num) \
+	dma_alloc_coherent(NULL, size*num, dma_handle, GFP_KERNEL)
+# define bfin_mac_free(dma_handle, ptr, num) \
+	dma_free_coherent(NULL, sizeof(*ptr)*num, ptr, dma_handle)
 #endif
 
 #define PKT_BUF_SZ 1580
@@ -95,7 +95,7 @@ static void desc_list_free(void)
 				t = t->next;
 			}
 		}
-		bfin_mac_free(dma_handle, tx_desc);
+		bfin_mac_free(dma_handle, tx_desc, CONFIG_BFIN_TX_DESC_NUM);
 	}
 
 	if (rx_desc) {
@@ -109,7 +109,7 @@ static void desc_list_free(void)
 				r = r->next;
 			}
 		}
-		bfin_mac_free(dma_handle, rx_desc);
+		bfin_mac_free(dma_handle, rx_desc, CONFIG_BFIN_RX_DESC_NUM);
 	}
 }
 
@@ -126,13 +126,13 @@ static int desc_list_init(void)
 #endif
 
 	tx_desc = bfin_mac_alloc(&dma_handle,
-				sizeof(struct net_dma_desc_tx) *
+				sizeof(struct net_dma_desc_tx),
 				CONFIG_BFIN_TX_DESC_NUM);
 	if (tx_desc == NULL)
 		goto init_error;
 
 	rx_desc = bfin_mac_alloc(&dma_handle,
-				sizeof(struct net_dma_desc_rx) *
+				sizeof(struct net_dma_desc_rx),
 				CONFIG_BFIN_RX_DESC_NUM);
 	if (rx_desc == NULL)
 		goto init_error;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 652b30e525d0..eafe44a528ac 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1297,6 +1297,7 @@ static inline int slave_enable_netpoll(struct slave *slave)
 		goto out;
 
 	np->dev = slave->dev;
+	strlcpy(np->dev_name, slave->dev->name, IFNAMSIZ);
 	err = __netpoll_setup(np);
 	if (err) {
 		kfree(np);
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 7a84e45487e8..7583a9572bcc 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -105,7 +105,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
 		goto out_ep;
 
 	fep->fcc.mem = (void __iomem *)cpm2_immr;
-	fpi->dpram_offset = cpm_dpalloc(128, 8);
+	fpi->dpram_offset = cpm_dpalloc(128, 32);
 	if (IS_ERR_VALUE(fpi->dpram_offset)) {
 		ret = fpi->dpram_offset;
 		goto out_fcccp;
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 8e10d2f6a5ad..c3ecb118c1df 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -1580,12 +1580,12 @@ static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
 	hp100_outl(ringptr->pdl_paddr, TX_PDA_L);	/* Low Prio. Queue */
 
 	lp->txrcommit++;
-	spin_unlock_irqrestore(&lp->lock, flags);
 
-	/* Update statistics */
 	dev->stats.tx_packets++;
 	dev->stats.tx_bytes += skb->len;
 
+	spin_unlock_irqrestore(&lp->lock, flags);
+
 	return NETDEV_TX_OK;
 
 drop:
diff --git a/drivers/net/hplance.c b/drivers/net/hplance.c
index b6060f7538df..a900d5bf2948 100644
--- a/drivers/net/hplance.c
+++ b/drivers/net/hplance.c
@@ -135,7 +135,7 @@ static void __devexit hplance_remove_one(struct dio_dev *d)
 }
 
 /* Initialise a single lance board at the given DIO device */
-static void __init hplance_init(struct net_device *dev, struct dio_dev *d)
+static void __devinit hplance_init(struct net_device *dev, struct dio_dev *d)
 {
 	unsigned long va = (d->resource.start + DIO_VIRADDRBASE);
 	struct hplance_private *lp;
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index b644383017f9..c0788a31ff0f 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1965,11 +1965,11 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 	netxen_tso_check(netdev, tx_ring, first_desc, skb);
 
-	netxen_nic_update_cmd_producer(adapter, tx_ring);
-
 	adapter->stats.txbytes += skb->len;
 	adapter->stats.xmitcalled++;
 
+	netxen_nic_update_cmd_producer(adapter, tx_ring);
+
 	return NETDEV_TX_OK;
 
 drop_packet:
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 392a6c4b72e5..a70244306c94 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -58,6 +58,7 @@ config BROADCOM_PHY
 
 config BCM63XX_PHY
 	tristate "Drivers for Broadcom 63xx SOCs internal PHY"
+	depends on BCM63XX
 	---help---
 	  Currently supports the 6348 and 6358 PHYs.
 
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index b0c9522bb535..2cd8dc5847b4 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -543,11 +543,20 @@ static void recalibrate(struct dp83640_clock *clock)
 
 /* time stamping methods */
 
-static void decode_evnt(struct dp83640_private *dp83640,
-			struct phy_txts *phy_txts, u16 ests)
+static int decode_evnt(struct dp83640_private *dp83640,
+		       void *data, u16 ests)
 {
+	struct phy_txts *phy_txts;
 	struct ptp_clock_event event;
 	int words = (ests >> EVNT_TS_LEN_SHIFT) & EVNT_TS_LEN_MASK;
+	u16 ext_status = 0;
+
+	if (ests & MULT_EVNT) {
+		ext_status = *(u16 *) data;
+		data += sizeof(ext_status);
+	}
+
+	phy_txts = data;
 
 	switch (words) { /* fall through in every case */
 	case 3:
@@ -565,6 +574,9 @@ static void decode_evnt(struct dp83640_private *dp83640,
 	event.timestamp = phy2txts(&dp83640->edata);
 
 	ptp_clock_event(dp83640->clock->ptp_clock, &event);
+
+	words = ext_status ? words + 2 : words + 1;
+	return words * sizeof(u16);
 }
 
 static void decode_rxts(struct dp83640_private *dp83640,
@@ -643,9 +655,7 @@ static void decode_status_frame(struct dp83640_private *dp83640,
 
 	} else if (PSF_EVNT == type && len >= sizeof(*phy_txts)) {
 
-		phy_txts = (struct phy_txts *) ptr;
-		decode_evnt(dp83640, phy_txts, ests);
-		size = sizeof(*phy_txts);
+		size = decode_evnt(dp83640, ptr, ests);
 
 	} else {
 		size = 0;
@@ -1034,8 +1044,8 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
 
 	if (is_status_frame(skb, type)) {
 		decode_status_frame(dp83640, skb);
-		/* Let the stack drop this frame. */
-		return false;
+		kfree_skb(skb);
+		return true;
 	}
 
 	SKB_PTP_TYPE(skb) = type;
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index a1b82c9c67d2..c554a397e558 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -523,7 +523,7 @@ static void ppp_async_process(unsigned long arg)
 #define PUT_BYTE(ap, buf, c, islcp)	do {		\
 	if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\
 		*buf++ = PPP_ESCAPE;			\
-		*buf++ = c ^ 0x20;			\
+		*buf++ = c ^ PPP_TRANS;			\
 	} else						\
 		*buf++ = c;				\
 } while (0)
@@ -896,7 +896,7 @@ ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
 		sp = skb_put(skb, n);
 		memcpy(sp, buf, n);
 		if (ap->state & SC_ESCAPE) {
-			sp[0] ^= 0x20;
+			sp[0] ^= PPP_TRANS;
 			ap->state &= ~SC_ESCAPE;
 		}
 	}
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c
index 89f7540d90f9..5f597ca592bb 100644
--- a/drivers/net/pxa168_eth.c
+++ b/drivers/net/pxa168_eth.c
@@ -1273,7 +1273,7 @@ static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	wmb();
 	wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);
 
-	stats->tx_bytes += skb->len;
+	stats->tx_bytes += length;
 	stats->tx_packets++;
 	dev->trans_start = jiffies;
 	if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index ef1ce2ebeb4a..05d81780d1fd 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1621,7 +1621,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
 	 *
 	 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
 	 */
-	static const struct {
+	static const struct rtl_mac_info {
 		u32 mask;
 		u32 val;
 		int mac_version;
@@ -1689,7 +1689,8 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
 
 		/* Catch-all */
 		{ 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE }
-	}, *p = mac_info;
+	};
+	const struct rtl_mac_info *p = mac_info;
 	u32 reg;
 
 	reg = RTL_R32(TxConfig);
@@ -3681,7 +3682,7 @@ static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
 
 static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
 {
-	static const struct {
+	static const struct rtl_cfg2_info {
 		u32 mac_version;
 		u32 clk;
 		u32 val;
@@ -3690,7 +3691,8 @@ static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
 		{ RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
 		{ RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
 		{ RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
-	}, *p = cfg2_info;
+	};
+	const struct rtl_cfg2_info *p = cfg2_info;
 	unsigned int i;
 	u32 clk;
 
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 74e94054ab1a..5235f48be1be 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -460,7 +460,23 @@ static u32 tun_net_fix_features(struct net_device *dev, u32 features)
 
 	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
 }
-
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void tun_poll_controller(struct net_device *dev)
+{
+	/*
+	 * Tun only receives frames when:
+	 * 1) the char device endpoint gets data from user space
+	 * 2) the tun socket gets a sendmsg call from user space
+	 * Since both of those are syncronous operations, we are guaranteed
+	 * never to have pending data when we poll for it
+	 * so theres nothing to do here but return.
+	 * We need this though so netpoll recognizes us as an interface that
+	 * supports polling, which enables bridge devices in virt setups to
+	 * still use netconsole
+	 */
+	return;
+}
+#endif
 static const struct net_device_ops tun_netdev_ops = {
 	.ndo_uninit = tun_net_uninit,
 	.ndo_open = tun_net_open,
@@ -468,6 +484,9 @@ static const struct net_device_ops tun_netdev_ops = {
 	.ndo_start_xmit = tun_net_xmit,
 	.ndo_change_mtu = tun_net_change_mtu,
 	.ndo_fix_features = tun_net_fix_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = tun_poll_controller,
+#endif
 };
 
 static const struct net_device_ops tap_netdev_ops = {
@@ -480,6 +499,9 @@ static const struct net_device_ops tap_netdev_ops = {
 	.ndo_set_multicast_list = tun_net_mclist,
 	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = tun_poll_controller,
+#endif
 };
 
 /* Initialize net device. */
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 9d4f9117260f..84d4608153c9 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -385,6 +385,16 @@ config USB_NET_CX82310_ETH
 	  router with USB ethernet port. This driver is for routers only,
 	  it will not work with ADSL modems (use cxacru driver instead).
 
+config USB_NET_KALMIA
+	tristate "Samsung Kalmia based LTE USB modem"
+	depends on USB_USBNET
+	help
+	  Choose this option if you have a Samsung Kalmia based USB modem
+	  as Samsung GT-B3730.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called kalmia.
+
 config USB_HSO
 	tristate "Option USB High Speed Mobile Devices"
 	depends on USB && RFKILL
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index c7ec8a5f0a90..c203fa21f6b1 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o
 obj-$(CONFIG_USB_USBNET)	+= usbnet.o
 obj-$(CONFIG_USB_NET_INT51X1)	+= int51x1.o
 obj-$(CONFIG_USB_CDC_PHONET)	+= cdc-phonet.o
+obj-$(CONFIG_USB_NET_KALMIA)	+= kalmia.o
 obj-$(CONFIG_USB_IPHETH)	+= ipheth.o
 obj-$(CONFIG_USB_SIERRA_NET)	+= sierra_net.o
 obj-$(CONFIG_USB_NET_CX82310_ETH)	+= cx82310_eth.o
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
new file mode 100644
index 000000000000..d965fb1e013e
--- /dev/null
+++ b/drivers/net/usb/kalmia.c
@@ -0,0 +1,384 @@
1/*
2 * USB network interface driver for Samsung Kalmia based LTE USB modem like the
3 * Samsung GT-B3730 and GT-B3710.
4 *
5 * Copyright (C) 2011 Marius Bjoernstad Kotsbak <marius@kotsbak.com>
6 *
7 * Sponsored by Quicklink Video Distribution Services Ltd.
8 *
9 * Based on the cdc_eem module.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 */
16
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/netdevice.h>
20#include <linux/etherdevice.h>
21#include <linux/ctype.h>
22#include <linux/ethtool.h>
23#include <linux/workqueue.h>
24#include <linux/mii.h>
25#include <linux/usb.h>
26#include <linux/crc32.h>
27#include <linux/usb/cdc.h>
28#include <linux/usb/usbnet.h>
29#include <linux/gfp.h>
30
31/*
32 * The Samsung Kalmia based LTE USB modems have a CDC ACM port for modem control
33 * handled by the "option" module and an ethernet data port handled by this
34 * module.
35 *
36 * The stick must first be switched into modem mode by usb_modeswitch
37 * or similar tool. Then the modem gets sent two initialization packets by
38 * this module, which gives the MAC address of the device. User space can then
39 * connect the modem using AT commands through the ACM port and then use
40 * DHCP on the network interface exposed by this module. Network packets are
41 * sent to and from the modem in a proprietary format discovered after watching
42 * the behavior of the windows driver for the modem.
43 *
44 * More information about the use of the modem is available in usb_modeswitch
45 * forum and the project page:
46 *
47 * http://www.draisberghof.de/usb_modeswitch/bb/viewtopic.php?t=465
48 * https://github.com/mkotsbak/Samsung-GT-B3730-linux-driver
49 */
50
51/* #define DEBUG */
52/* #define VERBOSE */
53
54#define KALMIA_HEADER_LENGTH 6
55#define KALMIA_ALIGN_SIZE 4
56#define KALMIA_USB_TIMEOUT 10000
57
58/*-------------------------------------------------------------------------*/
59
60static int
61kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len,
62 u8 *buffer, u8 expected_len)
63{
64 int act_len;
65 int status;
66
67 netdev_dbg(dev->net, "Sending init packet");
68
69 status = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 0x02),
70 init_msg, init_msg_len, &act_len, KALMIA_USB_TIMEOUT);
71 if (status != 0) {
72 netdev_err(dev->net,
73 "Error sending init packet. Status %i, length %i\n",
74 status, act_len);
75 return status;
76 }
77 else if (act_len != init_msg_len) {
78 netdev_err(dev->net,
79 "Did not send all of init packet. Bytes sent: %i",
80 act_len);
81 }
82 else {
83 netdev_dbg(dev->net, "Successfully sent init packet.");
84 }
85
86 status = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, 0x81),
87 buffer, expected_len, &act_len, KALMIA_USB_TIMEOUT);
88
89 if (status != 0)
90 netdev_err(dev->net,
91 "Error receiving init result. Status %i, length %i\n",
92 status, act_len);
93 else if (act_len != expected_len)
94 netdev_err(dev->net, "Unexpected init result length: %i\n",
95 act_len);
96
97 return status;
98}
99
100static int
101kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr)
102{
103 char init_msg_1[] =
104 { 0x57, 0x50, 0x04, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00,
105 0x00, 0x00 };
106 char init_msg_2[] =
107 { 0x57, 0x50, 0x04, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0xf4,
108 0x00, 0x00 };
109 char receive_buf[28];
110 int status;
111
112 status = kalmia_send_init_packet(dev, init_msg_1, sizeof(init_msg_1)
113 / sizeof(init_msg_1[0]), receive_buf, 24);
114 if (status != 0)
115 return status;
116
117 status = kalmia_send_init_packet(dev, init_msg_2, sizeof(init_msg_2)
118 / sizeof(init_msg_2[0]), receive_buf, 28);
119 if (status != 0)
120 return status;
121
122 memcpy(ethernet_addr, receive_buf + 10, ETH_ALEN);
123
124 return status;
125}
126
127static int
128kalmia_bind(struct usbnet *dev, struct usb_interface *intf)
129{
130 u8 status;
131 u8 ethernet_addr[ETH_ALEN];
132
133 /* Don't bind to AT command interface */
134 if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
135 return -EINVAL;
136
137 dev->in = usb_rcvbulkpipe(dev->udev, 0x81 & USB_ENDPOINT_NUMBER_MASK);
138 dev->out = usb_sndbulkpipe(dev->udev, 0x02 & USB_ENDPOINT_NUMBER_MASK);
139 dev->status = NULL;
140
141 dev->net->hard_header_len += KALMIA_HEADER_LENGTH;
142 dev->hard_mtu = 1400;
143 dev->rx_urb_size = dev->hard_mtu * 10; // Found as optimal after testing
144
145 status = kalmia_init_and_get_ethernet_addr(dev, ethernet_addr);
146
147 if (status < 0) {
148 usb_set_intfdata(intf, NULL);
149 usb_driver_release_interface(driver_of(intf), intf);
150 return status;
151 }
152
153 memcpy(dev->net->dev_addr, ethernet_addr, ETH_ALEN);
154 memcpy(dev->net->perm_addr, ethernet_addr, ETH_ALEN);
155
156 return status;
157}
158
159static struct sk_buff *
160kalmia_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
161{
162 struct sk_buff *skb2 = NULL;
163 u16 content_len;
164 unsigned char *header_start;
165 unsigned char ether_type_1, ether_type_2;
166 u8 remainder, padlen = 0;
167
168 if (!skb_cloned(skb)) {
169 int headroom = skb_headroom(skb);
170 int tailroom = skb_tailroom(skb);
171
172 if ((tailroom >= KALMIA_ALIGN_SIZE) && (headroom
173 >= KALMIA_HEADER_LENGTH))
174 goto done;
175
176 if ((headroom + tailroom) > (KALMIA_HEADER_LENGTH
177 + KALMIA_ALIGN_SIZE)) {
178 skb->data = memmove(skb->head + KALMIA_HEADER_LENGTH,
179 skb->data, skb->len);
180 skb_set_tail_pointer(skb, skb->len);
181 goto done;
182 }
183 }
184
185 skb2 = skb_copy_expand(skb, KALMIA_HEADER_LENGTH,
186 KALMIA_ALIGN_SIZE, flags);
187 if (!skb2)
188 return NULL;
189
190 dev_kfree_skb_any(skb);
191 skb = skb2;
192
193 done: header_start = skb_push(skb, KALMIA_HEADER_LENGTH);
194 ether_type_1 = header_start[KALMIA_HEADER_LENGTH + 12];
195 ether_type_2 = header_start[KALMIA_HEADER_LENGTH + 13];
196
197 netdev_dbg(dev->net, "Sending etherType: %02x%02x", ether_type_1,
198 ether_type_2);
199
200 /* According to empiric data for data packages */
201 header_start[0] = 0x57;
202 header_start[1] = 0x44;
203 content_len = skb->len - KALMIA_HEADER_LENGTH;
204 header_start[2] = (content_len & 0xff); /* low byte */
205 header_start[3] = (content_len >> 8); /* high byte */
206
207 header_start[4] = ether_type_1;
208 header_start[5] = ether_type_2;
209
210 /* Align to 4 bytes by padding with zeros */
211 remainder = skb->len % KALMIA_ALIGN_SIZE;
212 if (remainder > 0) {
213 padlen = KALMIA_ALIGN_SIZE - remainder;
214 memset(skb_put(skb, padlen), 0, padlen);
215 }
216
217 netdev_dbg(
218 dev->net,
219 "Sending package with length %i and padding %i. Header: %02x:%02x:%02x:%02x:%02x:%02x.",
220 content_len, padlen, header_start[0], header_start[1],
221 header_start[2], header_start[3], header_start[4],
222 header_start[5]);
223
224 return skb;
225}
226
227static int
228kalmia_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
229{
230 /*
231 * Our task here is to strip off framing, leaving skb with one
232 * data frame for the usbnet framework code to process.
233 */
234 const u8 HEADER_END_OF_USB_PACKET[] =
235 { 0x57, 0x5a, 0x00, 0x00, 0x08, 0x00 };
236 const u8 EXPECTED_UNKNOWN_HEADER_1[] =
237 { 0x57, 0x43, 0x1e, 0x00, 0x15, 0x02 };
238 const u8 EXPECTED_UNKNOWN_HEADER_2[] =
239 { 0x57, 0x50, 0x0e, 0x00, 0x00, 0x00 };
240 u8 i = 0;
241
242 /* incomplete header? */
243 if (skb->len < KALMIA_HEADER_LENGTH)
244 return 0;
245
246 do {
247 struct sk_buff *skb2 = NULL;
248 u8 *header_start;
249 u16 usb_packet_length, ether_packet_length;
250 int is_last;
251
252 header_start = skb->data;
253
254 if (unlikely(header_start[0] != 0x57 || header_start[1] != 0x44)) {
255 if (!memcmp(header_start, EXPECTED_UNKNOWN_HEADER_1,
256 sizeof(EXPECTED_UNKNOWN_HEADER_1)) || !memcmp(
257 header_start, EXPECTED_UNKNOWN_HEADER_2,
258 sizeof(EXPECTED_UNKNOWN_HEADER_2))) {
259 netdev_dbg(
260 dev->net,
261 "Received expected unknown frame header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
262 header_start[0], header_start[1],
263 header_start[2], header_start[3],
264 header_start[4], header_start[5],
265 skb->len - KALMIA_HEADER_LENGTH);
266 }
267 else {
268 netdev_err(
269 dev->net,
270 "Received unknown frame header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
271 header_start[0], header_start[1],
272 header_start[2], header_start[3],
273 header_start[4], header_start[5],
274 skb->len - KALMIA_HEADER_LENGTH);
275 return 0;
276 }
277 }
278 else
279 netdev_dbg(
280 dev->net,
281 "Received header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
282 header_start[0], header_start[1], header_start[2],
283 header_start[3], header_start[4], header_start[5],
284 skb->len - KALMIA_HEADER_LENGTH);
285
286 /* subtract start header and end header */
287 usb_packet_length = skb->len - (2 * KALMIA_HEADER_LENGTH);
288 ether_packet_length = header_start[2] + (header_start[3] << 8);
289 skb_pull(skb, KALMIA_HEADER_LENGTH);
290
291 /* Some small packets misses end marker */
292 if (usb_packet_length < ether_packet_length) {
293 ether_packet_length = usb_packet_length
294 + KALMIA_HEADER_LENGTH;
295 is_last = true;
296 }
297 else {
298 netdev_dbg(dev->net, "Correct package length #%i", i
299 + 1);
300
301 is_last = (memcmp(skb->data + ether_packet_length,
302 HEADER_END_OF_USB_PACKET,
303 sizeof(HEADER_END_OF_USB_PACKET)) == 0);
304 if (!is_last) {
305 header_start = skb->data + ether_packet_length;
306 netdev_dbg(
307 dev->net,
308 "End header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
309 header_start[0], header_start[1],
310 header_start[2], header_start[3],
311 header_start[4], header_start[5],
312 skb->len - KALMIA_HEADER_LENGTH);
313 }
314 }
315
316 if (is_last) {
317 skb2 = skb;
318 }
319 else {
320 skb2 = skb_clone(skb, GFP_ATOMIC);
321 if (unlikely(!skb2))
322 return 0;
323 }
324
325 skb_trim(skb2, ether_packet_length);
326
327 if (is_last) {
328 return 1;
329 }
330 else {
331 usbnet_skb_return(dev, skb2);
332 skb_pull(skb, ether_packet_length);
333 }
334
335 i++;
336 }
337 while (skb->len);
338
339 return 1;
340}
341
342static const struct driver_info kalmia_info = {
343 .description = "Samsung Kalmia LTE USB dongle",
344 .flags = FLAG_WWAN,
345 .bind = kalmia_bind,
346 .rx_fixup = kalmia_rx_fixup,
347 .tx_fixup = kalmia_tx_fixup
348};
349
350/*-------------------------------------------------------------------------*/
351
352static const struct usb_device_id products[] = {
353 /* The unswitched USB ID, to get the module autoloaded: */
354 { USB_DEVICE(0x04e8, 0x689a) },
355 /* The stick switched into modem mode (by e.g. usb_modeswitch): */
356 { USB_DEVICE(0x04e8, 0x6889),
357 .driver_info = (unsigned long) &kalmia_info, },
358 { /* EMPTY == end of list */} };
359MODULE_DEVICE_TABLE(usb, products);
360
361static struct usb_driver kalmia_driver = {
362 .name = "kalmia",
363 .id_table = products,
364 .probe = usbnet_probe,
365 .disconnect = usbnet_disconnect,
366 .suspend = usbnet_suspend,
367 .resume = usbnet_resume
368};
369
370static int __init kalmia_init(void)
371{
372 return usb_register(&kalmia_driver);
373}
374module_init(kalmia_init);
375
376static void __exit kalmia_exit(void)
377{
378 usb_deregister(&kalmia_driver);
379}
380module_exit(kalmia_exit);
381
382MODULE_AUTHOR("Marius Bjoernstad Kotsbak <marius@kotsbak.com>");
383MODULE_DESCRIPTION("Samsung Kalmia USB network driver");
384MODULE_LICENSE("GPL");
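
The rx path above peels concatenated records off one USB transfer: each record starts with a 6-byte header (magic 0x57 0x44, then a little-endian 16-bit payload length in bytes 2-3), and the 0x57 0x5a trailer marks the end of a transfer. A minimal userspace sketch of the header parse, assuming the same layout; parse_kalmia_header and the sample bytes are illustrative, not part of the driver:

#include <stdint.h>
#include <stdio.h>

#define KALMIA_HEADER_LENGTH 6

/* Parse one record header: 0x57 0x44 magic, then a little-endian
 * 16-bit payload length in bytes 2-3. Returns the payload length,
 * or -1 on bad magic or a truncated buffer. */
static int parse_kalmia_header(const uint8_t *buf, size_t len)
{
	if (len < KALMIA_HEADER_LENGTH)
		return -1;
	if (buf[0] != 0x57 || buf[1] != 0x44)
		return -1;
	return buf[2] | (buf[3] << 8);
}

int main(void)
{
	/* 6-byte header announcing a 42-byte payload */
	const uint8_t hdr[] = { 0x57, 0x44, 0x2a, 0x00, 0x00, 0x00 };

	printf("payload length: %d\n",
	       parse_kalmia_header(hdr, sizeof(hdr)));
	return 0;
}
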
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index e050bd65e037..777d1a4e81b2 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2203,8 +2203,10 @@ fst_open(struct net_device *dev)
2203 2203
2204 if (port->mode != FST_RAW) { 2204 if (port->mode != FST_RAW) {
2205 err = hdlc_open(dev); 2205 err = hdlc_open(dev);
2206 if (err) 2206 if (err) {
2207 module_put(THIS_MODULE);
2207 return err; 2208 return err;
2209 }
2208 } 2210 }
2209 2211
2210 fst_openport(port); 2212 fst_openport(port);
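
The fst_open() fix addresses a classic acquire/release imbalance: the function takes a module reference before calling hdlc_open(), so every error return after that point must drop it, or the refcount leaks and the module can never be unloaded. A runnable userspace analogue of the pattern, with a plain counter standing in for try_module_get()/module_put() (all names here are illustrative):

#include <errno.h>
#include <stdio.h>

static int refcount;

static int get_ref(void)  { return ++refcount; }
static void put_ref(void) { --refcount; }

/* Stands in for hdlc_open(); may fail. */
static int open_protocol(int fail)
{
	return fail ? -EIO : 0;
}

/* Mirrors the fixed fst_open(): the reference taken up front must
 * be dropped on every error exit. */
static int device_open(int fail)
{
	int err;

	get_ref();		/* like try_module_get() */
	err = open_protocol(fail);
	if (err) {
		put_ref();	/* the added module_put() */
		return err;
	}
	return 0;
}

int main(void)
{
	device_open(1);
	printf("refcount after failed open: %d\n", refcount);
	return 0;
}
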
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 660831ce293c..687c1f223497 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -1288,6 +1288,8 @@ int mwifiex_register_cfg80211(struct net_device *dev, u8 *mac,
1288 1288
1289 *(unsigned long *) wdev_priv = (unsigned long) priv; 1289 *(unsigned long *) wdev_priv = (unsigned long) priv;
1290 1290
1291 set_wiphy_dev(wdev->wiphy, (struct device *) priv->adapter->dev);
1292
1291 ret = wiphy_register(wdev->wiphy); 1293 ret = wiphy_register(wdev->wiphy);
1292 if (ret < 0) { 1294 if (ret < 0) {
1293 dev_err(priv->adapter->dev, "%s: registering cfg80211 device\n", 1295 dev_err(priv->adapter->dev, "%s: registering cfg80211 device\n",
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 32261189bcef..aeac3cc4dbe4 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -2474,6 +2474,7 @@ struct mwl8k_cmd_set_hw_spec {
2474 * faster client. 2474 * faster client.
2475 */ 2475 */
2476#define MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY 0x00000400 2476#define MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY 0x00000400
2477#define MWL8K_SET_HW_SPEC_FLAG_GENERATE_CCMP_HDR 0x00000200
2477#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080 2478#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080
2478#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP 0x00000020 2479#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP 0x00000020
2479#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON 0x00000010 2480#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON 0x00000010
@@ -2510,7 +2511,8 @@ static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
2510 cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT | 2511 cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT |
2511 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP | 2512 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP |
2512 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON | 2513 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON |
2513 MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY); 2514 MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY |
2515 MWL8K_SET_HW_SPEC_FLAG_GENERATE_CCMP_HDR);
2514 cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS); 2516 cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
2515 cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS); 2517 cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS);
2516 2518
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 553da68bd510..30df85d8fca8 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -395,9 +395,9 @@ static void unmask_evtchn(int port)
395static void xen_irq_init(unsigned irq) 395static void xen_irq_init(unsigned irq)
396{ 396{
397 struct irq_info *info; 397 struct irq_info *info;
398#ifdef CONFIG_SMP
398 struct irq_desc *desc = irq_to_desc(irq); 399 struct irq_desc *desc = irq_to_desc(irq);
399 400
400#ifdef CONFIG_SMP
401 /* By default all event channels notify CPU#0. */ 401 /* By default all event channels notify CPU#0. */
402 cpumask_copy(desc->irq_data.affinity, cpumask_of(0)); 402 cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
403#endif 403#endif
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 9ad2369d9e35..bfcb18feb1df 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -231,9 +231,6 @@ static int bad_inode_readlink(struct dentry *dentry, char __user *buffer,
231 231
232static int bad_inode_permission(struct inode *inode, int mask, unsigned int flags) 232static int bad_inode_permission(struct inode *inode, int mask, unsigned int flags)
233{ 233{
234 if (flags & IPERM_FLAG_RCU)
235 return -ECHILD;
236
237 return -EIO; 234 return -EIO;
238} 235}
239 236
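
This and several later hunks (cifs, coda, logfs, nilfs2, proc, reiserfs) delete the same boilerplate: under RCU-walk the VFS calls ->permission() with IPERM_FLAG_RCU set, and only handlers that might sleep need to bail out with -ECHILD so the lookup can retry in ref-walk mode. bad_inode_permission() never blocks, so its check was dead weight. A compilable userspace sketch of the convention, with simplified signatures rather than the real struct inode:

#include <errno.h>
#include <stdio.h>

#define IPERM_FLAG_RCU 0x0001	/* lockless (RCU-walk) lookup in progress */

/* A handler that may block must not run under RCU-walk: it returns
 * -ECHILD so the VFS retries the lookup in ref-walk mode. */
static int blocking_permission(int mask, unsigned int flags)
{
	if (flags & IPERM_FLAG_RCU)
		return -ECHILD;
	/* ... work that may sleep (disk I/O, network round trip) ... */
	return 0;
}

/* bad_inode_permission() never blocks, so it needs no such check. */
static int bad_inode_permission(int mask, unsigned int flags)
{
	return -EIO;
}

int main(void)
{
	printf("%d %d\n",
	       blocking_permission(4, IPERM_FLAG_RCU),
	       bad_inode_permission(4, IPERM_FLAG_RCU));
	return 0;
}
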
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 378b5b4443f3..300628795fdb 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -967,6 +967,12 @@ struct btrfs_fs_info {
967 struct srcu_struct subvol_srcu; 967 struct srcu_struct subvol_srcu;
968 968
969 spinlock_t trans_lock; 969 spinlock_t trans_lock;
970 /*
971 * the reloc mutex goes with the trans lock, it is taken
972 * during commit to protect us from the relocation code
973 */
974 struct mutex reloc_mutex;
975
970 struct list_head trans_list; 976 struct list_head trans_list;
971 struct list_head hashers; 977 struct list_head hashers;
972 struct list_head dead_roots; 978 struct list_head dead_roots;
@@ -1172,6 +1178,14 @@ struct btrfs_root {
1172 u32 type; 1178 u32 type;
1173 1179
1174 u64 highest_objectid; 1180 u64 highest_objectid;
1181
1182 /* btrfs_record_root_in_trans is a multi-step process,
1183 * and it can race with the balancing code. But the
1184 * window is very small, and it only exists the first time
1185 * the root is added to each transaction. So in_trans_setup
1186 * is used to tell us when more checks are required
1187 */
1188 unsigned long in_trans_setup;
1175 int ref_cows; 1189 int ref_cows;
1176 int track_dirty; 1190 int track_dirty;
1177 int in_radix; 1191 int in_radix;
@@ -1181,7 +1195,6 @@ struct btrfs_root {
1181 struct btrfs_key defrag_max; 1195 struct btrfs_key defrag_max;
1182 int defrag_running; 1196 int defrag_running;
1183 char *name; 1197 char *name;
1184 int in_sysfs;
1185 1198
1186 /* the dirty list is only used by non-reference counted roots */ 1199 /* the dirty list is only used by non-reference counted roots */
1187 struct list_head dirty_list; 1200 struct list_head dirty_list;
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 6462c29d2d37..f1cbd028f7b3 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -297,7 +297,6 @@ struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
297 item->data_len = data_len; 297 item->data_len = data_len;
298 item->ins_or_del = 0; 298 item->ins_or_del = 0;
299 item->bytes_reserved = 0; 299 item->bytes_reserved = 0;
300 item->block_rsv = NULL;
301 item->delayed_node = NULL; 300 item->delayed_node = NULL;
302 atomic_set(&item->refs, 1); 301 atomic_set(&item->refs, 1);
303 } 302 }
@@ -593,10 +592,8 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
593 592
594 num_bytes = btrfs_calc_trans_metadata_size(root, 1); 593 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
595 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes); 594 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
596 if (!ret) { 595 if (!ret)
597 item->bytes_reserved = num_bytes; 596 item->bytes_reserved = num_bytes;
598 item->block_rsv = dst_rsv;
599 }
600 597
601 return ret; 598 return ret;
602} 599}
@@ -604,10 +601,13 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
604static void btrfs_delayed_item_release_metadata(struct btrfs_root *root, 601static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
605 struct btrfs_delayed_item *item) 602 struct btrfs_delayed_item *item)
606{ 603{
604 struct btrfs_block_rsv *rsv;
605
607 if (!item->bytes_reserved) 606 if (!item->bytes_reserved)
608 return; 607 return;
609 608
610 btrfs_block_rsv_release(root, item->block_rsv, 609 rsv = &root->fs_info->global_block_rsv;
610 btrfs_block_rsv_release(root, rsv,
611 item->bytes_reserved); 611 item->bytes_reserved);
612} 612}
613 613
@@ -1014,6 +1014,7 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1014 struct btrfs_delayed_root *delayed_root; 1014 struct btrfs_delayed_root *delayed_root;
1015 struct btrfs_delayed_node *curr_node, *prev_node; 1015 struct btrfs_delayed_node *curr_node, *prev_node;
1016 struct btrfs_path *path; 1016 struct btrfs_path *path;
1017 struct btrfs_block_rsv *block_rsv;
1017 int ret = 0; 1018 int ret = 0;
1018 1019
1019 path = btrfs_alloc_path(); 1020 path = btrfs_alloc_path();
@@ -1021,6 +1022,9 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1021 return -ENOMEM; 1022 return -ENOMEM;
1022 path->leave_spinning = 1; 1023 path->leave_spinning = 1;
1023 1024
1025 block_rsv = trans->block_rsv;
1026 trans->block_rsv = &root->fs_info->global_block_rsv;
1027
1024 delayed_root = btrfs_get_delayed_root(root); 1028 delayed_root = btrfs_get_delayed_root(root);
1025 1029
1026 curr_node = btrfs_first_delayed_node(delayed_root); 1030 curr_node = btrfs_first_delayed_node(delayed_root);
@@ -1045,6 +1049,7 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1045 } 1049 }
1046 1050
1047 btrfs_free_path(path); 1051 btrfs_free_path(path);
1052 trans->block_rsv = block_rsv;
1048 return ret; 1053 return ret;
1049} 1054}
1050 1055
@@ -1052,6 +1057,7 @@ static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1052 struct btrfs_delayed_node *node) 1057 struct btrfs_delayed_node *node)
1053{ 1058{
1054 struct btrfs_path *path; 1059 struct btrfs_path *path;
1060 struct btrfs_block_rsv *block_rsv;
1055 int ret; 1061 int ret;
1056 1062
1057 path = btrfs_alloc_path(); 1063 path = btrfs_alloc_path();
@@ -1059,6 +1065,9 @@ static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1059 return -ENOMEM; 1065 return -ENOMEM;
1060 path->leave_spinning = 1; 1066 path->leave_spinning = 1;
1061 1067
1068 block_rsv = trans->block_rsv;
1069 trans->block_rsv = &node->root->fs_info->global_block_rsv;
1070
1062 ret = btrfs_insert_delayed_items(trans, path, node->root, node); 1071 ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1063 if (!ret) 1072 if (!ret)
1064 ret = btrfs_delete_delayed_items(trans, path, node->root, node); 1073 ret = btrfs_delete_delayed_items(trans, path, node->root, node);
@@ -1066,6 +1075,7 @@ static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1066 ret = btrfs_update_delayed_inode(trans, node->root, path, node); 1075 ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1067 btrfs_free_path(path); 1076 btrfs_free_path(path);
1068 1077
1078 trans->block_rsv = block_rsv;
1069 return ret; 1079 return ret;
1070} 1080}
1071 1081
@@ -1116,6 +1126,7 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
1116 struct btrfs_path *path; 1126 struct btrfs_path *path;
1117 struct btrfs_delayed_node *delayed_node = NULL; 1127 struct btrfs_delayed_node *delayed_node = NULL;
1118 struct btrfs_root *root; 1128 struct btrfs_root *root;
1129 struct btrfs_block_rsv *block_rsv;
1119 unsigned long nr = 0; 1130 unsigned long nr = 0;
1120 int need_requeue = 0; 1131 int need_requeue = 0;
1121 int ret; 1132 int ret;
@@ -1134,6 +1145,9 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
1134 if (IS_ERR(trans)) 1145 if (IS_ERR(trans))
1135 goto free_path; 1146 goto free_path;
1136 1147
1148 block_rsv = trans->block_rsv;
1149 trans->block_rsv = &root->fs_info->global_block_rsv;
1150
1137 ret = btrfs_insert_delayed_items(trans, path, root, delayed_node); 1151 ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
1138 if (!ret) 1152 if (!ret)
1139 ret = btrfs_delete_delayed_items(trans, path, root, 1153 ret = btrfs_delete_delayed_items(trans, path, root,
@@ -1176,6 +1190,7 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
1176 1190
1177 nr = trans->blocks_used; 1191 nr = trans->blocks_used;
1178 1192
1193 trans->block_rsv = block_rsv;
1179 btrfs_end_transaction_dmeta(trans, root); 1194 btrfs_end_transaction_dmeta(trans, root);
1180 __btrfs_btree_balance_dirty(root, nr); 1195 __btrfs_btree_balance_dirty(root, nr);
1181free_path: 1196free_path:
@@ -1222,6 +1237,13 @@ again:
1222 return 0; 1237 return 0;
1223} 1238}
1224 1239
1240void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
1241{
1242 struct btrfs_delayed_root *delayed_root;
1243 delayed_root = btrfs_get_delayed_root(root);
1244 WARN_ON(btrfs_first_delayed_node(delayed_root));
1245}
1246
1225void btrfs_balance_delayed_items(struct btrfs_root *root) 1247void btrfs_balance_delayed_items(struct btrfs_root *root)
1226{ 1248{
1227 struct btrfs_delayed_root *delayed_root; 1249 struct btrfs_delayed_root *delayed_root;
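
Most of the delayed-inode changes share one shape: instead of remembering a per-item block_rsv pointer, the code now temporarily points trans->block_rsv at the global reserve for the duration of the delayed-item work and restores the caller's reserve afterwards. A minimal sketch of that save/swap/restore pattern, with stub types standing in for the btrfs structures:

#include <stdio.h>

struct block_rsv { const char *name; };
struct trans_handle { struct block_rsv *block_rsv; };

static struct block_rsv global_rsv = { "global" };

/* The shape of the btrfs_run_delayed_items() changes: charge the
 * delayed-item metadata to the global reserve, then put back
 * whatever reserve the caller was using. */
static void run_delayed_items(struct trans_handle *trans)
{
	struct block_rsv *saved = trans->block_rsv;

	trans->block_rsv = &global_rsv;
	printf("charging: %s\n", trans->block_rsv->name);
	/* ... insert and delete the delayed items ... */
	trans->block_rsv = saved;
}

int main(void)
{
	struct block_rsv caller_rsv = { "caller" };
	struct trans_handle trans = { &caller_rsv };

	run_delayed_items(&trans);
	printf("restored: %s\n", trans.block_rsv->name);
	return 0;
}
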
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index eb7d240aa648..d1a6a2915c66 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -75,7 +75,6 @@ struct btrfs_delayed_item {
75 struct list_head tree_list; /* used for batch insert/delete items */ 75 struct list_head tree_list; /* used for batch insert/delete items */
76 struct list_head readdir_list; /* used for readdir items */ 76 struct list_head readdir_list; /* used for readdir items */
77 u64 bytes_reserved; 77 u64 bytes_reserved;
78 struct btrfs_block_rsv *block_rsv;
79 struct btrfs_delayed_node *delayed_node; 78 struct btrfs_delayed_node *delayed_node;
80 atomic_t refs; 79 atomic_t refs;
81 int ins_or_del; 80 int ins_or_del;
@@ -138,4 +137,8 @@ int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
138/* for init */ 137/* for init */
139int __init btrfs_delayed_inode_init(void); 138int __init btrfs_delayed_inode_init(void);
140void btrfs_delayed_inode_exit(void); 139void btrfs_delayed_inode_exit(void);
140
141/* for debugging */
142void btrfs_assert_delayed_root_empty(struct btrfs_root *root);
143
141#endif 144#endif
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 9f68c6898653..1ac8db5dc0a3 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1044,7 +1044,6 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
1044 root->last_trans = 0; 1044 root->last_trans = 0;
1045 root->highest_objectid = 0; 1045 root->highest_objectid = 0;
1046 root->name = NULL; 1046 root->name = NULL;
1047 root->in_sysfs = 0;
1048 root->inode_tree = RB_ROOT; 1047 root->inode_tree = RB_ROOT;
1049 INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC); 1048 INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
1050 root->block_rsv = NULL; 1049 root->block_rsv = NULL;
@@ -1300,19 +1299,21 @@ again:
1300 return root; 1299 return root;
1301 1300
1302 root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS); 1301 root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
1303 if (!root->free_ino_ctl)
1304 goto fail;
1305 root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned), 1302 root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
1306 GFP_NOFS); 1303 GFP_NOFS);
1307 if (!root->free_ino_pinned) 1304 if (!root->free_ino_pinned || !root->free_ino_ctl) {
1305 ret = -ENOMEM;
1308 goto fail; 1306 goto fail;
1307 }
1309 1308
1310 btrfs_init_free_ino_ctl(root); 1309 btrfs_init_free_ino_ctl(root);
1311 mutex_init(&root->fs_commit_mutex); 1310 mutex_init(&root->fs_commit_mutex);
1312 spin_lock_init(&root->cache_lock); 1311 spin_lock_init(&root->cache_lock);
1313 init_waitqueue_head(&root->cache_wait); 1312 init_waitqueue_head(&root->cache_wait);
1314 1313
1315 set_anon_super(&root->anon_super, NULL); 1314 ret = set_anon_super(&root->anon_super, NULL);
1315 if (ret)
1316 goto fail;
1316 1317
1317 if (btrfs_root_refs(&root->root_item) == 0) { 1318 if (btrfs_root_refs(&root->root_item) == 0) {
1318 ret = -ENOENT; 1319 ret = -ENOENT;
@@ -1618,6 +1619,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1618 spin_lock_init(&fs_info->fs_roots_radix_lock); 1619 spin_lock_init(&fs_info->fs_roots_radix_lock);
1619 spin_lock_init(&fs_info->delayed_iput_lock); 1620 spin_lock_init(&fs_info->delayed_iput_lock);
1620 spin_lock_init(&fs_info->defrag_inodes_lock); 1621 spin_lock_init(&fs_info->defrag_inodes_lock);
1622 mutex_init(&fs_info->reloc_mutex);
1621 1623
1622 init_completion(&fs_info->kobj_unregister); 1624 init_completion(&fs_info->kobj_unregister);
1623 fs_info->tree_root = tree_root; 1625 fs_info->tree_root = tree_root;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index b42efc2ded51..1f61bf5b4960 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3314,10 +3314,6 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
3314 if (reserved == 0) 3314 if (reserved == 0)
3315 return 0; 3315 return 0;
3316 3316
3317 /* nothing to shrink - nothing to reclaim */
3318 if (root->fs_info->delalloc_bytes == 0)
3319 return 0;
3320
3321 max_reclaim = min(reserved, to_reclaim); 3317 max_reclaim = min(reserved, to_reclaim);
3322 3318
3323 while (loops < 1024) { 3319 while (loops < 1024) {
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 751ddf8fc58a..0a9b10c5b0a7 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3076,6 +3076,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
3076 ret = btrfs_update_inode(trans, root, dir); 3076 ret = btrfs_update_inode(trans, root, dir);
3077 BUG_ON(ret); 3077 BUG_ON(ret);
3078 3078
3079 btrfs_free_path(path);
3079 return 0; 3080 return 0;
3080} 3081}
3081 3082
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index b793d112d1f6..a3c4751e07db 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -482,8 +482,10 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
482 ret = btrfs_snap_reserve_metadata(trans, pending_snapshot); 482 ret = btrfs_snap_reserve_metadata(trans, pending_snapshot);
483 BUG_ON(ret); 483 BUG_ON(ret);
484 484
485 spin_lock(&root->fs_info->trans_lock);
485 list_add(&pending_snapshot->list, 486 list_add(&pending_snapshot->list,
486 &trans->transaction->pending_snapshots); 487 &trans->transaction->pending_snapshots);
488 spin_unlock(&root->fs_info->trans_lock);
487 if (async_transid) { 489 if (async_transid) {
488 *async_transid = trans->transid; 490 *async_transid = trans->transid;
489 ret = btrfs_commit_transaction_async(trans, 491 ret = btrfs_commit_transaction_async(trans,
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index b1ef27cc673b..5e0a3dc79a45 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1368,7 +1368,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
1368 int ret; 1368 int ret;
1369 1369
1370 if (!root->reloc_root) 1370 if (!root->reloc_root)
1371 return 0; 1371 goto out;
1372 1372
1373 reloc_root = root->reloc_root; 1373 reloc_root = root->reloc_root;
1374 root_item = &reloc_root->root_item; 1374 root_item = &reloc_root->root_item;
@@ -1390,6 +1390,8 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
1390 ret = btrfs_update_root(trans, root->fs_info->tree_root, 1390 ret = btrfs_update_root(trans, root->fs_info->tree_root,
1391 &reloc_root->root_key, root_item); 1391 &reloc_root->root_key, root_item);
1392 BUG_ON(ret); 1392 BUG_ON(ret);
1393
1394out:
1393 return 0; 1395 return 0;
1394} 1396}
1395 1397
@@ -2142,10 +2144,11 @@ int prepare_to_merge(struct reloc_control *rc, int err)
2142 u64 num_bytes = 0; 2144 u64 num_bytes = 0;
2143 int ret; 2145 int ret;
2144 2146
2145 spin_lock(&root->fs_info->trans_lock); 2147 mutex_lock(&root->fs_info->reloc_mutex);
2146 rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; 2148 rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
2147 rc->merging_rsv_size += rc->nodes_relocated * 2; 2149 rc->merging_rsv_size += rc->nodes_relocated * 2;
2148 spin_unlock(&root->fs_info->trans_lock); 2150 mutex_unlock(&root->fs_info->reloc_mutex);
2151
2149again: 2152again:
2150 if (!err) { 2153 if (!err) {
2151 num_bytes = rc->merging_rsv_size; 2154 num_bytes = rc->merging_rsv_size;
@@ -2214,9 +2217,16 @@ int merge_reloc_roots(struct reloc_control *rc)
2214 int ret; 2217 int ret;
2215again: 2218again:
2216 root = rc->extent_root; 2219 root = rc->extent_root;
2217 spin_lock(&root->fs_info->trans_lock); 2220
2221 /*
2222 * this serializes us with btrfs_record_root_in_transaction,
2223 * we have to make sure nobody is in the middle of
2224 * adding their roots to the list while we are
2225 * doing this splice
2226 */
2227 mutex_lock(&root->fs_info->reloc_mutex);
2218 list_splice_init(&rc->reloc_roots, &reloc_roots); 2228 list_splice_init(&rc->reloc_roots, &reloc_roots);
2219 spin_unlock(&root->fs_info->trans_lock); 2229 mutex_unlock(&root->fs_info->reloc_mutex);
2220 2230
2221 while (!list_empty(&reloc_roots)) { 2231 while (!list_empty(&reloc_roots)) {
2222 found = 1; 2232 found = 1;
@@ -3590,17 +3600,19 @@ next:
3590static void set_reloc_control(struct reloc_control *rc) 3600static void set_reloc_control(struct reloc_control *rc)
3591{ 3601{
3592 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3602 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3593 spin_lock(&fs_info->trans_lock); 3603
3604 mutex_lock(&fs_info->reloc_mutex);
3594 fs_info->reloc_ctl = rc; 3605 fs_info->reloc_ctl = rc;
3595 spin_unlock(&fs_info->trans_lock); 3606 mutex_unlock(&fs_info->reloc_mutex);
3596} 3607}
3597 3608
3598static void unset_reloc_control(struct reloc_control *rc) 3609static void unset_reloc_control(struct reloc_control *rc)
3599{ 3610{
3600 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3611 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3601 spin_lock(&fs_info->trans_lock); 3612
3613 mutex_lock(&fs_info->reloc_mutex);
3602 fs_info->reloc_ctl = NULL; 3614 fs_info->reloc_ctl = NULL;
3603 spin_unlock(&fs_info->trans_lock); 3615 mutex_unlock(&fs_info->reloc_mutex);
3604} 3616}
3605 3617
3606static int check_extent_flags(u64 flags) 3618static int check_extent_flags(u64 flags)
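
Relocation previously piggybacked on trans_lock, a spinlock; the new reloc_mutex can be held across operations that may sleep, which is what the commit path needs while it keeps balancing out of the way. A small pthread analogue of the guarded set/unset of reloc_ctl, assuming nothing about the real lock ordering beyond what the hunks show:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t reloc_mutex = PTHREAD_MUTEX_INITIALIZER;
static void *reloc_ctl;		/* like fs_info->reloc_ctl */

/* A mutex, unlike the spinlock it replaces, may be held across
 * blocking work, which the commit path relies on. */
static void set_reloc_control(void *rc)
{
	pthread_mutex_lock(&reloc_mutex);
	reloc_ctl = rc;
	pthread_mutex_unlock(&reloc_mutex);
}

static void unset_reloc_control(void)
{
	pthread_mutex_lock(&reloc_mutex);
	reloc_ctl = NULL;
	pthread_mutex_unlock(&reloc_mutex);
}

int main(void)
{
	int rc;

	set_reloc_control(&rc);
	printf("reloc_ctl: %s\n", reloc_ctl ? "set" : "clear");
	unset_reloc_control();
	printf("reloc_ctl: %s\n", reloc_ctl ? "set" : "clear");
	return 0;
}
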
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index c3c223ae6691..daac9ae6d731 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -28,152 +28,6 @@
28#include "disk-io.h" 28#include "disk-io.h"
29#include "transaction.h" 29#include "transaction.h"
30 30
31static ssize_t root_blocks_used_show(struct btrfs_root *root, char *buf)
32{
33 return snprintf(buf, PAGE_SIZE, "%llu\n",
34 (unsigned long long)btrfs_root_used(&root->root_item));
35}
36
37static ssize_t root_block_limit_show(struct btrfs_root *root, char *buf)
38{
39 return snprintf(buf, PAGE_SIZE, "%llu\n",
40 (unsigned long long)btrfs_root_limit(&root->root_item));
41}
42
43static ssize_t super_blocks_used_show(struct btrfs_fs_info *fs, char *buf)
44{
45
46 return snprintf(buf, PAGE_SIZE, "%llu\n",
47 (unsigned long long)btrfs_super_bytes_used(&fs->super_copy));
48}
49
50static ssize_t super_total_blocks_show(struct btrfs_fs_info *fs, char *buf)
51{
52 return snprintf(buf, PAGE_SIZE, "%llu\n",
53 (unsigned long long)btrfs_super_total_bytes(&fs->super_copy));
54}
55
56static ssize_t super_blocksize_show(struct btrfs_fs_info *fs, char *buf)
57{
58 return snprintf(buf, PAGE_SIZE, "%llu\n",
59 (unsigned long long)btrfs_super_sectorsize(&fs->super_copy));
60}
61
62/* this is for root attrs (subvols/snapshots) */
63struct btrfs_root_attr {
64 struct attribute attr;
65 ssize_t (*show)(struct btrfs_root *, char *);
66 ssize_t (*store)(struct btrfs_root *, const char *, size_t);
67};
68
69#define ROOT_ATTR(name, mode, show, store) \
70static struct btrfs_root_attr btrfs_root_attr_##name = __ATTR(name, mode, \
71 show, store)
72
73ROOT_ATTR(blocks_used, 0444, root_blocks_used_show, NULL);
74ROOT_ATTR(block_limit, 0644, root_block_limit_show, NULL);
75
76static struct attribute *btrfs_root_attrs[] = {
77 &btrfs_root_attr_blocks_used.attr,
78 &btrfs_root_attr_block_limit.attr,
79 NULL,
80};
81
82/* this is for super attrs (actual full fs) */
83struct btrfs_super_attr {
84 struct attribute attr;
85 ssize_t (*show)(struct btrfs_fs_info *, char *);
86 ssize_t (*store)(struct btrfs_fs_info *, const char *, size_t);
87};
88
89#define SUPER_ATTR(name, mode, show, store) \
90static struct btrfs_super_attr btrfs_super_attr_##name = __ATTR(name, mode, \
91 show, store)
92
93SUPER_ATTR(blocks_used, 0444, super_blocks_used_show, NULL);
94SUPER_ATTR(total_blocks, 0444, super_total_blocks_show, NULL);
95SUPER_ATTR(blocksize, 0444, super_blocksize_show, NULL);
96
97static struct attribute *btrfs_super_attrs[] = {
98 &btrfs_super_attr_blocks_used.attr,
99 &btrfs_super_attr_total_blocks.attr,
100 &btrfs_super_attr_blocksize.attr,
101 NULL,
102};
103
104static ssize_t btrfs_super_attr_show(struct kobject *kobj,
105 struct attribute *attr, char *buf)
106{
107 struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info,
108 super_kobj);
109 struct btrfs_super_attr *a = container_of(attr,
110 struct btrfs_super_attr,
111 attr);
112
113 return a->show ? a->show(fs, buf) : 0;
114}
115
116static ssize_t btrfs_super_attr_store(struct kobject *kobj,
117 struct attribute *attr,
118 const char *buf, size_t len)
119{
120 struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info,
121 super_kobj);
122 struct btrfs_super_attr *a = container_of(attr,
123 struct btrfs_super_attr,
124 attr);
125
126 return a->store ? a->store(fs, buf, len) : 0;
127}
128
129static ssize_t btrfs_root_attr_show(struct kobject *kobj,
130 struct attribute *attr, char *buf)
131{
132 struct btrfs_root *root = container_of(kobj, struct btrfs_root,
133 root_kobj);
134 struct btrfs_root_attr *a = container_of(attr,
135 struct btrfs_root_attr,
136 attr);
137
138 return a->show ? a->show(root, buf) : 0;
139}
140
141static ssize_t btrfs_root_attr_store(struct kobject *kobj,
142 struct attribute *attr,
143 const char *buf, size_t len)
144{
145 struct btrfs_root *root = container_of(kobj, struct btrfs_root,
146 root_kobj);
147 struct btrfs_root_attr *a = container_of(attr,
148 struct btrfs_root_attr,
149 attr);
150 return a->store ? a->store(root, buf, len) : 0;
151}
152
153static void btrfs_super_release(struct kobject *kobj)
154{
155 struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info,
156 super_kobj);
157 complete(&fs->kobj_unregister);
158}
159
160static void btrfs_root_release(struct kobject *kobj)
161{
162 struct btrfs_root *root = container_of(kobj, struct btrfs_root,
163 root_kobj);
164 complete(&root->kobj_unregister);
165}
166
167static const struct sysfs_ops btrfs_super_attr_ops = {
168 .show = btrfs_super_attr_show,
169 .store = btrfs_super_attr_store,
170};
171
172static const struct sysfs_ops btrfs_root_attr_ops = {
173 .show = btrfs_root_attr_show,
174 .store = btrfs_root_attr_store,
175};
176
177/* /sys/fs/btrfs/ entry */ 31/* /sys/fs/btrfs/ entry */
178static struct kset *btrfs_kset; 32static struct kset *btrfs_kset;
179 33
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 2b3590b9fe98..51dcec86757f 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -126,28 +126,85 @@ static noinline int join_transaction(struct btrfs_root *root, int nofail)
126 * to make sure the old root from before we joined the transaction is deleted 126 * to make sure the old root from before we joined the transaction is deleted
127 * when the transaction commits 127 * when the transaction commits
128 */ 128 */
129int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, 129static int record_root_in_trans(struct btrfs_trans_handle *trans,
130 struct btrfs_root *root) 130 struct btrfs_root *root)
131{ 131{
132 if (root->ref_cows && root->last_trans < trans->transid) { 132 if (root->ref_cows && root->last_trans < trans->transid) {
133 WARN_ON(root == root->fs_info->extent_root); 133 WARN_ON(root == root->fs_info->extent_root);
134 WARN_ON(root->commit_root != root->node); 134 WARN_ON(root->commit_root != root->node);
135 135
136 /*
137 * see below for in_trans_setup usage rules
138 * we have the reloc mutex held now, so there
139 * is only one writer in this function
140 */
141 root->in_trans_setup = 1;
142
143 /* make sure readers find in_trans_setup before
144 * they find our root->last_trans update
145 */
146 smp_wmb();
147
136 spin_lock(&root->fs_info->fs_roots_radix_lock); 148 spin_lock(&root->fs_info->fs_roots_radix_lock);
137 if (root->last_trans == trans->transid) { 149 if (root->last_trans == trans->transid) {
138 spin_unlock(&root->fs_info->fs_roots_radix_lock); 150 spin_unlock(&root->fs_info->fs_roots_radix_lock);
139 return 0; 151 return 0;
140 } 152 }
141 root->last_trans = trans->transid;
142 radix_tree_tag_set(&root->fs_info->fs_roots_radix, 153 radix_tree_tag_set(&root->fs_info->fs_roots_radix,
143 (unsigned long)root->root_key.objectid, 154 (unsigned long)root->root_key.objectid,
144 BTRFS_ROOT_TRANS_TAG); 155 BTRFS_ROOT_TRANS_TAG);
145 spin_unlock(&root->fs_info->fs_roots_radix_lock); 156 spin_unlock(&root->fs_info->fs_roots_radix_lock);
157 root->last_trans = trans->transid;
158
159 /* this is pretty tricky. We don't want to
160 * take the relocation lock in btrfs_record_root_in_trans
161 * unless we're really doing the first setup for this root in
162 * this transaction.
163 *
164 * Normally we'd use root->last_trans as a flag to decide
165 * if we want to take the expensive mutex.
166 *
167 * But, we have to set root->last_trans before we
168 * init the relocation root, otherwise, we trip over warnings
169 * in ctree.c. The solution used here is to flag ourselves
170 * with root->in_trans_setup. When this is 1, we're still
171 * fixing up the reloc trees and everyone must wait.
172 *
173 * When this is zero, they can trust root->last_trans and fly
174 * through btrfs_record_root_in_trans without having to take the
175 * lock. smp_wmb() makes sure that all the writes above are
176 * done before we pop in the zero below
177 */
146 btrfs_init_reloc_root(trans, root); 178 btrfs_init_reloc_root(trans, root);
179 smp_wmb();
180 root->in_trans_setup = 0;
147 } 181 }
148 return 0; 182 return 0;
149} 183}
150 184
185
186int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
187 struct btrfs_root *root)
188{
189 if (!root->ref_cows)
190 return 0;
191
192 /*
193 * see record_root_in_trans for comments about in_trans_setup usage
194 * and barriers
195 */
196 smp_rmb();
197 if (root->last_trans == trans->transid &&
198 !root->in_trans_setup)
199 return 0;
200
201 mutex_lock(&root->fs_info->reloc_mutex);
202 record_root_in_trans(trans, root);
203 mutex_unlock(&root->fs_info->reloc_mutex);
204
205 return 0;
206}
207
151/* wait for commit against the current transaction to become unblocked 208/* wait for commit against the current transaction to become unblocked
152 * when this is done, it is safe to start a new transaction, but the current 209 * when this is done, it is safe to start a new transaction, but the current
153 * transaction might not be fully on disk. 210 * transaction might not be fully on disk.
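
The in_trans_setup dance above is a publish/subscribe barrier pairing: the writer sets in_trans_setup before publishing the new last_trans (smp_wmb() in between), so a reader that observes the new transid is guaranteed to also observe in_trans_setup != 0 while setup is still running, and falls back to taking reloc_mutex. A compilable C11 analogue, approximating smp_wmb()/smp_rmb() with release/acquire fences:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int in_setup;
static atomic_long last_trans;

/* Writer: flag "setup in progress" before publishing the transid,
 * like the smp_wmb() in record_root_in_trans(). */
static void record_root(long transid)
{
	atomic_store_explicit(&in_setup, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* ~smp_wmb() */
	atomic_store_explicit(&last_trans, transid, memory_order_relaxed);
	/* ... init the reloc root ... */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&in_setup, 0, memory_order_relaxed);
}

/* Reader: if the new transid is visible, so is in_setup, so the
 * fast path can never skip a setup that is still running. */
static int needs_slow_path(long transid)
{
	long t = atomic_load_explicit(&last_trans, memory_order_relaxed);

	atomic_thread_fence(memory_order_acquire);	/* ~smp_rmb() */
	if (t == transid &&
	    !atomic_load_explicit(&in_setup, memory_order_relaxed))
		return 0;	/* already recorded, no lock needed */
	return 1;		/* take reloc_mutex and recheck */
}

int main(void)
{
	record_root(42);
	printf("%s path\n", needs_slow_path(42) ? "slow" : "fast");
	return 0;
}
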
@@ -882,7 +939,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
882 parent = dget_parent(dentry); 939 parent = dget_parent(dentry);
883 parent_inode = parent->d_inode; 940 parent_inode = parent->d_inode;
884 parent_root = BTRFS_I(parent_inode)->root; 941 parent_root = BTRFS_I(parent_inode)->root;
885 btrfs_record_root_in_trans(trans, parent_root); 942 record_root_in_trans(trans, parent_root);
886 943
887 /* 944 /*
888 * insert the directory item 945 * insert the directory item
@@ -900,7 +957,16 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
900 ret = btrfs_update_inode(trans, parent_root, parent_inode); 957 ret = btrfs_update_inode(trans, parent_root, parent_inode);
901 BUG_ON(ret); 958 BUG_ON(ret);
902 959
903 btrfs_record_root_in_trans(trans, root); 960 /*
961 * pull in the delayed directory update
962 * and the delayed inode item;
963 * otherwise we would corrupt the FS during
964 * snapshot creation
965 */
966 ret = btrfs_run_delayed_items(trans, root);
967 BUG_ON(ret);
968
969 record_root_in_trans(trans, root);
904 btrfs_set_root_last_snapshot(&root->root_item, trans->transid); 970 btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
905 memcpy(new_root_item, &root->root_item, sizeof(*new_root_item)); 971 memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
906 btrfs_check_and_init_root_item(new_root_item); 972 btrfs_check_and_init_root_item(new_root_item);
@@ -961,14 +1027,6 @@ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
961 int ret; 1027 int ret;
962 1028
963 list_for_each_entry(pending, head, list) { 1029 list_for_each_entry(pending, head, list) {
964 /*
965 * We must deal with the delayed items before creating
966 * snapshots, or we will create a snapthot with inconsistent
967 * information.
968 */
969 ret = btrfs_run_delayed_items(trans, fs_info->fs_root);
970 BUG_ON(ret);
971
972 ret = create_pending_snapshot(trans, fs_info, pending); 1030 ret = create_pending_snapshot(trans, fs_info, pending);
973 BUG_ON(ret); 1031 BUG_ON(ret);
974 } 1032 }
@@ -1241,21 +1299,42 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1241 schedule_timeout(1); 1299 schedule_timeout(1);
1242 1300
1243 finish_wait(&cur_trans->writer_wait, &wait); 1301 finish_wait(&cur_trans->writer_wait, &wait);
1244 spin_lock(&root->fs_info->trans_lock);
1245 root->fs_info->trans_no_join = 1;
1246 spin_unlock(&root->fs_info->trans_lock);
1247 } while (atomic_read(&cur_trans->num_writers) > 1 || 1302 } while (atomic_read(&cur_trans->num_writers) > 1 ||
1248 (should_grow && cur_trans->num_joined != joined)); 1303 (should_grow && cur_trans->num_joined != joined));
1249 1304
1250 ret = create_pending_snapshots(trans, root->fs_info); 1305 /*
1251 BUG_ON(ret); 1306 * Ok now we need to make sure to block out any other joins while we
1307 * commit the transaction. We could have started a join before setting
1308 * no_join, so make sure to wait for num_writers to drop to 1 again.
1309 */
1310 spin_lock(&root->fs_info->trans_lock);
1311 root->fs_info->trans_no_join = 1;
1312 spin_unlock(&root->fs_info->trans_lock);
1313 wait_event(cur_trans->writer_wait,
1314 atomic_read(&cur_trans->num_writers) == 1);
1315
1316 /*
1317 * the reloc mutex makes sure that we stop
1318 * the balancing code from coming in and moving
1319 * extents around in the middle of the commit
1320 */
1321 mutex_lock(&root->fs_info->reloc_mutex);
1252 1322
1253 ret = btrfs_run_delayed_items(trans, root); 1323 ret = btrfs_run_delayed_items(trans, root);
1254 BUG_ON(ret); 1324 BUG_ON(ret);
1255 1325
1326 ret = create_pending_snapshots(trans, root->fs_info);
1327 BUG_ON(ret);
1328
1256 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); 1329 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1257 BUG_ON(ret); 1330 BUG_ON(ret);
1258 1331
1332 /*
1333 * make sure none of the code above managed to slip in a
1334 * delayed item
1335 */
1336 btrfs_assert_delayed_root_empty(root);
1337
1259 WARN_ON(cur_trans != trans->transaction); 1338 WARN_ON(cur_trans != trans->transaction);
1260 1339
1261 btrfs_scrub_pause(root); 1340 btrfs_scrub_pause(root);
@@ -1312,6 +1391,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1312 root->fs_info->running_transaction = NULL; 1391 root->fs_info->running_transaction = NULL;
1313 root->fs_info->trans_no_join = 0; 1392 root->fs_info->trans_no_join = 0;
1314 spin_unlock(&root->fs_info->trans_lock); 1393 spin_unlock(&root->fs_info->trans_lock);
1394 mutex_unlock(&root->fs_info->reloc_mutex);
1315 1395
1316 wake_up(&root->fs_info->transaction_wait); 1396 wake_up(&root->fs_info->transaction_wait);
1317 1397
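
The reordered commit path closes a join race: trans_no_join is set under trans_lock, then the committer waits for num_writers to drop back to 1 before taking reloc_mutex, so no writer that squeezed in earlier is still active. The same shape in a runnable pthread sketch; a condition variable stands in for writer_wait, and with only one thread the wait condition is already satisfied:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t trans_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t writer_wait = PTHREAD_COND_INITIALIZER;
static int trans_no_join;
static int num_writers = 1;	/* the committing thread itself */

/* New writers are refused once trans_no_join is set. */
static int try_join(void)
{
	int joined;

	pthread_mutex_lock(&trans_lock);
	joined = !trans_no_join;
	if (joined)
		num_writers++;
	pthread_mutex_unlock(&trans_lock);
	return joined;
}

/* Commit side: block further joins, then wait out writers that got
 * in before the flag was set -- the wait_event() added above. */
static void block_joins_and_wait(void)
{
	pthread_mutex_lock(&trans_lock);
	trans_no_join = 1;
	while (num_writers > 1)
		pthread_cond_wait(&writer_wait, &trans_lock);
	pthread_mutex_unlock(&trans_lock);
}

int main(void)
{
	block_joins_and_wait();
	printf("join after no_join: %d\n", try_join());
	return 0;
}
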
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 592396c6dc47..4ce8a9f41d1e 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3177,7 +3177,7 @@ again:
3177 tmp_key.offset = (u64)-1; 3177 tmp_key.offset = (u64)-1;
3178 3178
3179 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key); 3179 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
3180 BUG_ON(!wc.replay_dest); 3180 BUG_ON(IS_ERR_OR_NULL(wc.replay_dest));
3181 3181
3182 wc.replay_dest->log_root = log; 3182 wc.replay_dest->log_root = log;
3183 btrfs_record_root_in_trans(trans, wc.replay_dest); 3183 btrfs_record_root_in_trans(trans, wc.replay_dest);
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index e9def996e383..2f0c58646c10 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -257,9 +257,6 @@ static int cifs_permission(struct inode *inode, int mask, unsigned int flags)
257{ 257{
258 struct cifs_sb_info *cifs_sb; 258 struct cifs_sb_info *cifs_sb;
259 259
260 if (flags & IPERM_FLAG_RCU)
261 return -ECHILD;
262
263 cifs_sb = CIFS_SB(inode->i_sb); 260 cifs_sb = CIFS_SB(inode->i_sb);
264 261
265 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) { 262 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
diff --git a/fs/coda/pioctl.c b/fs/coda/pioctl.c
index 6cbb3afb36dc..cb140ef293e4 100644
--- a/fs/coda/pioctl.c
+++ b/fs/coda/pioctl.c
@@ -43,8 +43,6 @@ const struct file_operations coda_ioctl_operations = {
43/* the coda pioctl inode ops */ 43/* the coda pioctl inode ops */
44static int coda_ioctl_permission(struct inode *inode, int mask, unsigned int flags) 44static int coda_ioctl_permission(struct inode *inode, int mask, unsigned int flags)
45{ 45{
46 if (flags & IPERM_FLAG_RCU)
47 return -ECHILD;
48 return (mask & MAY_EXEC) ? -EACCES : 0; 46 return (mask & MAY_EXEC) ? -EACCES : 0;
49} 47}
50 48
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index 9ed89d1663f8..1afae26cf236 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -555,13 +555,6 @@ static int logfs_symlink(struct inode *dir, struct dentry *dentry,
555 return __logfs_create(dir, dentry, inode, target, destlen); 555 return __logfs_create(dir, dentry, inode, target, destlen);
556} 556}
557 557
558static int logfs_permission(struct inode *inode, int mask, unsigned int flags)
559{
560 if (flags & IPERM_FLAG_RCU)
561 return -ECHILD;
562 return generic_permission(inode, mask, flags, NULL);
563}
564
565static int logfs_link(struct dentry *old_dentry, struct inode *dir, 558static int logfs_link(struct dentry *old_dentry, struct inode *dir,
566 struct dentry *dentry) 559 struct dentry *dentry)
567{ 560{
@@ -820,7 +813,6 @@ const struct inode_operations logfs_dir_iops = {
820 .mknod = logfs_mknod, 813 .mknod = logfs_mknod,
821 .rename = logfs_rename, 814 .rename = logfs_rename,
822 .rmdir = logfs_rmdir, 815 .rmdir = logfs_rmdir,
823 .permission = logfs_permission,
824 .symlink = logfs_symlink, 816 .symlink = logfs_symlink,
825 .unlink = logfs_unlink, 817 .unlink = logfs_unlink,
826}; 818};
diff --git a/fs/namei.c b/fs/namei.c
index 9e425e7e6c8f..0223c41fb114 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -238,7 +238,8 @@ int generic_permission(struct inode *inode, int mask, unsigned int flags,
238 238
239 /* 239 /*
240 * Read/write DACs are always overridable. 240 * Read/write DACs are always overridable.
241 * Executable DACs are overridable if at least one exec bit is set. 241 * Executable DACs are overridable for all directories and
242 * for non-directories that have at least one exec bit set.
242 */ 243 */
243 if (!(mask & MAY_EXEC) || execute_ok(inode)) 244 if (!(mask & MAY_EXEC) || execute_ok(inode))
244 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE)) 245 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
@@ -1011,9 +1012,6 @@ failed:
1011 * Follow down to the covering mount currently visible to userspace. At each 1012 * Follow down to the covering mount currently visible to userspace. At each
1012 * point, the filesystem owning that dentry may be queried as to whether the 1013 * point, the filesystem owning that dentry may be queried as to whether the
1013 * caller is permitted to proceed or not. 1014 * caller is permitted to proceed or not.
1014 *
1015 * Care must be taken as namespace_sem may be held (indicated by mounting_here
1016 * being true).
1017 */ 1015 */
1018int follow_down(struct path *path) 1016int follow_down(struct path *path)
1019{ 1017{
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index 18b3e8975fe0..fbb2a5ef5817 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -82,6 +82,7 @@ config NFSD_V4
82 select NFSD_V3 82 select NFSD_V3
83 select FS_POSIX_ACL 83 select FS_POSIX_ACL
84 select SUNRPC_GSS 84 select SUNRPC_GSS
85 select CRYPTO
85 help 86 help
86 This option enables support in your system's NFS server for 87 This option enables support in your system's NFS server for
87 version 4 of the NFS protocol (RFC 3530). 88 version 4 of the NFS protocol (RFC 3530).
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 1f5eae40f34e..2b1449dd2f49 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -13,6 +13,7 @@
13#include <linux/lockd/lockd.h> 13#include <linux/lockd/lockd.h>
14#include <linux/sunrpc/clnt.h> 14#include <linux/sunrpc/clnt.h>
15#include <linux/sunrpc/gss_api.h> 15#include <linux/sunrpc/gss_api.h>
16#include <linux/sunrpc/gss_krb5_enctypes.h>
16 17
17#include "idmap.h" 18#include "idmap.h"
18#include "nfsd.h" 19#include "nfsd.h"
@@ -189,18 +190,10 @@ static struct file_operations export_features_operations = {
189 .release = single_release, 190 .release = single_release,
190}; 191};
191 192
192#ifdef CONFIG_SUNRPC_GSS 193#if defined(CONFIG_SUNRPC_GSS) || defined(CONFIG_SUNRPC_GSS_MODULE)
193static int supported_enctypes_show(struct seq_file *m, void *v) 194static int supported_enctypes_show(struct seq_file *m, void *v)
194{ 195{
195 struct gss_api_mech *k5mech; 196 seq_printf(m, KRB5_SUPPORTED_ENCTYPES);
196
197 k5mech = gss_mech_get_by_name("krb5");
198 if (k5mech == NULL)
199 goto out;
200 if (k5mech->gm_upcall_enctypes != NULL)
201 seq_printf(m, k5mech->gm_upcall_enctypes);
202 gss_mech_put(k5mech);
203out:
204 return 0; 197 return 0;
205} 198}
206 199
@@ -215,7 +208,7 @@ static struct file_operations supported_enctypes_ops = {
215 .llseek = seq_lseek, 208 .llseek = seq_lseek,
216 .release = single_release, 209 .release = single_release,
217}; 210};
218#endif /* CONFIG_SUNRPC_GSS */ 211#endif /* CONFIG_SUNRPC_GSS or CONFIG_SUNRPC_GSS_MODULE */
219 212
220extern int nfsd_pool_stats_open(struct inode *inode, struct file *file); 213extern int nfsd_pool_stats_open(struct inode *inode, struct file *file);
221extern int nfsd_pool_stats_release(struct inode *inode, struct file *file); 214extern int nfsd_pool_stats_release(struct inode *inode, struct file *file);
@@ -1427,9 +1420,9 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
1427 [NFSD_Versions] = {"versions", &transaction_ops, S_IWUSR|S_IRUSR}, 1420 [NFSD_Versions] = {"versions", &transaction_ops, S_IWUSR|S_IRUSR},
1428 [NFSD_Ports] = {"portlist", &transaction_ops, S_IWUSR|S_IRUGO}, 1421 [NFSD_Ports] = {"portlist", &transaction_ops, S_IWUSR|S_IRUGO},
1429 [NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO}, 1422 [NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO},
1430#ifdef CONFIG_SUNRPC_GSS 1423#if defined(CONFIG_SUNRPC_GSS) || defined(CONFIG_SUNRPC_GSS_MODULE)
1431 [NFSD_SupportedEnctypes] = {"supported_krb5_enctypes", &supported_enctypes_ops, S_IRUGO}, 1424 [NFSD_SupportedEnctypes] = {"supported_krb5_enctypes", &supported_enctypes_ops, S_IRUGO},
1432#endif /* CONFIG_SUNRPC_GSS */ 1425#endif /* CONFIG_SUNRPC_GSS or CONFIG_SUNRPC_GSS_MODULE */
1433#ifdef CONFIG_NFSD_V4 1426#ifdef CONFIG_NFSD_V4
1434 [NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR}, 1427 [NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR},
1435 [NFSD_Gracetime] = {"nfsv4gracetime", &transaction_ops, S_IWUSR|S_IRUSR}, 1428 [NFSD_Gracetime] = {"nfsv4gracetime", &transaction_ops, S_IWUSR|S_IRUSR},
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index d5718273bb32..fd0acca5370a 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -696,7 +696,15 @@ nfsd_access(struct svc_rqst *rqstp, struct svc_fh *fhp, u32 *access, u32 *suppor
696} 696}
697#endif /* CONFIG_NFSD_V3 */ 697#endif /* CONFIG_NFSD_V3 */
698 698
699static int nfsd_open_break_lease(struct inode *inode, int access)
700{
701 unsigned int mode;
699 702
703 if (access & NFSD_MAY_NOT_BREAK_LEASE)
704 return 0;
705 mode = (access & NFSD_MAY_WRITE) ? O_WRONLY : O_RDONLY;
706 return break_lease(inode, mode | O_NONBLOCK);
707}
700 708
701/* 709/*
702 * Open an existing file or directory. 710 * Open an existing file or directory.
@@ -744,12 +752,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
744 if (!inode->i_fop) 752 if (!inode->i_fop)
745 goto out; 753 goto out;
746 754
747 /* 755 host_err = nfsd_open_break_lease(inode, access);
748 * Check to see if there are any leases on this file.
749 * This may block while leases are broken.
750 */
751 if (!(access & NFSD_MAY_NOT_BREAK_LEASE))
752 host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? O_WRONLY : 0));
753 if (host_err) /* NOMEM or WOULDBLOCK */ 756 if (host_err) /* NOMEM or WOULDBLOCK */
754 goto out_nfserr; 757 goto out_nfserr;
755 758
@@ -1660,8 +1663,10 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
1660 if (!dold->d_inode) 1663 if (!dold->d_inode)
1661 goto out_drop_write; 1664 goto out_drop_write;
1662 host_err = nfsd_break_lease(dold->d_inode); 1665 host_err = nfsd_break_lease(dold->d_inode);
1663 if (host_err) 1666 if (host_err) {
1667 err = nfserrno(host_err);
1664 goto out_drop_write; 1668 goto out_drop_write;
1669 }
1665 host_err = vfs_link(dold, dirp, dnew); 1670 host_err = vfs_link(dold, dirp, dnew);
1666 if (!host_err) { 1671 if (!host_err) {
1667 err = nfserrno(commit_metadata(ffhp)); 1672 err = nfserrno(commit_metadata(ffhp));
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index b954878ad6ce..b9b45fc2903e 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -801,12 +801,7 @@ out_err:
801 801
802int nilfs_permission(struct inode *inode, int mask, unsigned int flags) 802int nilfs_permission(struct inode *inode, int mask, unsigned int flags)
803{ 803{
804 struct nilfs_root *root; 804 struct nilfs_root *root = NILFS_I(inode)->i_root;
805
806 if (flags & IPERM_FLAG_RCU)
807 return -ECHILD;
808
809 root = NILFS_I(inode)->i_root;
810 if ((mask & MAY_WRITE) && root && 805 if ((mask & MAY_WRITE) && root &&
811 root->cno != NILFS_CPTREE_CURRENT_CNO) 806 root->cno != NILFS_CPTREE_CURRENT_CNO)
812 return -EROFS; /* snapshot is not writable */ 807 return -EROFS; /* snapshot is not writable */
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 14def991d9dd..8a84210ca080 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2169,11 +2169,7 @@ static const struct file_operations proc_fd_operations = {
2169 */ 2169 */
2170static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags) 2170static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
2171{ 2171{
2172 int rv; 2172 int rv = generic_permission(inode, mask, flags, NULL);
2173
2174 if (flags & IPERM_FLAG_RCU)
2175 return -ECHILD;
2176 rv = generic_permission(inode, mask, flags, NULL);
2177 if (rv == 0) 2173 if (rv == 0)
2178 return 0; 2174 return 0;
2179 if (task_pid(current) == proc_pid(inode)) 2175 if (task_pid(current) == proc_pid(inode))
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index f50133c11c24..d167de365a8d 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -304,9 +304,6 @@ static int proc_sys_permission(struct inode *inode, int mask,unsigned int flags)
304 struct ctl_table *table; 304 struct ctl_table *table;
305 int error; 305 int error;
306 306
307 if (flags & IPERM_FLAG_RCU)
308 return -ECHILD;
309
310 /* Executable files are not allowed under /proc/sys/ */ 307 /* Executable files are not allowed under /proc/sys/ */
311 if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) 308 if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))
312 return -EACCES; 309 return -EACCES;
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index e8a62f41b458..d78089690965 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -954,8 +954,6 @@ static int xattr_mount_check(struct super_block *s)
954 954
955int reiserfs_permission(struct inode *inode, int mask, unsigned int flags) 955int reiserfs_permission(struct inode *inode, int mask, unsigned int flags)
956{ 956{
957 if (flags & IPERM_FLAG_RCU)
958 return -ECHILD;
959 /* 957 /*
960 * We don't do permission checks on the internal objects. 958 * We don't do permission checks on the internal objects.
961 * Permissions are determined by the "owning" object. 959 * Permissions are determined by the "owning" object.
diff --git a/fs/timerfd.c b/fs/timerfd.c
index f67acbdda5e8..dffeb3795af1 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -61,7 +61,9 @@ static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr)
61 61
62/* 62/*
63 * Called when the clock was set to cancel the timers in the cancel 63 * Called when the clock was set to cancel the timers in the cancel
64 * list. 64 * list. This will wake up processes waiting on these timers. The
65 * wake-up requires ctx->ticks to be non-zero, therefore we increment
66 * it before calling wake_up_locked().
65 */ 67 */
66void timerfd_clock_was_set(void) 68void timerfd_clock_was_set(void)
67{ 69{
@@ -76,6 +78,7 @@ void timerfd_clock_was_set(void)
76 spin_lock_irqsave(&ctx->wqh.lock, flags); 78 spin_lock_irqsave(&ctx->wqh.lock, flags);
77 if (ctx->moffs.tv64 != moffs.tv64) { 79 if (ctx->moffs.tv64 != moffs.tv64) {
78 ctx->moffs.tv64 = KTIME_MAX; 80 ctx->moffs.tv64 = KTIME_MAX;
81 ctx->ticks++;
79 wake_up_locked(&ctx->wqh); 82 wake_up_locked(&ctx->wqh);
80 } 83 }
81 spin_unlock_irqrestore(&ctx->wqh.lock, flags); 84 spin_unlock_irqrestore(&ctx->wqh.lock, flags);
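
The timerfd fix works because readers sleep on a predicate, not on the wakeup itself: timerfd_read() only returns when ctx->ticks is non-zero, so a wake_up_locked() that leaves ticks at 0 would just put the reader back to sleep. A runnable condition-variable analogue of bumping the predicate before signalling:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t wqh_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wqh = PTHREAD_COND_INITIALIZER;
static unsigned long ticks;

/* Reader side: sleeps on the predicate; a wakeup that leaves
 * ticks == 0 is absorbed by the while loop. */
static unsigned long wait_for_ticks(void)
{
	unsigned long t;

	pthread_mutex_lock(&wqh_lock);
	while (ticks == 0)
		pthread_cond_wait(&wqh, &wqh_lock);
	t = ticks;
	ticks = 0;
	pthread_mutex_unlock(&wqh_lock);
	return t;
}

/* The fix: bump the predicate before signalling, as the clock-set
 * path now increments ctx->ticks before wake_up_locked(). */
static void clock_was_set(void)
{
	pthread_mutex_lock(&wqh_lock);
	ticks++;
	pthread_cond_signal(&wqh);
	pthread_mutex_unlock(&wqh_lock);
}

int main(void)
{
	clock_was_set();
	printf("ticks consumed: %lu\n", wait_for_ticks());
	return 0;
}
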
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 8c892c2d5300..529be0582029 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -2146,6 +2146,7 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
2146 if (IS_ERR(sb)) { 2146 if (IS_ERR(sb)) {
2147 err = PTR_ERR(sb); 2147 err = PTR_ERR(sb);
2148 kfree(c); 2148 kfree(c);
2149 goto out_close;
2149 } 2150 }
2150 2151
2151 if (sb->s_root) { 2152 if (sb->s_root) {
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index d4646b48dc4a..18a1baf31f2d 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -188,6 +188,7 @@ struct clocksource {
188#ifdef CONFIG_CLOCKSOURCE_WATCHDOG 188#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
189 /* Watchdog related data, used by the framework */ 189 /* Watchdog related data, used by the framework */
190 struct list_head wd_list; 190 struct list_head wd_list;
191 cycle_t cs_last;
191 cycle_t wd_last; 192 cycle_t wd_last;
192#endif 193#endif
193} ____cacheline_aligned; 194} ____cacheline_aligned;
diff --git a/include/linux/device_cgroup.h b/include/linux/device_cgroup.h
index 0b0d9c39ed67..7aad1f440867 100644
--- a/include/linux/device_cgroup.h
+++ b/include/linux/device_cgroup.h
@@ -2,8 +2,16 @@
2#include <linux/fs.h> 2#include <linux/fs.h>
3 3
4#ifdef CONFIG_CGROUP_DEVICE 4#ifdef CONFIG_CGROUP_DEVICE
5extern int devcgroup_inode_permission(struct inode *inode, int mask); 5extern int __devcgroup_inode_permission(struct inode *inode, int mask);
6extern int devcgroup_inode_mknod(int mode, dev_t dev); 6extern int devcgroup_inode_mknod(int mode, dev_t dev);
7static inline int devcgroup_inode_permission(struct inode *inode, int mask)
8{
9 if (likely(!inode->i_rdev))
10 return 0;
11 if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode))
12 return 0;
13 return __devcgroup_inode_permission(inode, mask);
14}
7#else 15#else
8static inline int devcgroup_inode_permission(struct inode *inode, int mask) 16static inline int devcgroup_inode_permission(struct inode *inode, int mask)
9{ return 0; } 17{ return 0; }
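
The device_cgroup.h change is an inline fast-path split: the two cheap tests (no i_rdev, not a block or char node) run inline at every permission check, and only real device nodes pay for the out-of-line cgroup walk. A self-contained sketch with a stub inode and locally defined mode macros; the __devcgroup_inode_permission() body is elided to a stub:

#include <stdio.h>

#define S_IFMT  0170000
#define S_IFBLK 0060000
#define S_IFCHR 0020000
#define S_ISBLK(m) (((m) & S_IFMT) == S_IFBLK)
#define S_ISCHR(m) (((m) & S_IFMT) == S_IFCHR)

struct inode {
	unsigned int i_rdev;	/* device number, 0 for non-devices */
	unsigned int i_mode;
};

/* The rare, expensive path stays out of line. */
static int __devcgroup_inode_permission(struct inode *inode, int mask)
{
	return 0;	/* walk the device cgroup whitelist here */
}

/* Inline fast path: anything that is not a device node returns
 * immediately without a function call. */
static inline int devcgroup_inode_permission(struct inode *inode, int mask)
{
	if (!inode->i_rdev)
		return 0;
	if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode))
		return 0;
	return __devcgroup_inode_permission(inode, mask);
}

int main(void)
{
	struct inode file = { 0, 0100644 };		/* regular file */
	struct inode disk = { 0x800, S_IFBLK | 0660 };	/* block device */

	printf("%d %d\n", devcgroup_inode_permission(&file, 4),
	       devcgroup_inode_permission(&disk, 4));
	return 0;
}
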
diff --git a/include/linux/input/sh_keysc.h b/include/linux/input/sh_keysc.h
index 649dc7f12925..5d253cd93691 100644
--- a/include/linux/input/sh_keysc.h
+++ b/include/linux/input/sh_keysc.h
@@ -1,7 +1,7 @@
1#ifndef __SH_KEYSC_H__ 1#ifndef __SH_KEYSC_H__
2#define __SH_KEYSC_H__ 2#define __SH_KEYSC_H__
3 3
4#define SH_KEYSC_MAXKEYS 49 4#define SH_KEYSC_MAXKEYS 64
5 5
6struct sh_keysc_info { 6struct sh_keysc_info {
7 enum { SH_KEYSC_MODE_1, SH_KEYSC_MODE_2, SH_KEYSC_MODE_3, 7 enum { SH_KEYSC_MODE_1, SH_KEYSC_MODE_2, SH_KEYSC_MODE_3,
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 6c12989839d9..f6efed0039ed 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -414,6 +414,7 @@ enum
414 TASKLET_SOFTIRQ, 414 TASKLET_SOFTIRQ,
415 SCHED_SOFTIRQ, 415 SCHED_SOFTIRQ,
416 HRTIMER_SOFTIRQ, 416 HRTIMER_SOFTIRQ,
417 RCU_SOFTIRQ, /* Preferably RCU should always be the last softirq */
417 418
418 NR_SOFTIRQS 419 NR_SOFTIRQS
419}; 420};
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 7ad824d510a2..8cc38d3bab0c 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -85,12 +85,15 @@ int smp_call_function_any(const struct cpumask *mask,
85 * Generic and arch helpers 85 * Generic and arch helpers
86 */ 86 */
87#ifdef CONFIG_USE_GENERIC_SMP_HELPERS 87#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
88void __init call_function_init(void);
88void generic_smp_call_function_single_interrupt(void); 89void generic_smp_call_function_single_interrupt(void);
89void generic_smp_call_function_interrupt(void); 90void generic_smp_call_function_interrupt(void);
90void ipi_call_lock(void); 91void ipi_call_lock(void);
91void ipi_call_unlock(void); 92void ipi_call_unlock(void);
92void ipi_call_lock_irq(void); 93void ipi_call_lock_irq(void);
93void ipi_call_unlock_irq(void); 94void ipi_call_unlock_irq(void);
95#else
96static inline void call_function_init(void) { }
94#endif 97#endif
95 98
96/* 99/*
@@ -134,7 +137,7 @@ static inline void smp_send_reschedule(int cpu) { }
134#define smp_prepare_boot_cpu() do {} while (0) 137#define smp_prepare_boot_cpu() do {} while (0)
135#define smp_call_function_many(mask, func, info, wait) \ 138#define smp_call_function_many(mask, func, info, wait) \
136 (up_smp_call_function(func, info)) 139 (up_smp_call_function(func, info))
137static inline void init_call_single_data(void) { } 140static inline void call_function_init(void) { }
138 141
139static inline int 142static inline int
140smp_call_function_any(const struct cpumask *mask, smp_call_func_t func, 143smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
diff --git a/include/linux/sunrpc/gss_krb5_enctypes.h b/include/linux/sunrpc/gss_krb5_enctypes.h
new file mode 100644
index 000000000000..ec6234eee89c
--- /dev/null
+++ b/include/linux/sunrpc/gss_krb5_enctypes.h
@@ -0,0 +1,4 @@
1/*
2 * Dumb way to share this static piece of information with nfsd
3 */
4#define KRB5_SUPPORTED_ENCTYPES "18,17,16,23,3,1,2"
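
Sharing the enctype list as a compile-time string lets nfsd print it without calling into the gss krb5 mech at runtime (the gss_mech_get_by_name() upcall removed in the nfsctl.c hunk above). Reduced to its essence:

#include <stdio.h>

/* The shared compile-time constant from gss_krb5_enctypes.h. */
#define KRB5_SUPPORTED_ENCTYPES "18,17,16,23,3,1,2"

/* supported_enctypes_show() now reduces to printing this string --
 * seq_printf() in the kernel, plain printf() here. */
int main(void)
{
	printf("%s\n", KRB5_SUPPORTED_ENCTYPES);
	return 0;
}
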
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index c7c42e7acc31..5d4f8e586e32 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -307,6 +307,12 @@ static inline int nf_ct_is_untracked(const struct nf_conn *ct)
307 return test_bit(IPS_UNTRACKED_BIT, &ct->status); 307 return test_bit(IPS_UNTRACKED_BIT, &ct->status);
308} 308}
309 309
310/* Packet is received from loopback */
311static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
312{
313 return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK;
314}
315
310extern int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp); 316extern int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
311extern unsigned int nf_conntrack_htable_size; 317extern unsigned int nf_conntrack_htable_size;
312extern unsigned int nf_conntrack_max; 318extern unsigned int nf_conntrack_max;
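
nf_is_loopback_packet() only reports true for packets that actually arrived on a device (skb_iif set) flagged IFF_LOOPBACK, so locally generated packets that have not been received yet do not qualify. A compilable sketch with pared-down stand-ins for sk_buff and net_device:

#include <stdbool.h>
#include <stdio.h>

#define IFF_LOOPBACK 0x8

struct net_device { unsigned int flags; };
struct sk_buff {
	struct net_device *dev;
	int skb_iif;		/* ifindex the packet was received on */
};

/* Mirrors the new helper: true only for packets that were actually
 * received (skb_iif set) on a loopback device. */
static bool nf_is_loopback_packet(const struct sk_buff *skb)
{
	return skb->dev && skb->skb_iif && (skb->dev->flags & IFF_LOOPBACK);
}

int main(void)
{
	struct net_device lo = { IFF_LOOPBACK };
	struct sk_buff rx = { &lo, 1 };	/* received on lo */
	struct sk_buff tx = { &lo, 0 };	/* locally generated, not received */

	printf("%d %d\n", nf_is_loopback_packet(&rx),
	       nf_is_loopback_packet(&tx));
	return 0;
}
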
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index ae045ca7d356..1c09820df585 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -20,7 +20,8 @@ struct softirq_action;
20 softirq_name(BLOCK_IOPOLL), \ 20 softirq_name(BLOCK_IOPOLL), \
21 softirq_name(TASKLET), \ 21 softirq_name(TASKLET), \
22 softirq_name(SCHED), \ 22 softirq_name(SCHED), \
23 softirq_name(HRTIMER)) 23 softirq_name(HRTIMER), \
24 softirq_name(RCU))
24 25
25/** 26/**
26 * irq_handler_entry - called immediately before the irq action handler 27 * irq_handler_entry - called immediately before the irq action handler
diff --git a/init/main.c b/init/main.c
index cafba67c13bf..d7211faed2ad 100644
--- a/init/main.c
+++ b/init/main.c
@@ -542,6 +542,7 @@ asmlinkage void __init start_kernel(void)
542 timekeeping_init(); 542 timekeeping_init();
543 time_init(); 543 time_init();
544 profile_init(); 544 profile_init();
545 call_function_init();
545 if (!irqs_disabled()) 546 if (!irqs_disabled())
546 printk(KERN_CRIT "start_kernel(): bug: interrupts were " 547 printk(KERN_CRIT "start_kernel(): bug: interrupts were "
547 "enabled early\n"); 548 "enabled early\n");
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 89419ff92e99..7e59ffb3d0ba 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -87,6 +87,8 @@ static struct rcu_state *rcu_state;
87int rcu_scheduler_active __read_mostly; 87int rcu_scheduler_active __read_mostly;
88EXPORT_SYMBOL_GPL(rcu_scheduler_active); 88EXPORT_SYMBOL_GPL(rcu_scheduler_active);
89 89
90#ifdef CONFIG_RCU_BOOST
91
90/* 92/*
91 * Control variables for per-CPU and per-rcu_node kthreads. These 93 * Control variables for per-CPU and per-rcu_node kthreads. These
92 * handle all flavors of RCU. 94 * handle all flavors of RCU.
@@ -98,8 +100,11 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
98DEFINE_PER_CPU(char, rcu_cpu_has_work); 100DEFINE_PER_CPU(char, rcu_cpu_has_work);
99static char rcu_kthreads_spawnable; 101static char rcu_kthreads_spawnable;
100 102
103#endif /* #ifdef CONFIG_RCU_BOOST */
104
101static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); 105static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
102static void invoke_rcu_cpu_kthread(void); 106static void invoke_rcu_core(void);
107static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
103 108
104#define RCU_KTHREAD_PRIO 1 /* RT priority for per-CPU kthreads. */ 109#define RCU_KTHREAD_PRIO 1 /* RT priority for per-CPU kthreads. */
105 110
@@ -1088,14 +1093,8 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
1088 int need_report = 0; 1093 int need_report = 0;
1089 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); 1094 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
1090 struct rcu_node *rnp; 1095 struct rcu_node *rnp;
1091 struct task_struct *t;
1092 1096
1093 /* Stop the CPU's kthread. */ 1097 rcu_stop_cpu_kthread(cpu);
1094 t = per_cpu(rcu_cpu_kthread_task, cpu);
1095 if (t != NULL) {
1096 per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
1097 kthread_stop(t);
1098 }
1099 1098
1100 /* Exclude any attempts to start a new grace period. */ 1099 /* Exclude any attempts to start a new grace period. */
1101 raw_spin_lock_irqsave(&rsp->onofflock, flags); 1100 raw_spin_lock_irqsave(&rsp->onofflock, flags);
@@ -1231,7 +1230,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
1231 1230
1232 /* Re-raise the RCU softirq if there are callbacks remaining. */ 1231 /* Re-raise the RCU softirq if there are callbacks remaining. */
1233 if (cpu_has_callbacks_ready_to_invoke(rdp)) 1232 if (cpu_has_callbacks_ready_to_invoke(rdp))
1234 invoke_rcu_cpu_kthread(); 1233 invoke_rcu_core();
1235} 1234}
1236 1235
1237/* 1236/*
@@ -1277,7 +1276,7 @@ void rcu_check_callbacks(int cpu, int user)
1277 } 1276 }
1278 rcu_preempt_check_callbacks(cpu); 1277 rcu_preempt_check_callbacks(cpu);
1279 if (rcu_pending(cpu)) 1278 if (rcu_pending(cpu))
1280 invoke_rcu_cpu_kthread(); 1279 invoke_rcu_core();
1281} 1280}
1282 1281
1283#ifdef CONFIG_SMP 1282#ifdef CONFIG_SMP
@@ -1442,13 +1441,14 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
1442 } 1441 }
1443 1442
1444 /* If there are callbacks ready, invoke them. */ 1443 /* If there are callbacks ready, invoke them. */
1445 rcu_do_batch(rsp, rdp); 1444 if (cpu_has_callbacks_ready_to_invoke(rdp))
1445 invoke_rcu_callbacks(rsp, rdp);
1446} 1446}
1447 1447
1448/* 1448/*
1449 * Do softirq processing for the current CPU. 1449 * Do softirq processing for the current CPU.
1450 */ 1450 */
1451static void rcu_process_callbacks(void) 1451static void rcu_process_callbacks(struct softirq_action *unused)
1452{ 1452{
1453 __rcu_process_callbacks(&rcu_sched_state, 1453 __rcu_process_callbacks(&rcu_sched_state,
1454 &__get_cpu_var(rcu_sched_data)); 1454 &__get_cpu_var(rcu_sched_data));
@@ -1465,342 +1465,20 @@ static void rcu_process_callbacks(void)
1465 * the current CPU with interrupts disabled, the rcu_cpu_kthread_task 1465 * the current CPU with interrupts disabled, the rcu_cpu_kthread_task
1466 * cannot disappear out from under us. 1466 * cannot disappear out from under us.
1467 */ 1467 */
1468static void invoke_rcu_cpu_kthread(void) 1468static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
1469{
1470 unsigned long flags;
1471
1472 local_irq_save(flags);
1473 __this_cpu_write(rcu_cpu_has_work, 1);
1474 if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
1475 local_irq_restore(flags);
1476 return;
1477 }
1478 wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
1479 local_irq_restore(flags);
1480}
1481
1482/*
1483 * Wake up the specified per-rcu_node-structure kthread.
1484 * Because the per-rcu_node kthreads are immortal, we don't need
1485 * to do anything to keep them alive.
1486 */
1487static void invoke_rcu_node_kthread(struct rcu_node *rnp)
1488{
1489 struct task_struct *t;
1490
1491 t = rnp->node_kthread_task;
1492 if (t != NULL)
1493 wake_up_process(t);
1494}
1495
1496/*
1497 * Set the specified CPU's kthread to run RT or not, as specified by
1498 * the to_rt argument. The CPU-hotplug locks are held, so the task
1499 * is not going away.
1500 */
1501static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
1502{
1503 int policy;
1504 struct sched_param sp;
1505 struct task_struct *t;
1506
1507 t = per_cpu(rcu_cpu_kthread_task, cpu);
1508 if (t == NULL)
1509 return;
1510 if (to_rt) {
1511 policy = SCHED_FIFO;
1512 sp.sched_priority = RCU_KTHREAD_PRIO;
1513 } else {
1514 policy = SCHED_NORMAL;
1515 sp.sched_priority = 0;
1516 }
1517 sched_setscheduler_nocheck(t, policy, &sp);
1518}
1519
1520/*
1521 * Timer handler to initiate the waking up of per-CPU kthreads that
1522 * have yielded the CPU due to excess numbers of RCU callbacks.
1523 * We wake up the per-rcu_node kthread, which in turn will wake up
1524 * the booster kthread.
1525 */
1526static void rcu_cpu_kthread_timer(unsigned long arg)
1527{
1528 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
1529 struct rcu_node *rnp = rdp->mynode;
1530
1531 atomic_or(rdp->grpmask, &rnp->wakemask);
1532 invoke_rcu_node_kthread(rnp);
1533}
1534
1535/*
1536 * Drop to non-real-time priority and yield, but only after posting a
1537 * timer that will cause us to regain our real-time priority if we
1538 * remain preempted. Either way, we restore our real-time priority
1539 * before returning.
1540 */
1541static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
1542{
1543 struct sched_param sp;
1544 struct timer_list yield_timer;
1545
1546 setup_timer_on_stack(&yield_timer, f, arg);
1547 mod_timer(&yield_timer, jiffies + 2);
1548 sp.sched_priority = 0;
1549 sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
1550 set_user_nice(current, 19);
1551 schedule();
1552 sp.sched_priority = RCU_KTHREAD_PRIO;
1553 sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1554 del_timer(&yield_timer);
1555}
1556
1557/*
1558 * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
1559 * This can happen while the corresponding CPU is either coming online
1560 * or going offline. We cannot wait until the CPU is fully online
1561 * before starting the kthread, because the various notifier functions
1562 * can wait for RCU grace periods. So we park rcu_cpu_kthread() until
1563 * the corresponding CPU is online.
1564 *
1565 * Return 1 if the kthread needs to stop, 0 otherwise.
1566 *
1567 * Caller must disable bh. This function can momentarily enable it.
1568 */
1569static int rcu_cpu_kthread_should_stop(int cpu)
1570{
1571 while (cpu_is_offline(cpu) ||
1572 !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
1573 smp_processor_id() != cpu) {
1574 if (kthread_should_stop())
1575 return 1;
1576 per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
1577 per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
1578 local_bh_enable();
1579 schedule_timeout_uninterruptible(1);
1580 if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
1581 set_cpus_allowed_ptr(current, cpumask_of(cpu));
1582 local_bh_disable();
1583 }
1584 per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
1585 return 0;
1586}
1587
1588/*
1589 * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
1590 * earlier RCU softirq.
1591 */
1592static int rcu_cpu_kthread(void *arg)
1593{
1594 int cpu = (int)(long)arg;
1595 unsigned long flags;
1596 int spincnt = 0;
1597 unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
1598 char work;
1599 char *workp = &per_cpu(rcu_cpu_has_work, cpu);
1600
1601 for (;;) {
1602 *statusp = RCU_KTHREAD_WAITING;
1603 rcu_wait(*workp != 0 || kthread_should_stop());
1604 local_bh_disable();
1605 if (rcu_cpu_kthread_should_stop(cpu)) {
1606 local_bh_enable();
1607 break;
1608 }
1609 *statusp = RCU_KTHREAD_RUNNING;
1610 per_cpu(rcu_cpu_kthread_loops, cpu)++;
1611 local_irq_save(flags);
1612 work = *workp;
1613 *workp = 0;
1614 local_irq_restore(flags);
1615 if (work)
1616 rcu_process_callbacks();
1617 local_bh_enable();
1618 if (*workp != 0)
1619 spincnt++;
1620 else
1621 spincnt = 0;
1622 if (spincnt > 10) {
1623 *statusp = RCU_KTHREAD_YIELDING;
1624 rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
1625 spincnt = 0;
1626 }
1627 }
1628 *statusp = RCU_KTHREAD_STOPPED;
1629 return 0;
1630}
1631
1632/*
1633 * Spawn a per-CPU kthread, setting up affinity and priority.
1634 * Because the CPU hotplug lock is held, no other CPU will be attempting
1635 * to manipulate rcu_cpu_kthread_task. There might be another CPU
1636 * attempting to access it during boot, but the locking in kthread_bind()
1637 * will enforce sufficient ordering.
1638 */
1639static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
1640{ 1469{
1641 struct sched_param sp; 1470 if (likely(!rsp->boost)) {
1642 struct task_struct *t; 1471 rcu_do_batch(rsp, rdp);
1643
1644 if (!rcu_kthreads_spawnable ||
1645 per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
1646 return 0;
1647 t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
1648 if (IS_ERR(t))
1649 return PTR_ERR(t);
1650 kthread_bind(t, cpu);
1651 per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
1652 WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
1653 per_cpu(rcu_cpu_kthread_task, cpu) = t;
1654 sp.sched_priority = RCU_KTHREAD_PRIO;
1655 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1656 return 0;
1657}
1658
1659/*
1660 * Per-rcu_node kthread, which is in charge of waking up the per-CPU
1661 * kthreads when needed. We ignore requests to wake up kthreads
1662 * for offline CPUs, which is OK because force_quiescent_state()
1663 * takes care of this case.
1664 */
1665static int rcu_node_kthread(void *arg)
1666{
1667 int cpu;
1668 unsigned long flags;
1669 unsigned long mask;
1670 struct rcu_node *rnp = (struct rcu_node *)arg;
1671 struct sched_param sp;
1672 struct task_struct *t;
1673
1674 for (;;) {
1675 rnp->node_kthread_status = RCU_KTHREAD_WAITING;
1676 rcu_wait(atomic_read(&rnp->wakemask) != 0);
1677 rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
1678 raw_spin_lock_irqsave(&rnp->lock, flags);
1679 mask = atomic_xchg(&rnp->wakemask, 0);
1680 rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
1681 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
1682 if ((mask & 0x1) == 0)
1683 continue;
1684 preempt_disable();
1685 t = per_cpu(rcu_cpu_kthread_task, cpu);
1686 if (!cpu_online(cpu) || t == NULL) {
1687 preempt_enable();
1688 continue;
1689 }
1690 per_cpu(rcu_cpu_has_work, cpu) = 1;
1691 sp.sched_priority = RCU_KTHREAD_PRIO;
1692 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1693 preempt_enable();
1694 }
1695 }
1696 /* NOTREACHED */
1697 rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
1698 return 0;
1699}
1700
1701/*
1702 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1703 * served by the rcu_node in question. The CPU hotplug lock is still
1704 * held, so the value of rnp->qsmaskinit will be stable.
1705 *
1706 * We don't include outgoingcpu in the affinity set, use -1 if there is
1707 * no outgoing CPU. If there are no CPUs left in the affinity set,
1708 * this function allows the kthread to execute on any CPU.
1709 */
1710static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1711{
1712 cpumask_var_t cm;
1713 int cpu;
1714 unsigned long mask = rnp->qsmaskinit;
1715
1716 if (rnp->node_kthread_task == NULL)
1717 return;
1718 if (!alloc_cpumask_var(&cm, GFP_KERNEL))
1719 return; 1472 return;
1720 cpumask_clear(cm);
1721 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
1722 if ((mask & 0x1) && cpu != outgoingcpu)
1723 cpumask_set_cpu(cpu, cm);
1724 if (cpumask_weight(cm) == 0) {
1725 cpumask_setall(cm);
1726 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
1727 cpumask_clear_cpu(cpu, cm);
1728 WARN_ON_ONCE(cpumask_weight(cm) == 0);
1729 } 1473 }
1730 set_cpus_allowed_ptr(rnp->node_kthread_task, cm); 1474 invoke_rcu_callbacks_kthread();
1731 rcu_boost_kthread_setaffinity(rnp, cm);
1732 free_cpumask_var(cm);
1733} 1475}
1734 1476
1735/* 1477static void invoke_rcu_core(void)
1736 * Spawn a per-rcu_node kthread, setting priority and affinity.
1737 * Called during boot before online/offline can happen, or, if
1738 * during runtime, with the main CPU-hotplug locks held. So only
1739 * one of these can be executing at a time.
1740 */
1741static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
1742 struct rcu_node *rnp)
1743{ 1478{
1744 unsigned long flags; 1479 raise_softirq(RCU_SOFTIRQ);
1745 int rnp_index = rnp - &rsp->node[0];
1746 struct sched_param sp;
1747 struct task_struct *t;
1748
1749 if (!rcu_kthreads_spawnable ||
1750 rnp->qsmaskinit == 0)
1751 return 0;
1752 if (rnp->node_kthread_task == NULL) {
1753 t = kthread_create(rcu_node_kthread, (void *)rnp,
1754 "rcun%d", rnp_index);
1755 if (IS_ERR(t))
1756 return PTR_ERR(t);
1757 raw_spin_lock_irqsave(&rnp->lock, flags);
1758 rnp->node_kthread_task = t;
1759 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1760 sp.sched_priority = 99;
1761 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1762 }
1763 return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
1764} 1480}
1765 1481
1766static void rcu_wake_one_boost_kthread(struct rcu_node *rnp);
1767
1768/*
1769 * Spawn all kthreads -- called as soon as the scheduler is running.
1770 */
1771static int __init rcu_spawn_kthreads(void)
1772{
1773 int cpu;
1774 struct rcu_node *rnp;
1775 struct task_struct *t;
1776
1777 rcu_kthreads_spawnable = 1;
1778 for_each_possible_cpu(cpu) {
1779 per_cpu(rcu_cpu_has_work, cpu) = 0;
1780 if (cpu_online(cpu)) {
1781 (void)rcu_spawn_one_cpu_kthread(cpu);
1782 t = per_cpu(rcu_cpu_kthread_task, cpu);
1783 if (t)
1784 wake_up_process(t);
1785 }
1786 }
1787 rnp = rcu_get_root(rcu_state);
1788 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1789 if (rnp->node_kthread_task)
1790 wake_up_process(rnp->node_kthread_task);
1791 if (NUM_RCU_NODES > 1) {
1792 rcu_for_each_leaf_node(rcu_state, rnp) {
1793 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1794 t = rnp->node_kthread_task;
1795 if (t)
1796 wake_up_process(t);
1797 rcu_wake_one_boost_kthread(rnp);
1798 }
1799 }
1800 return 0;
1801}
1802early_initcall(rcu_spawn_kthreads);
1803
1804static void 1482static void
1805__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), 1483__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
1806 struct rcu_state *rsp) 1484 struct rcu_state *rsp)
@@ -2207,44 +1885,6 @@ static void __cpuinit rcu_prepare_cpu(int cpu)
2207 rcu_preempt_init_percpu_data(cpu); 1885 rcu_preempt_init_percpu_data(cpu);
2208} 1886}
2209 1887
2210static void __cpuinit rcu_prepare_kthreads(int cpu)
2211{
2212 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
2213 struct rcu_node *rnp = rdp->mynode;
2214
2215 /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
2216 if (rcu_kthreads_spawnable) {
2217 (void)rcu_spawn_one_cpu_kthread(cpu);
2218 if (rnp->node_kthread_task == NULL)
2219 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
2220 }
2221}
2222
2223/*
2224 * kthread_create() creates threads in TASK_UNINTERRUPTIBLE state,
2225 * but the RCU threads are woken on demand, and if demand is low this
2226 * could be a while triggering the hung task watchdog.
2227 *
2228 * In order to avoid this, poke all tasks once the CPU is fully
2229 * up and running.
2230 */
2231static void __cpuinit rcu_online_kthreads(int cpu)
2232{
2233 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
2234 struct rcu_node *rnp = rdp->mynode;
2235 struct task_struct *t;
2236
2237 t = per_cpu(rcu_cpu_kthread_task, cpu);
2238 if (t)
2239 wake_up_process(t);
2240
2241 t = rnp->node_kthread_task;
2242 if (t)
2243 wake_up_process(t);
2244
2245 rcu_wake_one_boost_kthread(rnp);
2246}
2247
2248/* 1888/*
2249 * Handle CPU online/offline notification events. 1889 * Handle CPU online/offline notification events.
2250 */ 1890 */
@@ -2262,7 +1902,6 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
2262 rcu_prepare_kthreads(cpu); 1902 rcu_prepare_kthreads(cpu);
2263 break; 1903 break;
2264 case CPU_ONLINE: 1904 case CPU_ONLINE:
2265 rcu_online_kthreads(cpu);
2266 case CPU_DOWN_FAILED: 1905 case CPU_DOWN_FAILED:
2267 rcu_node_kthread_setaffinity(rnp, -1); 1906 rcu_node_kthread_setaffinity(rnp, -1);
2268 rcu_cpu_kthread_setrt(cpu, 1); 1907 rcu_cpu_kthread_setrt(cpu, 1);
@@ -2410,6 +2049,7 @@ void __init rcu_init(void)
2410 rcu_init_one(&rcu_sched_state, &rcu_sched_data); 2049 rcu_init_one(&rcu_sched_state, &rcu_sched_data);
2411 rcu_init_one(&rcu_bh_state, &rcu_bh_data); 2050 rcu_init_one(&rcu_bh_state, &rcu_bh_data);
2412 __rcu_init_preempt(); 2051 __rcu_init_preempt();
2052 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
2413 2053
2414 /* 2054 /*
2415 * We don't need protection against CPU-hotplug here because 2055 * We don't need protection against CPU-hotplug here because
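
With softirq processing restored by this rcutree.c hunk, __rcu_process_callbacks() calls invoke_rcu_callbacks(), which runs rcu_do_batch() directly in softirq context unless rsp->boost is set (meaning CONFIG_RCU_BOOST spawned a boost kthread), in which case the per-CPU kthread is woken instead; invoke_rcu_core() merely raises RCU_SOFTIRQ. A userspace sketch of just this dispatch, with the softirq and wakeup paths stubbed out as prints:

#include <stdbool.h>
#include <stdio.h>

struct rcu_state { bool boost; };

static void rcu_do_batch(struct rcu_state *rsp)
{
        (void)rsp;
        puts("run callbacks here, in softirq context");
}

static void invoke_rcu_callbacks_kthread(void)
{
        puts("wake the per-CPU rcuc kthread");
}

static void invoke_rcu_callbacks(struct rcu_state *rsp)
{
        if (!rsp->boost) {                   /* common case: no RCU_BOOST */
                rcu_do_batch(rsp);
                return;
        }
        invoke_rcu_callbacks_kthread();      /* boosting needs a real task */
}

int main(void)
{
        struct rcu_state plain = { .boost = false };
        struct rcu_state boosted = { .boost = true };

        invoke_rcu_callbacks(&plain);
        invoke_rcu_callbacks(&boosted);
        return 0;
}
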
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 7b9a08b4aaea..01b2ccda26fb 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -369,6 +369,7 @@ struct rcu_state {
369 /* period because */ 369 /* period because */
370 /* force_quiescent_state() */ 370 /* force_quiescent_state() */
371 /* was running. */ 371 /* was running. */
372 u8 boost; /* Subject to priority boost. */
372 unsigned long gpnum; /* Current gp number. */ 373 unsigned long gpnum; /* Current gp number. */
373 unsigned long completed; /* # of last completed gp. */ 374 unsigned long completed; /* # of last completed gp. */
374 375
@@ -426,6 +427,7 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
426#ifdef CONFIG_HOTPLUG_CPU 427#ifdef CONFIG_HOTPLUG_CPU
427static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, 428static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
428 unsigned long flags); 429 unsigned long flags);
430static void rcu_stop_cpu_kthread(int cpu);
429#endif /* #ifdef CONFIG_HOTPLUG_CPU */ 431#endif /* #ifdef CONFIG_HOTPLUG_CPU */
430static void rcu_print_detail_task_stall(struct rcu_state *rsp); 432static void rcu_print_detail_task_stall(struct rcu_state *rsp);
431static void rcu_print_task_stall(struct rcu_node *rnp); 433static void rcu_print_task_stall(struct rcu_node *rnp);
@@ -450,11 +452,19 @@ static void rcu_preempt_send_cbs_to_online(void);
450static void __init __rcu_init_preempt(void); 452static void __init __rcu_init_preempt(void);
451static void rcu_needs_cpu_flush(void); 453static void rcu_needs_cpu_flush(void);
452static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); 454static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
455static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
456static void invoke_rcu_callbacks_kthread(void);
457#ifdef CONFIG_RCU_BOOST
458static void rcu_preempt_do_callbacks(void);
453static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, 459static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
454 cpumask_var_t cm); 460 cpumask_var_t cm);
455static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
456static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, 461static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
457 struct rcu_node *rnp, 462 struct rcu_node *rnp,
458 int rnp_index); 463 int rnp_index);
464static void invoke_rcu_node_kthread(struct rcu_node *rnp);
465static void rcu_yield(void (*f)(unsigned long), unsigned long arg);
466#endif /* #ifdef CONFIG_RCU_BOOST */
467static void rcu_cpu_kthread_setrt(int cpu, int to_rt);
468static void __cpuinit rcu_prepare_kthreads(int cpu);
459 469
460#endif /* #ifndef RCU_TREE_NONCORE */ 470#endif /* #ifndef RCU_TREE_NONCORE */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index c8bff3099a89..14dc7dd00902 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -602,6 +602,15 @@ static void rcu_preempt_process_callbacks(void)
602 &__get_cpu_var(rcu_preempt_data)); 602 &__get_cpu_var(rcu_preempt_data));
603} 603}
604 604
605#ifdef CONFIG_RCU_BOOST
606
607static void rcu_preempt_do_callbacks(void)
608{
609 rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
610}
611
612#endif /* #ifdef CONFIG_RCU_BOOST */
613
605/* 614/*
606 * Queue a preemptible-RCU callback for invocation after a grace period. 615 * Queue a preemptible-RCU callback for invocation after a grace period.
607 */ 616 */
@@ -1249,6 +1258,23 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1249} 1258}
1250 1259
1251/* 1260/*
1261 * Wake up the per-CPU kthread to invoke RCU callbacks.
1262 */
1263static void invoke_rcu_callbacks_kthread(void)
1264{
1265 unsigned long flags;
1266
1267 local_irq_save(flags);
1268 __this_cpu_write(rcu_cpu_has_work, 1);
1269 if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
1270 local_irq_restore(flags);
1271 return;
1272 }
1273 wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
1274 local_irq_restore(flags);
1275}
1276
1277/*
1252 * Set the affinity of the boost kthread. The CPU-hotplug locks are 1278 * Set the affinity of the boost kthread. The CPU-hotplug locks are
1253 * held, so no one should be messing with the existence of the boost 1279 * held, so no one should be messing with the existence of the boost
1254 * kthread. 1280 * kthread.
@@ -1288,6 +1314,7 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
1288 1314
1289 if (&rcu_preempt_state != rsp) 1315 if (&rcu_preempt_state != rsp)
1290 return 0; 1316 return 0;
1317 rsp->boost = 1;
1291 if (rnp->boost_kthread_task != NULL) 1318 if (rnp->boost_kthread_task != NULL)
1292 return 0; 1319 return 0;
1293 t = kthread_create(rcu_boost_kthread, (void *)rnp, 1320 t = kthread_create(rcu_boost_kthread, (void *)rnp,
@@ -1299,13 +1326,372 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
1299 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1326 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1300 sp.sched_priority = RCU_KTHREAD_PRIO; 1327 sp.sched_priority = RCU_KTHREAD_PRIO;
1301 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 1328 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1329 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1302 return 0; 1330 return 0;
1303} 1331}
1304 1332
1305static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp) 1333#ifdef CONFIG_HOTPLUG_CPU
1334
1335/*
 1336 * Stop RCU's per-CPU kthread when its CPU goes offline.
1337 */
1338static void rcu_stop_cpu_kthread(int cpu)
1306{ 1339{
1307 if (rnp->boost_kthread_task) 1340 struct task_struct *t;
1308 wake_up_process(rnp->boost_kthread_task); 1341
1342 /* Stop the CPU's kthread. */
1343 t = per_cpu(rcu_cpu_kthread_task, cpu);
1344 if (t != NULL) {
1345 per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
1346 kthread_stop(t);
1347 }
1348}
1349
1350#endif /* #ifdef CONFIG_HOTPLUG_CPU */
1351
1352static void rcu_kthread_do_work(void)
1353{
1354 rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
1355 rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
1356 rcu_preempt_do_callbacks();
1357}
1358
1359/*
1360 * Wake up the specified per-rcu_node-structure kthread.
1361 * Because the per-rcu_node kthreads are immortal, we don't need
1362 * to do anything to keep them alive.
1363 */
1364static void invoke_rcu_node_kthread(struct rcu_node *rnp)
1365{
1366 struct task_struct *t;
1367
1368 t = rnp->node_kthread_task;
1369 if (t != NULL)
1370 wake_up_process(t);
1371}
1372
1373/*
1374 * Set the specified CPU's kthread to run RT or not, as specified by
1375 * the to_rt argument. The CPU-hotplug locks are held, so the task
1376 * is not going away.
1377 */
1378static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
1379{
1380 int policy;
1381 struct sched_param sp;
1382 struct task_struct *t;
1383
1384 t = per_cpu(rcu_cpu_kthread_task, cpu);
1385 if (t == NULL)
1386 return;
1387 if (to_rt) {
1388 policy = SCHED_FIFO;
1389 sp.sched_priority = RCU_KTHREAD_PRIO;
1390 } else {
1391 policy = SCHED_NORMAL;
1392 sp.sched_priority = 0;
1393 }
1394 sched_setscheduler_nocheck(t, policy, &sp);
1395}
1396
1397/*
1398 * Timer handler to initiate the waking up of per-CPU kthreads that
1399 * have yielded the CPU due to excess numbers of RCU callbacks.
1400 * We wake up the per-rcu_node kthread, which in turn will wake up
1401 * the booster kthread.
1402 */
1403static void rcu_cpu_kthread_timer(unsigned long arg)
1404{
1405 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
1406 struct rcu_node *rnp = rdp->mynode;
1407
1408 atomic_or(rdp->grpmask, &rnp->wakemask);
1409 invoke_rcu_node_kthread(rnp);
1410}
1411
1412/*
1413 * Drop to non-real-time priority and yield, but only after posting a
1414 * timer that will cause us to regain our real-time priority if we
1415 * remain preempted. Either way, we restore our real-time priority
1416 * before returning.
1417 */
1418static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
1419{
1420 struct sched_param sp;
1421 struct timer_list yield_timer;
1422
1423 setup_timer_on_stack(&yield_timer, f, arg);
1424 mod_timer(&yield_timer, jiffies + 2);
1425 sp.sched_priority = 0;
1426 sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
1427 set_user_nice(current, 19);
1428 schedule();
1429 sp.sched_priority = RCU_KTHREAD_PRIO;
1430 sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1431 del_timer(&yield_timer);
1432}
1433
1434/*
1435 * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
1436 * This can happen while the corresponding CPU is either coming online
1437 * or going offline. We cannot wait until the CPU is fully online
1438 * before starting the kthread, because the various notifier functions
1439 * can wait for RCU grace periods. So we park rcu_cpu_kthread() until
1440 * the corresponding CPU is online.
1441 *
1442 * Return 1 if the kthread needs to stop, 0 otherwise.
1443 *
1444 * Caller must disable bh. This function can momentarily enable it.
1445 */
1446static int rcu_cpu_kthread_should_stop(int cpu)
1447{
1448 while (cpu_is_offline(cpu) ||
1449 !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
1450 smp_processor_id() != cpu) {
1451 if (kthread_should_stop())
1452 return 1;
1453 per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
1454 per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
1455 local_bh_enable();
1456 schedule_timeout_uninterruptible(1);
1457 if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
1458 set_cpus_allowed_ptr(current, cpumask_of(cpu));
1459 local_bh_disable();
1460 }
1461 per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
1462 return 0;
1463}
1464
1465/*
1466 * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
1467 * earlier RCU softirq.
1468 */
1469static int rcu_cpu_kthread(void *arg)
1470{
1471 int cpu = (int)(long)arg;
1472 unsigned long flags;
1473 int spincnt = 0;
1474 unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
1475 char work;
1476 char *workp = &per_cpu(rcu_cpu_has_work, cpu);
1477
1478 for (;;) {
1479 *statusp = RCU_KTHREAD_WAITING;
1480 rcu_wait(*workp != 0 || kthread_should_stop());
1481 local_bh_disable();
1482 if (rcu_cpu_kthread_should_stop(cpu)) {
1483 local_bh_enable();
1484 break;
1485 }
1486 *statusp = RCU_KTHREAD_RUNNING;
1487 per_cpu(rcu_cpu_kthread_loops, cpu)++;
1488 local_irq_save(flags);
1489 work = *workp;
1490 *workp = 0;
1491 local_irq_restore(flags);
1492 if (work)
1493 rcu_kthread_do_work();
1494 local_bh_enable();
1495 if (*workp != 0)
1496 spincnt++;
1497 else
1498 spincnt = 0;
1499 if (spincnt > 10) {
1500 *statusp = RCU_KTHREAD_YIELDING;
1501 rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
1502 spincnt = 0;
1503 }
1504 }
1505 *statusp = RCU_KTHREAD_STOPPED;
1506 return 0;
1507}
1508
1509/*
1510 * Spawn a per-CPU kthread, setting up affinity and priority.
1511 * Because the CPU hotplug lock is held, no other CPU will be attempting
1512 * to manipulate rcu_cpu_kthread_task. There might be another CPU
1513 * attempting to access it during boot, but the locking in kthread_bind()
1514 * will enforce sufficient ordering.
1515 *
1516 * Please note that we cannot simply refuse to wake up the per-CPU
1517 * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
1518 * which can result in softlockup complaints if the task ends up being
1519 * idle for more than a couple of minutes.
1520 *
1521 * However, please note also that we cannot bind the per-CPU kthread to its
1522 * CPU until that CPU is fully online. We also cannot wait until the
1523 * CPU is fully online before we create its per-CPU kthread, as this would
1524 * deadlock the system when CPU notifiers tried waiting for grace
1525 * periods. So we bind the per-CPU kthread to its CPU only if the CPU
1526 * is online. If its CPU is not yet fully online, then the code in
1527 * rcu_cpu_kthread() will wait until it is fully online, and then do
1528 * the binding.
1529 */
1530static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
1531{
1532 struct sched_param sp;
1533 struct task_struct *t;
1534
1535 if (!rcu_kthreads_spawnable ||
1536 per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
1537 return 0;
1538 t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
1539 if (IS_ERR(t))
1540 return PTR_ERR(t);
1541 if (cpu_online(cpu))
1542 kthread_bind(t, cpu);
1543 per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
1544 WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
1545 sp.sched_priority = RCU_KTHREAD_PRIO;
1546 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1547 per_cpu(rcu_cpu_kthread_task, cpu) = t;
1548 wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
1549 return 0;
1550}
1551
1552/*
1553 * Per-rcu_node kthread, which is in charge of waking up the per-CPU
1554 * kthreads when needed. We ignore requests to wake up kthreads
1555 * for offline CPUs, which is OK because force_quiescent_state()
1556 * takes care of this case.
1557 */
1558static int rcu_node_kthread(void *arg)
1559{
1560 int cpu;
1561 unsigned long flags;
1562 unsigned long mask;
1563 struct rcu_node *rnp = (struct rcu_node *)arg;
1564 struct sched_param sp;
1565 struct task_struct *t;
1566
1567 for (;;) {
1568 rnp->node_kthread_status = RCU_KTHREAD_WAITING;
1569 rcu_wait(atomic_read(&rnp->wakemask) != 0);
1570 rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
1571 raw_spin_lock_irqsave(&rnp->lock, flags);
1572 mask = atomic_xchg(&rnp->wakemask, 0);
1573 rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
1574 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
1575 if ((mask & 0x1) == 0)
1576 continue;
1577 preempt_disable();
1578 t = per_cpu(rcu_cpu_kthread_task, cpu);
1579 if (!cpu_online(cpu) || t == NULL) {
1580 preempt_enable();
1581 continue;
1582 }
1583 per_cpu(rcu_cpu_has_work, cpu) = 1;
1584 sp.sched_priority = RCU_KTHREAD_PRIO;
1585 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1586 preempt_enable();
1587 }
1588 }
1589 /* NOTREACHED */
1590 rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
1591 return 0;
1592}
1593
1594/*
1595 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1596 * served by the rcu_node in question. The CPU hotplug lock is still
1597 * held, so the value of rnp->qsmaskinit will be stable.
1598 *
 1599 * We don't include outgoingcpu in the affinity set; use -1 if there is
1600 * no outgoing CPU. If there are no CPUs left in the affinity set,
1601 * this function allows the kthread to execute on any CPU.
1602 */
1603static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1604{
1605 cpumask_var_t cm;
1606 int cpu;
1607 unsigned long mask = rnp->qsmaskinit;
1608
1609 if (rnp->node_kthread_task == NULL)
1610 return;
1611 if (!alloc_cpumask_var(&cm, GFP_KERNEL))
1612 return;
1613 cpumask_clear(cm);
1614 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
1615 if ((mask & 0x1) && cpu != outgoingcpu)
1616 cpumask_set_cpu(cpu, cm);
1617 if (cpumask_weight(cm) == 0) {
1618 cpumask_setall(cm);
1619 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
1620 cpumask_clear_cpu(cpu, cm);
1621 WARN_ON_ONCE(cpumask_weight(cm) == 0);
1622 }
1623 set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
1624 rcu_boost_kthread_setaffinity(rnp, cm);
1625 free_cpumask_var(cm);
1626}
1627
1628/*
1629 * Spawn a per-rcu_node kthread, setting priority and affinity.
1630 * Called during boot before online/offline can happen, or, if
1631 * during runtime, with the main CPU-hotplug locks held. So only
1632 * one of these can be executing at a time.
1633 */
1634static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
1635 struct rcu_node *rnp)
1636{
1637 unsigned long flags;
1638 int rnp_index = rnp - &rsp->node[0];
1639 struct sched_param sp;
1640 struct task_struct *t;
1641
1642 if (!rcu_kthreads_spawnable ||
1643 rnp->qsmaskinit == 0)
1644 return 0;
1645 if (rnp->node_kthread_task == NULL) {
1646 t = kthread_create(rcu_node_kthread, (void *)rnp,
1647 "rcun%d", rnp_index);
1648 if (IS_ERR(t))
1649 return PTR_ERR(t);
1650 raw_spin_lock_irqsave(&rnp->lock, flags);
1651 rnp->node_kthread_task = t;
1652 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1653 sp.sched_priority = 99;
1654 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1655 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1656 }
1657 return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
1658}
1659
1660/*
1661 * Spawn all kthreads -- called as soon as the scheduler is running.
1662 */
1663static int __init rcu_spawn_kthreads(void)
1664{
1665 int cpu;
1666 struct rcu_node *rnp;
1667
1668 rcu_kthreads_spawnable = 1;
1669 for_each_possible_cpu(cpu) {
1670 per_cpu(rcu_cpu_has_work, cpu) = 0;
1671 if (cpu_online(cpu))
1672 (void)rcu_spawn_one_cpu_kthread(cpu);
1673 }
1674 rnp = rcu_get_root(rcu_state);
1675 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1676 if (NUM_RCU_NODES > 1) {
1677 rcu_for_each_leaf_node(rcu_state, rnp)
1678 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1679 }
1680 return 0;
1681}
1682early_initcall(rcu_spawn_kthreads);
1683
1684static void __cpuinit rcu_prepare_kthreads(int cpu)
1685{
1686 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
1687 struct rcu_node *rnp = rdp->mynode;
1688
1689 /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
1690 if (rcu_kthreads_spawnable) {
1691 (void)rcu_spawn_one_cpu_kthread(cpu);
1692 if (rnp->node_kthread_task == NULL)
1693 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1694 }
1309} 1695}
1310 1696
1311#else /* #ifdef CONFIG_RCU_BOOST */ 1697#else /* #ifdef CONFIG_RCU_BOOST */
@@ -1315,23 +1701,32 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1315 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1701 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1316} 1702}
1317 1703
1318static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, 1704static void invoke_rcu_callbacks_kthread(void)
1319 cpumask_var_t cm)
1320{ 1705{
1706 WARN_ON_ONCE(1);
1321} 1707}
1322 1708
1323static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) 1709static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1324{ 1710{
1325} 1711}
1326 1712
1327static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, 1713#ifdef CONFIG_HOTPLUG_CPU
1328 struct rcu_node *rnp, 1714
1329 int rnp_index) 1715static void rcu_stop_cpu_kthread(int cpu)
1716{
1717}
1718
1719#endif /* #ifdef CONFIG_HOTPLUG_CPU */
1720
1721static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1722{
1723}
1724
1725static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
1330{ 1726{
1331 return 0;
1332} 1727}
1333 1728
1334static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp) 1729static void __cpuinit rcu_prepare_kthreads(int cpu)
1335{ 1730{
1336} 1731}
1337 1732
@@ -1509,7 +1904,7 @@ static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
1509 * 1904 *
1510 * Because it is not legal to invoke rcu_process_callbacks() with irqs 1905 * Because it is not legal to invoke rcu_process_callbacks() with irqs
1511 * disabled, we do one pass of force_quiescent_state(), then do a 1906 * disabled, we do one pass of force_quiescent_state(), then do a
1512 * invoke_rcu_cpu_kthread() to cause rcu_process_callbacks() to be invoked 1907 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
1513 * later. The per-cpu rcu_dyntick_drain variable controls the sequencing. 1908 * later. The per-cpu rcu_dyntick_drain variable controls the sequencing.
1514 */ 1909 */
1515int rcu_needs_cpu(int cpu) 1910int rcu_needs_cpu(int cpu)
@@ -1560,7 +1955,7 @@ int rcu_needs_cpu(int cpu)
1560 1955
1561 /* If RCU callbacks are still pending, RCU still needs this CPU. */ 1956 /* If RCU callbacks are still pending, RCU still needs this CPU. */
1562 if (c) 1957 if (c)
1563 invoke_rcu_cpu_kthread(); 1958 invoke_rcu_core();
1564 return c; 1959 return c;
1565} 1960}
1566 1961
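
The relocated rcu_cpu_kthread() keeps its spin/yield heuristic: if more than ten consecutive loop passes still leave work pending, the kthread yields the CPU via rcu_yield() before spinning again. A toy model of just the counter logic, with the work source invented for the demonstration:

#include <stdio.h>

int main(void)
{
        int spincnt = 0, pass;

        for (pass = 0; pass < 25; pass++) {
                int work_left = (pass < 15);    /* pretend work stays pending */

                spincnt = work_left ? spincnt + 1 : 0;
                if (spincnt > 10) {
                        printf("pass %d: yielding CPU\n", pass);
                        spincnt = 0;            /* rcu_yield() would run here */
                }
        }
        return 0;
}
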
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 9678cc3650f5..4e144876dc68 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -46,6 +46,8 @@
46#define RCU_TREE_NONCORE 46#define RCU_TREE_NONCORE
47#include "rcutree.h" 47#include "rcutree.h"
48 48
49#ifdef CONFIG_RCU_BOOST
50
49DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status); 51DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
50DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_cpu); 52DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_cpu);
51DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); 53DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
@@ -58,6 +60,8 @@ static char convert_kthread_status(unsigned int kthread_status)
58 return "SRWOY"[kthread_status]; 60 return "SRWOY"[kthread_status];
59} 61}
60 62
63#endif /* #ifdef CONFIG_RCU_BOOST */
64
61static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) 65static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
62{ 66{
63 if (!rdp->beenonline) 67 if (!rdp->beenonline)
@@ -76,7 +80,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
76 rdp->dynticks_fqs); 80 rdp->dynticks_fqs);
77#endif /* #ifdef CONFIG_NO_HZ */ 81#endif /* #ifdef CONFIG_NO_HZ */
78 seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi); 82 seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi);
79 seq_printf(m, " ql=%ld qs=%c%c%c%c kt=%d/%c/%d ktl=%x b=%ld", 83 seq_printf(m, " ql=%ld qs=%c%c%c%c",
80 rdp->qlen, 84 rdp->qlen,
81 ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] != 85 ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
82 rdp->nxttail[RCU_NEXT_TAIL]], 86 rdp->nxttail[RCU_NEXT_TAIL]],
@@ -84,13 +88,16 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
84 rdp->nxttail[RCU_NEXT_READY_TAIL]], 88 rdp->nxttail[RCU_NEXT_READY_TAIL]],
85 ".W"[rdp->nxttail[RCU_DONE_TAIL] != 89 ".W"[rdp->nxttail[RCU_DONE_TAIL] !=
86 rdp->nxttail[RCU_WAIT_TAIL]], 90 rdp->nxttail[RCU_WAIT_TAIL]],
87 ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]], 91 ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]);
92#ifdef CONFIG_RCU_BOOST
93 seq_printf(m, " kt=%d/%c/%d ktl=%x",
88 per_cpu(rcu_cpu_has_work, rdp->cpu), 94 per_cpu(rcu_cpu_has_work, rdp->cpu),
89 convert_kthread_status(per_cpu(rcu_cpu_kthread_status, 95 convert_kthread_status(per_cpu(rcu_cpu_kthread_status,
90 rdp->cpu)), 96 rdp->cpu)),
91 per_cpu(rcu_cpu_kthread_cpu, rdp->cpu), 97 per_cpu(rcu_cpu_kthread_cpu, rdp->cpu),
92 per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff, 98 per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff);
93 rdp->blimit); 99#endif /* #ifdef CONFIG_RCU_BOOST */
100 seq_printf(m, " b=%ld", rdp->blimit);
94 seq_printf(m, " ci=%lu co=%lu ca=%lu\n", 101 seq_printf(m, " ci=%lu co=%lu ca=%lu\n",
95 rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); 102 rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
96} 103}
@@ -147,18 +154,21 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
147 rdp->dynticks_fqs); 154 rdp->dynticks_fqs);
148#endif /* #ifdef CONFIG_NO_HZ */ 155#endif /* #ifdef CONFIG_NO_HZ */
149 seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi); 156 seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi);
150 seq_printf(m, ",%ld,\"%c%c%c%c\",%d,\"%c\",%ld", rdp->qlen, 157 seq_printf(m, ",%ld,\"%c%c%c%c\"", rdp->qlen,
151 ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] != 158 ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
152 rdp->nxttail[RCU_NEXT_TAIL]], 159 rdp->nxttail[RCU_NEXT_TAIL]],
153 ".R"[rdp->nxttail[RCU_WAIT_TAIL] != 160 ".R"[rdp->nxttail[RCU_WAIT_TAIL] !=
154 rdp->nxttail[RCU_NEXT_READY_TAIL]], 161 rdp->nxttail[RCU_NEXT_READY_TAIL]],
155 ".W"[rdp->nxttail[RCU_DONE_TAIL] != 162 ".W"[rdp->nxttail[RCU_DONE_TAIL] !=
156 rdp->nxttail[RCU_WAIT_TAIL]], 163 rdp->nxttail[RCU_WAIT_TAIL]],
157 ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]], 164 ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]);
165#ifdef CONFIG_RCU_BOOST
166 seq_printf(m, ",%d,\"%c\"",
158 per_cpu(rcu_cpu_has_work, rdp->cpu), 167 per_cpu(rcu_cpu_has_work, rdp->cpu),
159 convert_kthread_status(per_cpu(rcu_cpu_kthread_status, 168 convert_kthread_status(per_cpu(rcu_cpu_kthread_status,
160 rdp->cpu)), 169 rdp->cpu)));
161 rdp->blimit); 170#endif /* #ifdef CONFIG_RCU_BOOST */
171 seq_printf(m, ",%ld", rdp->blimit);
162 seq_printf(m, ",%lu,%lu,%lu\n", 172 seq_printf(m, ",%lu,%lu,%lu\n",
163 rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); 173 rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
164} 174}
@@ -169,7 +179,11 @@ static int show_rcudata_csv(struct seq_file *m, void *unused)
169#ifdef CONFIG_NO_HZ 179#ifdef CONFIG_NO_HZ
170 seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\","); 180 seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\",");
171#endif /* #ifdef CONFIG_NO_HZ */ 181#endif /* #ifdef CONFIG_NO_HZ */
172 seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\",\"ci\",\"co\",\"ca\"\n"); 182 seq_puts(m, "\"of\",\"ri\",\"ql\",\"qs\"");
183#ifdef CONFIG_RCU_BOOST
184 seq_puts(m, "\"kt\",\"ktl\"");
185#endif /* #ifdef CONFIG_RCU_BOOST */
186 seq_puts(m, ",\"b\",\"ci\",\"co\",\"ca\"\n");
173#ifdef CONFIG_TREE_PREEMPT_RCU 187#ifdef CONFIG_TREE_PREEMPT_RCU
174 seq_puts(m, "\"rcu_preempt:\"\n"); 188 seq_puts(m, "\"rcu_preempt:\"\n");
175 PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m); 189 PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m);
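
The rcutree_trace.c hunks keep the debugfs CSV header and the per-CPU rows in agreement: the kthread columns are emitted only under CONFIG_RCU_BOOST, in both places. A toy sketch of the pattern, with the field names invented:

#include <stdio.h>

#define CONFIG_RCU_BOOST 1   /* the column set depends on this */

static void print_header(void)
{
        printf("\"ql\"");
#if CONFIG_RCU_BOOST
        printf(",\"kt\"");
#endif
        printf(",\"b\"\n");
}

static void print_row(long ql, int kt, long b)
{
        printf("%ld", ql);
#if CONFIG_RCU_BOOST
        printf(",%d", kt);
#endif
        printf(",%ld\n", b);
}

int main(void)
{
        print_header();
        print_row(3, 1, 10);    /* header and row stay column-aligned */
        return 0;
}
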
diff --git a/kernel/smp.c b/kernel/smp.c
index 73a195193558..fb67dfa8394e 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -74,7 +74,7 @@ static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
74 .notifier_call = hotplug_cfd, 74 .notifier_call = hotplug_cfd,
75}; 75};
76 76
77static int __cpuinit init_call_single_data(void) 77void __init call_function_init(void)
78{ 78{
79 void *cpu = (void *)(long)smp_processor_id(); 79 void *cpu = (void *)(long)smp_processor_id();
80 int i; 80 int i;
@@ -88,10 +88,7 @@ static int __cpuinit init_call_single_data(void)
88 88
89 hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu); 89 hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
90 register_cpu_notifier(&hotplug_cfd_notifier); 90 register_cpu_notifier(&hotplug_cfd_notifier);
91
92 return 0;
93} 91}
94early_initcall(init_call_single_data);
95 92
96/* 93/*
97 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources 94 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 13960170cad4..40cf63ddd4b3 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -58,7 +58,7 @@ DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
58 58
59char *softirq_to_name[NR_SOFTIRQS] = { 59char *softirq_to_name[NR_SOFTIRQS] = {
60 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL", 60 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
61 "TASKLET", "SCHED", "HRTIMER" 61 "TASKLET", "SCHED", "HRTIMER", "RCU"
62}; 62};
63 63
64/* 64/*
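
Adding RCU_SOFTIRQ means updating both the ftrace name list in include/trace/events/irq.h and softirq_to_name[] here, since both are indexed by the same enum; leaving either one short would make index 9 print garbage. A self-contained sketch of the lockstep requirement:

#include <stdio.h>

enum {
        HI_SOFTIRQ, TIMER_SOFTIRQ, NET_TX_SOFTIRQ, NET_RX_SOFTIRQ,
        BLOCK_SOFTIRQ, BLOCK_IOPOLL_SOFTIRQ, TASKLET_SOFTIRQ,
        SCHED_SOFTIRQ, HRTIMER_SOFTIRQ,
        RCU_SOFTIRQ,            /* new entry; kept last */
        NR_SOFTIRQS
};

static const char *softirq_to_name[NR_SOFTIRQS] = {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
        "TASKLET", "SCHED", "HRTIMER", "RCU"    /* must track the enum */
};

int main(void)
{
        printf("softirq %d is %s\n", RCU_SOFTIRQ, softirq_to_name[RCU_SOFTIRQ]);
        return 0;
}
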
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 1c95fd677328..e0980f0d9a0a 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -185,7 +185,6 @@ static struct clocksource *watchdog;
185static struct timer_list watchdog_timer; 185static struct timer_list watchdog_timer;
186static DECLARE_WORK(watchdog_work, clocksource_watchdog_work); 186static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
187static DEFINE_SPINLOCK(watchdog_lock); 187static DEFINE_SPINLOCK(watchdog_lock);
188static cycle_t watchdog_last;
189static int watchdog_running; 188static int watchdog_running;
190 189
191static int clocksource_watchdog_kthread(void *data); 190static int clocksource_watchdog_kthread(void *data);
@@ -254,11 +253,6 @@ static void clocksource_watchdog(unsigned long data)
254 if (!watchdog_running) 253 if (!watchdog_running)
255 goto out; 254 goto out;
256 255
257 wdnow = watchdog->read(watchdog);
258 wd_nsec = clocksource_cyc2ns((wdnow - watchdog_last) & watchdog->mask,
259 watchdog->mult, watchdog->shift);
260 watchdog_last = wdnow;
261
262 list_for_each_entry(cs, &watchdog_list, wd_list) { 256 list_for_each_entry(cs, &watchdog_list, wd_list) {
263 257
264 /* Clocksource already marked unstable? */ 258 /* Clocksource already marked unstable? */
@@ -268,19 +262,28 @@ static void clocksource_watchdog(unsigned long data)
268 continue; 262 continue;
269 } 263 }
270 264
265 local_irq_disable();
271 csnow = cs->read(cs); 266 csnow = cs->read(cs);
267 wdnow = watchdog->read(watchdog);
268 local_irq_enable();
272 269
273 /* Clocksource initialized ? */ 270 /* Clocksource initialized ? */
274 if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) { 271 if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
275 cs->flags |= CLOCK_SOURCE_WATCHDOG; 272 cs->flags |= CLOCK_SOURCE_WATCHDOG;
276 cs->wd_last = csnow; 273 cs->wd_last = wdnow;
274 cs->cs_last = csnow;
277 continue; 275 continue;
278 } 276 }
279 277
280 /* Check the deviation from the watchdog clocksource. */ 278 wd_nsec = clocksource_cyc2ns((wdnow - cs->wd_last) & watchdog->mask,
281 cs_nsec = clocksource_cyc2ns((csnow - cs->wd_last) & 279 watchdog->mult, watchdog->shift);
280
281 cs_nsec = clocksource_cyc2ns((csnow - cs->cs_last) &
282 cs->mask, cs->mult, cs->shift); 282 cs->mask, cs->mult, cs->shift);
283 cs->wd_last = csnow; 283 cs->cs_last = csnow;
284 cs->wd_last = wdnow;
285
286 /* Check the deviation from the watchdog clocksource. */
284 if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) { 287 if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
285 clocksource_unstable(cs, cs_nsec - wd_nsec); 288 clocksource_unstable(cs, cs_nsec - wd_nsec);
286 continue; 289 continue;
@@ -318,7 +321,6 @@ static inline void clocksource_start_watchdog(void)
318 return; 321 return;
319 init_timer(&watchdog_timer); 322 init_timer(&watchdog_timer);
320 watchdog_timer.function = clocksource_watchdog; 323 watchdog_timer.function = clocksource_watchdog;
321 watchdog_last = watchdog->read(watchdog);
322 watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL; 324 watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
323 add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask)); 325 add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
324 watchdog_running = 1; 326 watchdog_running = 1;
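
The clocksource hunk drops the single shared watchdog_last: each clocksource now keeps its own cs_last/wd_last pair, and the two counters are read back-to-back with interrupts disabled, so both deltas cover the same interval. The subtraction is still done modulo the counter mask, which keeps it correct across a wrap. A sketch of that wrap-safe delta, with made-up counter values:

#include <stdint.h>
#include <stdio.h>

static uint64_t cyc_delta(uint64_t now, uint64_t last, uint64_t mask)
{
        return (now - last) & mask;     /* wrap-safe for masks of 2^n - 1 */
}

int main(void)
{
        uint64_t mask = 0xffffffffull;              /* 32-bit counter */
        uint64_t last = 0xfffffff0ull, now = 0x10;  /* wrapped in between */

        printf("delta = %llu cycles\n",
               (unsigned long long)cyc_delta(now, last, mask));   /* 32 */
        return 0;
}
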
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index dff763b7baf1..1f06468a10d7 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -240,13 +240,10 @@ static const char **find_next(void *v, loff_t *pos)
240 const char **fmt = v; 240 const char **fmt = v;
241 int start_index; 241 int start_index;
242 242
243 if (!fmt)
244 fmt = __start___trace_bprintk_fmt + *pos;
245
246 start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt; 243 start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt;
247 244
248 if (*pos < start_index) 245 if (*pos < start_index)
249 return fmt; 246 return __start___trace_bprintk_fmt + *pos;
250 247
251 return find_next_mod_format(start_index, v, fmt, pos); 248 return find_next_mod_format(start_index, v, fmt, pos);
252} 249}
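
The trace_printk fix makes find_next() derive the element from *pos on every call rather than only when v is NULL, so a stale pointer from a previous traversal can never be returned after the sequence restarts. A userspace model of the fixed iterator, with the format table and the module-format fallback invented:

#include <stdio.h>

static const char *const fmts[] = { "fmt-a %d\n", "fmt-b %s\n" };
#define NFMTS (sizeof(fmts) / sizeof(fmts[0]))

static const char *const *find_next(long *pos)
{
        if (*pos >= 0 && (size_t)*pos < NFMTS)
                return &fmts[*pos];     /* recomputed from *pos every call */
        return NULL;                    /* module formats would be searched here */
}

int main(void)
{
        const char *const *f;
        long pos;

        for (pos = 0; (f = find_next(&pos)) != NULL; pos++)
                printf("format %ld: %s", pos, *f);
        return 0;
}
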
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index c7a581a96894..917ecb93ea28 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -205,7 +205,7 @@ int register_vlan_dev(struct net_device *dev)
205 grp->nr_vlans++; 205 grp->nr_vlans++;
206 206
207 if (ngrp) { 207 if (ngrp) {
208 if (ops->ndo_vlan_rx_register) 208 if (ops->ndo_vlan_rx_register && (real_dev->features & NETIF_F_HW_VLAN_RX))
209 ops->ndo_vlan_rx_register(real_dev, ngrp); 209 ops->ndo_vlan_rx_register(real_dev, ngrp);
210 rcu_assign_pointer(real_dev->vlgrp, ngrp); 210 rcu_assign_pointer(real_dev->vlgrp, ngrp);
211 } 211 }
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index f13ddbf858ba..77930aa522e3 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -477,14 +477,16 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
477 * command otherwise */ 477 * command otherwise */
478 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 }; 478 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
479 479
 480 /* Events for 1.2 and newer controllers */ 480 /* CSR 1.1 dongles do not accept any bitfield so don't try to set
481 if (hdev->lmp_ver > 1) { 481 * any event mask for pre 1.2 devices */
482 events[4] |= 0x01; /* Flow Specification Complete */ 482 if (hdev->lmp_ver <= 1)
483 events[4] |= 0x02; /* Inquiry Result with RSSI */ 483 return;
484 events[4] |= 0x04; /* Read Remote Extended Features Complete */ 484
485 events[5] |= 0x08; /* Synchronous Connection Complete */ 485 events[4] |= 0x01; /* Flow Specification Complete */
486 events[5] |= 0x10; /* Synchronous Connection Changed */ 486 events[4] |= 0x02; /* Inquiry Result with RSSI */
487 } 487 events[4] |= 0x04; /* Read Remote Extended Features Complete */
488 events[5] |= 0x08; /* Synchronous Connection Complete */
489 events[5] |= 0x10; /* Synchronous Connection Changed */
488 490
489 if (hdev->features[3] & LMP_RSSI_INQ) 491 if (hdev->features[3] & LMP_RSSI_INQ)
490 events[4] |= 0x04; /* Inquiry Result with RSSI */ 492 events[4] |= 0x04; /* Inquiry Result with RSSI */
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 18dc9888d8c2..8248303f44e8 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -413,6 +413,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
413 break; 413 break;
414 } 414 }
415 415
416 memset(&cinfo, 0, sizeof(cinfo));
416 cinfo.hci_handle = chan->conn->hcon->handle; 417 cinfo.hci_handle = chan->conn->hcon->handle;
417 memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3); 418 memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3);
418 419
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 386cfaffd4b7..1b10727ce523 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -788,6 +788,7 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
788 788
789 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk; 789 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
790 790
791 memset(&cinfo, 0, sizeof(cinfo));
791 cinfo.hci_handle = conn->hcon->handle; 792 cinfo.hci_handle = conn->hcon->handle;
792 memcpy(cinfo.dev_class, conn->hcon->dev_class, 3); 793 memcpy(cinfo.dev_class, conn->hcon->dev_class, 3);
793 794
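
Both Bluetooth getsockopt paths above gain a memset() of cinfo before the fields are filled in, so the struct's padding bytes no longer carry stale kernel stack contents when the buffer is copied to user space. A sketch of the pattern, with a hypothetical struct whose layout has padding on common ABIs:

#include <stdio.h>
#include <string.h>

struct conn_info {
        unsigned short hci_handle;
        unsigned char dev_class[3];     /* sizeof() includes a trailing pad byte */
};

int main(void)
{
        struct conn_info cinfo;

        memset(&cinfo, 0, sizeof(cinfo));   /* padding is now deterministic */
        cinfo.hci_handle = 0x2a;
        memcpy(cinfo.dev_class, "\x00\x1f\x00", 3);

        /* in the kernel, copy_to_user(optval, &cinfo, len) would follow */
        printf("sizeof=%zu handle=%u\n", sizeof(cinfo), cinfo.hci_handle);
        return 0;
}
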
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 42fdffd1d76c..cb4fb7837e5c 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -369,6 +369,15 @@ static void __sco_sock_close(struct sock *sk)
369 369
370 case BT_CONNECTED: 370 case BT_CONNECTED:
371 case BT_CONFIG: 371 case BT_CONFIG:
372 if (sco_pi(sk)->conn) {
373 sk->sk_state = BT_DISCONN;
374 sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
375 hci_conn_put(sco_pi(sk)->conn->hcon);
376 sco_pi(sk)->conn->hcon = NULL;
377 } else
378 sco_chan_del(sk, ECONNRESET);
379 break;
380
372 case BT_CONNECT: 381 case BT_CONNECT:
373 case BT_DISCONN: 382 case BT_DISCONN:
374 sco_chan_del(sk, ECONNRESET); 383 sco_chan_del(sk, ECONNRESET);
@@ -819,7 +828,9 @@ static void sco_chan_del(struct sock *sk, int err)
819 conn->sk = NULL; 828 conn->sk = NULL;
820 sco_pi(sk)->conn = NULL; 829 sco_pi(sk)->conn = NULL;
821 sco_conn_unlock(conn); 830 sco_conn_unlock(conn);
822 hci_conn_put(conn->hcon); 831
832 if (conn->hcon)
833 hci_conn_put(conn->hcon);
823 } 834 }
824 835
825 sk->sk_state = BT_CLOSED; 836 sk->sk_state = BT_CLOSED;
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index a6b2f86378c7..c188c803c09c 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -243,6 +243,7 @@ int br_netpoll_enable(struct net_bridge_port *p)
243 goto out; 243 goto out;
244 244
245 np->dev = p->dev; 245 np->dev = p->dev;
246 strlcpy(np->dev_name, p->dev->name, IFNAMSIZ);
246 247
247 err = __netpoll_setup(np); 248 err = __netpoll_setup(np);
248 if (err) { 249 if (err) {
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 2f14eafdeeab..29b9812c8da0 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1424,7 +1424,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1424 switch (ih->type) { 1424 switch (ih->type) {
1425 case IGMP_HOST_MEMBERSHIP_REPORT: 1425 case IGMP_HOST_MEMBERSHIP_REPORT:
1426 case IGMPV2_HOST_MEMBERSHIP_REPORT: 1426 case IGMPV2_HOST_MEMBERSHIP_REPORT:
1427 BR_INPUT_SKB_CB(skb2)->mrouters_only = 1; 1427 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1428 err = br_ip4_multicast_add_group(br, port, ih->group); 1428 err = br_ip4_multicast_add_group(br, port, ih->group);
1429 break; 1429 break;
1430 case IGMPV3_HOST_MEMBERSHIP_REPORT: 1430 case IGMPV3_HOST_MEMBERSHIP_REPORT:
@@ -1543,7 +1543,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1543 goto out; 1543 goto out;
1544 } 1544 }
1545 mld = (struct mld_msg *)skb_transport_header(skb2); 1545 mld = (struct mld_msg *)skb_transport_header(skb2);
1546 BR_INPUT_SKB_CB(skb2)->mrouters_only = 1; 1546 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1547 err = br_ip6_multicast_add_group(br, port, &mld->mld_mca); 1547 err = br_ip6_multicast_add_group(br, port, &mld->mld_mca);
1548 break; 1548 break;
1549 } 1549 }
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index 3a66b8c10e09..c23979e79dfa 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -255,7 +255,7 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
255 255
256 if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd) { 256 if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd) {
257 257
258 if ((ctrl == _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND || 258 if ((ctrl == _CAIF_CTRLCMD_PHYIF_DOWN_IND ||
259 ctrl == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND) && 259 ctrl == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND) &&
260 layer->id != 0) { 260 layer->id != 0) {
261 261
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index ed0eab39f531..02548b292b53 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -44,7 +44,7 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 pid,
 	pr_debug("%s\n", __func__);
 
 	if (!buf)
-		goto out;
+		return -EMSGSIZE;
 
 	hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags,
 		IEEE802154_LIST_PHY);
@@ -65,6 +65,7 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 pid,
 			pages * sizeof(uint32_t), buf);
 
 	mutex_unlock(&phy->pib_lock);
+	kfree(buf);
 	return genlmsg_end(msg, hdr);
 
 nla_put_failure:
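Both nl-phy.c hunks deal with the temporary buf: the first returns -EMSGSIZE directly when the allocation fails, and the second adds the kfree() that was missing from the success path. The "free the scratch buffer on every exit path" pattern, sketched in userspace (fill_message and its error codes are illustrative):

#include <stdlib.h>
#include <string.h>

#define SCRATCH_LEN 32

static int fill_message(char *msg, size_t msg_len)
{
	char *buf = malloc(SCRATCH_LEN);

	if (!buf)
		return -1;	/* allocation failure reported directly */

	memset(buf, 0, SCRATCH_LEN);	/* pretend to gather attributes */

	if (msg_len < SCRATCH_LEN) {
		free(buf);	/* error path releases the scratch buffer */
		return -1;
	}

	memcpy(msg, buf, SCRATCH_LEN);
	free(buf);		/* ...and so does the success path */
	return 0;
}

int main(void)
{
	char msg[64];

	return fill_message(msg, sizeof(msg));
}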
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 9c1926027a26..eae1f676f870 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -676,6 +676,7 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags)
 
 	lock_sock(sk2);
 
+	sock_rps_record_flow(sk2);
 	WARN_ON(!((1 << sk2->sk_state) &
 		  (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE)));
 
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 6ffe94ca5bc9..3267d3898437 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -437,7 +437,7 @@ static int valid_cc(const void *bc, int len, int cc)
 			return 0;
 		if (cc == len)
 			return 1;
-		if (op->yes < 4)
+		if (op->yes < 4 || op->yes & 3)
 			return 0;
 		len -= op->yes;
 		bc  += op->yes;
@@ -447,11 +447,11 @@ static int valid_cc(const void *bc, int len, int cc)
 
 static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
 {
-	const unsigned char *bc = bytecode;
+	const void *bc = bytecode;
 	int  len = bytecode_len;
 
 	while (len > 0) {
-		struct inet_diag_bc_op *op = (struct inet_diag_bc_op *)bc;
+		const struct inet_diag_bc_op *op = bc;
 
 //printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
 		switch (op->code) {
@@ -462,22 +462,20 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
 		case INET_DIAG_BC_S_LE:
 		case INET_DIAG_BC_D_GE:
 		case INET_DIAG_BC_D_LE:
-			if (op->yes < 4 || op->yes > len + 4)
-				return -EINVAL;
 		case INET_DIAG_BC_JMP:
-			if (op->no < 4 || op->no > len + 4)
+			if (op->no < 4 || op->no > len + 4 || op->no & 3)
 				return -EINVAL;
 			if (op->no < len &&
 			    !valid_cc(bytecode, bytecode_len, len - op->no))
 				return -EINVAL;
 			break;
 		case INET_DIAG_BC_NOP:
-			if (op->yes < 4 || op->yes > len + 4)
-				return -EINVAL;
 			break;
 		default:
 			return -EINVAL;
 		}
+		if (op->yes < 4 || op->yes > len + 4 || op->yes & 3)
+			return -EINVAL;
 		bc  += op->yes;
 		len -= op->yes;
 	}
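The audit above tightens validation of user-supplied filter bytecode: jump offsets must now also be 4-byte aligned (op->yes & 3, op->no & 3), and the op->yes range check is hoisted after the switch so every opcode gets it. A self-contained sketch of the idea (the op layout and opcodes here are illustrative, not the real inet_diag ABI):

#include <stdio.h>

/* Illustrative op: 4 bytes on common ABIs, so valid jumps are
 * multiples of 4. */
struct bc_op {
	unsigned char	code;
	unsigned char	pad;
	unsigned short	yes;	/* bytes to skip forward */
};

enum { OP_NOP, OP_JMP, OP_MAX };

static int audit(const void *bytecode, int len)
{
	const unsigned char *bc = bytecode;

	while (len > 0) {
		const struct bc_op *op = (const struct bc_op *)bc;

		if (op->code >= OP_MAX)
			return -1;
		/* Jump must be in range AND aligned, or a crafted blob
		 * could land the walker in the middle of an op. */
		if (op->yes < 4 || op->yes > len || op->yes & 3)
			return -1;
		bc += op->yes;
		len -= op->yes;
	}
	return 0;
}

int main(void)
{
	struct bc_op prog[2] = { { OP_JMP, 0, 4 }, { OP_NOP, 0, 4 } };

	printf("audit: %d\n", audit(prog, sizeof(prog)));
	return 0;
}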
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index f7f9bd7ba12d..5c9b9d963918 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -203,7 +203,8 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
 	else
 		pmsg->outdev_name[0] = '\0';
 
-	if (entry->indev && entry->skb->dev) {
+	if (entry->indev && entry->skb->dev &&
+	    entry->skb->mac_header != entry->skb->network_header) {
 		pmsg->hw_type = entry->skb->dev->type;
 		pmsg->hw_addrlen = dev_parse_header(entry->skb,
 						    pmsg->hw_addr);
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 764743843503..24e556e83a3b 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -566,7 +566,7 @@ check_entry(const struct ipt_entry *e, const char *name)
 	const struct xt_entry_target *t;
 
 	if (!ip_checkentry(&e->ip)) {
-		duprintf("ip check failed %p %s.\n", e, par->match->name);
+		duprintf("ip check failed %p %s.\n", e, name);
 		return -EINVAL;
 	}
 
diff --git a/net/ipv4/netfilter/ipt_ecn.c b/net/ipv4/netfilter/ipt_ecn.c
index af6e9c778345..2b57e52c746c 100644
--- a/net/ipv4/netfilter/ipt_ecn.c
+++ b/net/ipv4/netfilter/ipt_ecn.c
@@ -25,7 +25,8 @@ MODULE_LICENSE("GPL");
 static inline bool match_ip(const struct sk_buff *skb,
 			    const struct ipt_ecn_info *einfo)
 {
-	return (ip_hdr(skb)->tos & IPT_ECN_IP_MASK) == einfo->ip_ect;
+	return ((ip_hdr(skb)->tos & IPT_ECN_IP_MASK) == einfo->ip_ect) ^
+	       !!(einfo->invert & IPT_ECN_OP_MATCH_IP);
 }
 
 static inline bool match_tcp(const struct sk_buff *skb,
@@ -76,8 +77,6 @@ static bool ecn_mt(const struct sk_buff *skb, struct xt_action_param *par)
 		return false;
 
 	if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR)) {
-		if (ip_hdr(skb)->protocol != IPPROTO_TCP)
-			return false;
 		if (!match_tcp(skb, info, &par->hotdrop))
 			return false;
 	}
@@ -97,7 +96,7 @@ static int ecn_mt_check(const struct xt_mtchk_param *par)
 		return -EINVAL;
 
 	if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR) &&
-	    ip->proto != IPPROTO_TCP) {
+	    (ip->proto != IPPROTO_TCP || ip->invflags & IPT_INV_PROTO)) {
 		pr_info("cannot match TCP bits in rule for non-tcp packets\n");
 		return -EINVAL;
 	}
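match_ip() now honors the invert flag by XORing the raw comparison with the (normalized) invert bit, the usual way netfilter matches implement "!" rules. A standalone sketch of that trick (flag values and struct are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define ECN_IP_MASK	0x03
#define OP_MATCH_IP	0x01	/* illustrative flag value */

struct ecn_info {
	unsigned char ip_ect;
	unsigned char invert;
};

/* XOR flips the verdict exactly when the invert bit is set; the !!
 * folds the flag into 0 or 1 so the XOR is well defined. */
static bool match_ip(unsigned char tos, const struct ecn_info *info)
{
	return ((tos & ECN_IP_MASK) == info->ip_ect) ^
	       !!(info->invert & OP_MATCH_IP);
}

int main(void)
{
	struct ecn_info plain = { .ip_ect = 0x02, .invert = 0 };
	struct ecn_info inverted = { .ip_ect = 0x02, .invert = OP_MATCH_IP };

	printf("%d %d\n", match_ip(0x02, &plain),	/* 1: matches  */
			  match_ip(0x02, &inverted));	/* 0: inverted */
	return 0;
}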
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index db10075dd88e..de9da21113a1 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -121,7 +121,9 @@ static unsigned int ipv4_confirm(unsigned int hooknum,
 		return ret;
 	}
 
-	if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) {
+	/* adjust seqs for loopback traffic only in outgoing direction */
+	if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
+	    !nf_is_loopback_packet(skb)) {
 		typeof(nf_nat_seq_adjust_hook) seq_adjust;
 
 		seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook);
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 9aaa67165f42..39b403f854c6 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -41,7 +41,6 @@
 #include <linux/proc_fs.h>
 #include <net/sock.h>
 #include <net/ping.h>
-#include <net/icmp.h>
 #include <net/udp.h>
 #include <net/route.h>
 #include <net/inet_common.h>
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 045f0ec6a4a0..aa13ef105110 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1902,9 +1902,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 
 	hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
 	rth = rt_intern_hash(hash, rth, skb, dev->ifindex);
-	err = 0;
-	if (IS_ERR(rth))
-		err = PTR_ERR(rth);
+	return IS_ERR(rth) ? PTR_ERR(rth) : 0;
 
 e_nobufs:
 	return -ENOBUFS;
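rt_intern_hash() returns either a valid pointer or an error encoded in the pointer itself, so the three-line dance collapses into the usual IS_ERR() ? PTR_ERR() : 0 one-liner. A userspace sketch of pointer-encoded errors, with minimal stand-ins for the kernel's ERR_PTR helpers (illustrative, not the kernel headers):

#include <errno.h>
#include <stdio.h>

/* Error values live in the top page of the address space, so a
 * pointer can carry either an object address or a negative errno. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *lookup(int fail)
{
	static int object;

	return fail ? ERR_PTR(-ENOBUFS) : &object;
}

int main(void)
{
	void *p = lookup(1);

	/* The idiom from the hunk above: one expression, no temp. */
	long err = IS_ERR(p) ? PTR_ERR(p) : 0;

	printf("err = %ld\n", err);	/* -ENOBUFS (-105 on Linux) */
	return 0;
}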
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a7d6671e33b8..708dc203b034 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1589,6 +1589,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 		goto discard;
 
 	if (nsk != sk) {
+		sock_rps_save_rxhash(nsk, skb->rxhash);
 		if (tcp_child_process(sk, nsk, skb)) {
 			rsk = nsk;
 			goto reset;
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 065fe405fb58..249394863284 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -204,7 +204,8 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
 	else
 		pmsg->outdev_name[0] = '\0';
 
-	if (entry->indev && entry->skb->dev) {
+	if (entry->indev && entry->skb->dev &&
+	    entry->skb->mac_header != entry->skb->network_header) {
 		pmsg->hw_type = entry->skb->dev->type;
 		pmsg->hw_addrlen = dev_parse_header(entry->skb, pmsg->hw_addr);
 	}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index d1fd28711ba5..87551ca568cd 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1644,6 +1644,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 	 * the new socket..
 	 */
 	if(nsk != sk) {
+		sock_rps_save_rxhash(nsk, skb->rxhash);
 		if (tcp_child_process(sk, nsk, skb))
 			goto reset;
 		if (opt_skb)
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index bf28ac2fc99b..782db275ac53 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -776,8 +776,16 @@ static void ip_vs_conn_expire(unsigned long data)
 		if (cp->control)
 			ip_vs_control_del(cp);
 
-		if (cp->flags & IP_VS_CONN_F_NFCT)
+		if (cp->flags & IP_VS_CONN_F_NFCT) {
 			ip_vs_conn_drop_conntrack(cp);
+			/* Do not access conntracks during subsys cleanup
+			 * because nf_conntrack_find_get can not be used after
+			 * conntrack cleanup for the net.
+			 */
+			smp_rmb();
+			if (ipvs->enable)
+				ip_vs_conn_drop_conntrack(cp);
+		}
 
 		ip_vs_pe_put(cp->pe);
 		kfree(cp->pe_data);
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 55af2242bccd..24c28d238dcb 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1945,6 +1945,7 @@ static void __net_exit __ip_vs_dev_cleanup(struct net *net)
 {
 	EnterFunction(2);
 	net_ipvs(net)->enable = 0;	/* Disable packet reception */
+	smp_wmb();
 	__ip_vs_sync_cleanup(net);
 	LeaveFunction(2);
 }
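The two ipvs hunks pair up: __ip_vs_dev_cleanup() clears enable and issues smp_wmb() before the sync cleanup, while ip_vs_conn_expire() issues smp_rmb() and re-checks enable before touching conntrack state. A loose userspace analogue using C11 release/acquire in place of the kernel barriers (all names here are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int enable = 1;		/* ipvs->enable analogue */
static atomic_int cleanup_started;

/* Writer: clear the flag first, then publish that cleanup has begun.
 * The release store plays the role of smp_wmb() in the hunk above. */
static void dev_cleanup(void)
{
	atomic_store_explicit(&enable, 0, memory_order_relaxed);
	atomic_store_explicit(&cleanup_started, 1, memory_order_release);
}

/* Reader: once it observes cleanup_started, the acquire load (the
 * smp_rmb() analogue) guarantees it also observes enable == 0, so it
 * never touches the half-destroyed resource. */
static void conn_expire(void)
{
	if (atomic_load_explicit(&cleanup_started, memory_order_acquire) &&
	    !atomic_load_explicit(&enable, memory_order_relaxed)) {
		puts("cleanup in progress: skipping conntrack access");
		return;
	}
	puts("touching conntrack");
}

int main(void)
{
	conn_expire();		/* before cleanup: may touch resource */
	dev_cleanup();
	conn_expire();		/* after cleanup: must skip */
	return 0;
}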
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index e0ee010935e7..2e7ccbb43ddb 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -456,7 +456,8 @@ __build_packet_message(struct nfulnl_instance *inst,
 	if (skb->mark)
 		NLA_PUT_BE32(inst->skb, NFULA_MARK, htonl(skb->mark));
 
-	if (indev && skb->dev) {
+	if (indev && skb->dev &&
+	    skb->mac_header != skb->network_header) {
 		struct nfulnl_msg_packet_hw phw;
 		int len = dev_parse_header(skb, phw.hw_addr);
 		if (len > 0) {
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index b83123f12b42..fdd2fafe0a14 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -335,7 +335,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
 	if (entskb->mark)
 		NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark));
 
-	if (indev && entskb->dev) {
+	if (indev && entskb->dev &&
+	    entskb->mac_header != entskb->network_header) {
 		struct nfqnl_msg_packet_hw phw;
 		int len = dev_parse_header(entskb, phw.hw_addr);
 		if (len) {
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 0a9a2ec2e469..c3b75333b821 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -43,6 +43,7 @@
 #include <linux/sunrpc/gss_krb5.h>
 #include <linux/sunrpc/xdr.h>
 #include <linux/crypto.h>
+#include <linux/sunrpc/gss_krb5_enctypes.h>
 
 #ifdef RPC_DEBUG
 # define RPCDBG_FACILITY	RPCDBG_AUTH
@@ -750,7 +751,7 @@ static struct gss_api_mech gss_kerberos_mech = {
 	.gm_ops = &gss_kerberos_ops,
 	.gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs),
 	.gm_pfs = gss_kerberos_pfs,
-	.gm_upcall_enctypes = "18,17,16,23,3,1,2",
+	.gm_upcall_enctypes = KRB5_SUPPORTED_ENCTYPES,
 };
 
 static int __init init_kerberos_module(void)
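Moving the enctype list into <linux/sunrpc/gss_krb5_enctypes.h> (also added to the diffstat alongside the fs/nfsd changes) gives the gss mech and its other consumer one shared definition instead of duplicated string literals that can drift apart. In miniature (the second consumer here is illustrative):

#include <stdio.h>

/* Shared-header analogue: one place to update the supported list. */
#define KRB5_SUPPORTED_ENCTYPES "18,17,16,23,3,1,2"

/* Consumer 1: the mech-structure field. */
static const char *gm_upcall_enctypes = KRB5_SUPPORTED_ENCTYPES;

int main(void)
{
	/* Consumer 2: an upcall writer emitting the same list. */
	printf("enctypes: %s\n", gm_upcall_enctypes);
	return 0;
}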
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index cd1f779fa51d..1be68269e1c2 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -474,17 +474,11 @@ struct cgroup_subsys devices_subsys = {
 	.subsys_id = devices_subsys_id,
 };
 
-int devcgroup_inode_permission(struct inode *inode, int mask)
+int __devcgroup_inode_permission(struct inode *inode, int mask)
 {
 	struct dev_cgroup *dev_cgroup;
 	struct dev_whitelist_item *wh;
 
-	dev_t device = inode->i_rdev;
-	if (!device)
-		return 0;
-	if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode))
-		return 0;
-
 	rcu_read_lock();
 
 	dev_cgroup = task_devcgroup(current);
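The rename to __devcgroup_inode_permission() goes with the include/linux/device_cgroup.h change in this series: the cheap "is this a device node at all?" tests move into an inline wrapper, so ordinary files never enter the RCU-protected walk. The shape of that split, sketched generically (inode fields and names here are simplified stand-ins):

#include <stdio.h>

struct inode {
	int is_dev;		/* stand-in for S_ISBLK()/S_ISCHR() */
	unsigned int rdev;	/* stand-in for inode->i_rdev */
};

/* Slow path: stands in for the RCU-protected whitelist walk. */
static int __inode_permission(const struct inode *inode)
{
	printf("slow path for rdev %u\n", inode->rdev);
	return 0;
}

/* Inline fast path: the checks deleted above now run here, before
 * any locking, so non-device inodes return immediately. */
static inline int inode_permission(const struct inode *inode)
{
	if (!inode->rdev)
		return 0;
	if (!inode->is_dev)
		return 0;
	return __inode_permission(inode);
}

int main(void)
{
	struct inode regular = { 0, 0 };
	struct inode chardev = { 1, 42 };

	inode_permission(&regular);	/* skips the slow path */
	inode_permission(&chardev);	/* takes the slow path */
	return 0;
}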
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 032ba6398a5c..940257b5774e 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -633,7 +633,7 @@ prefix_SQ = $(subst ','\'',$(prefix))
 
 SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH))
 
-LIBS = -Wl,--whole-archive $(PERFLIBS) -Wl,--no-whole-archive $(EXTLIBS)
+LIBS = -Wl,--whole-archive $(PERFLIBS) -Wl,--no-whole-archive -Wl,--start-group $(EXTLIBS) -Wl,--end-group
 
 ALL_CFLAGS += $(BASIC_CFLAGS)
 ALL_CFLAGS += $(ARCH_CFLAGS)
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 1e88485c16a0..0a7ed5b5e281 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -2187,6 +2187,7 @@ static const struct flag flags[] = {
2187 { "TASKLET_SOFTIRQ", 6 }, 2187 { "TASKLET_SOFTIRQ", 6 },
2188 { "SCHED_SOFTIRQ", 7 }, 2188 { "SCHED_SOFTIRQ", 7 },
2189 { "HRTIMER_SOFTIRQ", 8 }, 2189 { "HRTIMER_SOFTIRQ", 8 },
2190 { "RCU_SOFTIRQ", 9 },
2190 2191
2191 { "HRTIMER_NORESTART", 0 }, 2192 { "HRTIMER_NORESTART", 0 },
2192 { "HRTIMER_RESTART", 1 }, 2193 { "HRTIMER_RESTART", 1 },