-rw-r--r--Documentation/DocBook/filesystems.tmpl4
-rw-r--r--Documentation/filesystems/Locking2
-rw-r--r--Documentation/filesystems/porting5
-rw-r--r--Documentation/filesystems/vfs.txt4
-rw-r--r--Documentation/laptops/laptop-mode.txt12
-rw-r--r--Documentation/sysctl/vm.txt14
-rw-r--r--MAINTAINERS5
-rw-r--r--arch/blackfin/kernel/setup.c1
-rw-r--r--arch/ia64/kernel/acpi.c5
-rw-r--r--arch/m68k/Kconfig12
-rw-r--r--arch/m68k/Kconfig.cpu14
-rw-r--r--arch/m68k/apollo/config.c16
-rw-r--r--arch/m68k/include/asm/Kbuild25
-rw-r--r--arch/m68k/include/asm/MC68332.h152
-rw-r--r--arch/m68k/include/asm/apollodma.h248
-rw-r--r--arch/m68k/include/asm/apollohw.h2
-rw-r--r--arch/m68k/include/asm/bitsperlong.h1
-rw-r--r--arch/m68k/include/asm/cputime.h6
-rw-r--r--arch/m68k/include/asm/delay.h2
-rw-r--r--arch/m68k/include/asm/device.h7
-rw-r--r--arch/m68k/include/asm/emergency-restart.h6
-rw-r--r--arch/m68k/include/asm/errno.h6
-rw-r--r--arch/m68k/include/asm/futex.h6
-rw-r--r--arch/m68k/include/asm/ioctl.h1
-rw-r--r--arch/m68k/include/asm/ipcbuf.h1
-rw-r--r--arch/m68k/include/asm/irq_regs.h1
-rw-r--r--arch/m68k/include/asm/kdebug.h1
-rw-r--r--arch/m68k/include/asm/kmap_types.h6
-rw-r--r--arch/m68k/include/asm/kvm_para.h1
-rw-r--r--arch/m68k/include/asm/local.h6
-rw-r--r--arch/m68k/include/asm/local64.h1
-rw-r--r--arch/m68k/include/asm/mac_mouse.h23
-rw-r--r--arch/m68k/include/asm/mcfmbus.h77
-rw-r--r--arch/m68k/include/asm/mman.h1
-rw-r--r--arch/m68k/include/asm/mutex.h9
-rw-r--r--arch/m68k/include/asm/percpu.h6
-rw-r--r--arch/m68k/include/asm/resource.h6
-rw-r--r--arch/m68k/include/asm/sbus.h45
-rw-r--r--arch/m68k/include/asm/scatterlist.h6
-rw-r--r--arch/m68k/include/asm/sections.h8
-rw-r--r--arch/m68k/include/asm/shm.h31
-rw-r--r--arch/m68k/include/asm/siginfo.h6
-rw-r--r--arch/m68k/include/asm/statfs.h6
-rw-r--r--arch/m68k/include/asm/topology.h6
-rw-r--r--arch/m68k/include/asm/types.h22
-rw-r--r--arch/m68k/include/asm/unaligned.h4
-rw-r--r--arch/m68k/include/asm/xor.h1
-rw-r--r--arch/m68k/kernel/setup_no.c11
-rw-r--r--arch/m68k/kernel/sys_m68k.c8
-rw-r--r--arch/m68k/kernel/vmlinux-nommu.lds2
-rw-r--r--arch/m68k/kernel/vmlinux-std.lds2
-rw-r--r--arch/m68k/kernel/vmlinux-sun3.lds2
-rw-r--r--arch/m68k/lib/muldi3.c2
-rw-r--r--arch/m68k/mm/init_mm.c2
-rw-r--r--arch/m68k/mm/init_no.c2
-rw-r--r--arch/m68k/platform/68328/head-de2.S8
-rw-r--r--arch/m68k/platform/68328/head-pilot.S10
-rw-r--r--arch/m68k/platform/68328/head-ram.S4
-rw-r--r--arch/m68k/platform/68328/head-rom.S6
-rw-r--r--arch/m68k/platform/68360/head-ram.S6
-rw-r--r--arch/m68k/platform/68360/head-rom.S8
-rw-r--r--arch/m68k/platform/coldfire/head.S10
-rw-r--r--arch/m68k/sun3/prom/init.c48
-rw-r--r--arch/microblaze/include/asm/sections.h4
-rw-r--r--arch/microblaze/kernel/microblaze_ksyms.c3
-rw-r--r--arch/microblaze/kernel/setup.c4
-rw-r--r--arch/microblaze/kernel/vmlinux.lds.S1
-rw-r--r--arch/sh/include/asm/sections.h1
-rw-r--r--arch/sh/kernel/setup.c2
-rw-r--r--arch/sh/kernel/sh_ksyms_32.c1
-rw-r--r--arch/sh/kernel/vmlinux.lds.S1
-rw-r--r--arch/sh/lib/mcount.S8
-rw-r--r--arch/x86/Kconfig2
-rw-r--r--arch/x86/include/asm/mce.h8
-rw-r--r--arch/x86/include/asm/perf_event.h11
-rw-r--r--arch/x86/kernel/acpi/sleep.c4
-rw-r--r--arch/x86/kernel/acpi/sleep.h2
-rw-r--r--arch/x86/kernel/acpi/wakeup_32.S4
-rw-r--r--arch/x86/kernel/acpi/wakeup_64.S4
-rw-r--r--arch/x86/kernel/alternative.c2
-rw-r--r--arch/x86/kernel/apic/io_apic.c4
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-severity.c7
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c43
-rw-r--r--arch/x86/kernel/cpu/perf_event.c89
-rw-r--r--arch/x86/kernel/cpu/perf_event.h20
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd_ibs.c4
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c7
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.h2
-rw-r--r--arch/x86/kernel/irq.c1
-rw-r--r--arch/x86/kernel/kdebugfs.c6
-rw-r--r--arch/x86/kvm/i8259.c17
-rw-r--r--arch/x86/kvm/vmx.c20
-rw-r--r--arch/x86/kvm/x86.c4
-rw-r--r--arch/x86/mm/srat.c15
-rw-r--r--arch/x86/syscalls/syscall_64.tbl2
-rw-r--r--drivers/acpi/acpica/achware.h12
-rw-r--r--drivers/acpi/acpica/hwesleep.c19
-rw-r--r--drivers/acpi/acpica/hwsleep.c20
-rw-r--r--drivers/acpi/acpica/hwxfsleep.c22
-rw-r--r--drivers/acpi/numa.c12
-rw-r--r--drivers/acpi/pci_root.c11
-rw-r--r--drivers/acpi/processor_driver.c2
-rw-r--r--drivers/acpi/sleep.c75
-rw-r--r--drivers/acpi/sysfs.c4
-rw-r--r--drivers/atm/iphase.c2
-rw-r--r--drivers/bcma/host_pci.c1
-rw-r--r--drivers/bcma/sprom.c4
-rw-r--r--drivers/block/drbd/drbd_main.c4
-rw-r--r--drivers/iommu/amd_iommu.c25
-rw-r--r--drivers/iommu/amd_iommu_init.c6
-rw-r--r--drivers/iommu/exynos-iommu.c6
-rw-r--r--drivers/iommu/intel-iommu.c26
-rw-r--r--drivers/iommu/tegra-smmu.c17
-rw-r--r--drivers/isdn/isdnloop/isdnloop.c12
-rw-r--r--drivers/isdn/mISDN/layer2.c2
-rw-r--r--drivers/mtd/maps/uclinux.c5
-rw-r--r--drivers/net/appletalk/cops.c4
-rw-r--r--drivers/net/appletalk/ltpc.c4
-rw-r--r--drivers/net/cris/eth_v10.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c16
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/sense.c14
-rw-r--r--drivers/net/ethernet/sfc/efx.c6
-rw-r--r--drivers/net/ethernet/sfc/efx.h14
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c16
-rw-r--r--drivers/net/ethernet/sfc/tx.c19
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c1
-rw-r--r--drivers/net/hyperv/netvsc.c7
-rw-r--r--drivers/net/hyperv/rndis_filter.c11
-rw-r--r--drivers/net/phy/mdio-mux-gpio.c1
-rw-r--r--drivers/net/usb/cdc_ncm.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c1
-rw-r--r--drivers/net/wireless/b43/main.c21
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.c5
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c3
-rw-r--r--drivers/net/wireless/libertas/cfg.c1
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c1
-rw-r--r--drivers/net/wireless/libertas/main.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c68
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c71
-rw-r--r--drivers/pinctrl/pinctrl-imx23.c2
-rw-r--r--drivers/pinctrl/pinctrl-imx28.c2
-rw-r--r--drivers/pinctrl/pinctrl-nomadik-db8500.c2
-rw-r--r--drivers/pinctrl/pinctrl-nomadik.c1
-rw-r--r--drivers/pinctrl/pinctrl-sirf.c1
-rw-r--r--drivers/pinctrl/pinctrl-u300.c8
-rw-r--r--drivers/usb/early/ehci-dbgp.c2
-rw-r--r--drivers/zorro/zorro.c2
-rw-r--r--fs/bio.c2
-rw-r--r--fs/btrfs/inode.c3
-rw-r--r--fs/btrfs/ordered-data.c2
-rw-r--r--fs/btrfs/super.c4
-rw-r--r--fs/btrfs/volumes.c4
-rw-r--r--fs/exofs/inode.c27
-rw-r--r--fs/exofs/ore.c14
-rw-r--r--fs/exofs/super.c11
-rw-r--r--fs/ext3/inode.c8
-rw-r--r--fs/ext3/super.c11
-rw-r--r--fs/ext4/inode.c10
-rw-r--r--fs/ext4/super.c11
-rw-r--r--fs/gfs2/meta_io.c2
-rw-r--r--fs/hfs/mdb.c4
-rw-r--r--fs/jbd/journal.c4
-rw-r--r--fs/jbd2/journal.c4
-rw-r--r--fs/nilfs2/super.c4
-rw-r--r--fs/nilfs2/the_nilfs.h2
-rw-r--r--fs/open.c2
-rw-r--r--fs/super.c40
-rw-r--r--fs/ubifs/file.c10
-rw-r--r--fs/ubifs/super.c2
-rw-r--r--include/acpi/acpixf.h4
-rw-r--r--include/acpi/actypes.h2
-rw-r--r--include/linux/acpi.h2
-rw-r--r--include/linux/backing-dev.h1
-rw-r--r--include/linux/bcma/bcma_driver_chipcommon.h6
-rw-r--r--include/linux/fs.h3
-rw-r--r--include/linux/ftrace_event.h5
-rw-r--r--include/linux/hardirq.h2
-rw-r--r--include/linux/iommu.h2
-rw-r--r--include/linux/ipv6.h1
-rw-r--r--include/linux/irq.h1
-rw-r--r--include/linux/jiffies.h29
-rw-r--r--include/linux/kdb.h2
-rw-r--r--include/linux/netdevice.h2
-rw-r--r--include/linux/perf_event.h3
-rw-r--r--include/linux/timex.h2
-rw-r--r--include/linux/topology.h1
-rw-r--r--include/linux/writeback.h1
-rw-r--r--include/net/cfg80211.h2
-rw-r--r--include/net/inet_connection_sock.h1
-rw-r--r--include/net/inet_sock.h9
-rw-r--r--include/net/sock.h2
-rw-r--r--include/net/xfrm.h4
-rw-r--r--include/trace/events/sched.h4
-rw-r--r--include/trace/ftrace.h6
-rw-r--r--kernel/debug/kdb/kdb_debugger.c4
-rw-r--r--kernel/debug/kdb/kdb_io.c11
-rw-r--r--kernel/debug/kdb/kdb_main.c15
-rw-r--r--kernel/events/callchain.c9
-rw-r--r--kernel/events/core.c30
-rw-r--r--kernel/events/internal.h3
-rw-r--r--kernel/futex.c17
-rw-r--r--kernel/irq/manage.c15
-rw-r--r--kernel/printk.c2
-rw-r--r--kernel/sched/core.c4
-rw-r--r--kernel/sched/cpupri.c10
-rw-r--r--kernel/sched/fair.c29
-rw-r--r--kernel/time/jiffies.c2
-rw-r--r--kernel/time/ntp.c2
-rw-r--r--kernel/time/timekeeping.c407
-rw-r--r--kernel/trace/trace_event_perf.c2
-rw-r--r--kernel/trace/trace_kprobe.c6
-rw-r--r--kernel/trace/trace_syscalls.c4
-rw-r--r--kernel/trace/trace_uprobe.c2
-rw-r--r--mm/backing-dev.c52
-rw-r--r--mm/page-writeback.c1
-rw-r--r--net/batman-adv/gateway_client.c6
-rw-r--r--net/core/dev.c4
-rw-r--r--net/core/sock.c1
-rw-r--r--net/ipv4/ip_output.c2
-rw-r--r--net/ipv4/route.c4
-rw-r--r--net/ipv4/tcp.c4
-rw-r--r--net/ipv4/tcp_cong.c3
-rw-r--r--net/ipv4/tcp_input.c4
-rw-r--r--net/ipv4/tcp_ipv4.c13
-rw-r--r--net/ipv4/tcp_minisocks.c2
-rw-r--r--net/ipv4/tcp_output.c23
-rw-r--r--net/ipv6/tcp_ipv6.c27
-rw-r--r--net/llc/llc_station.c6
-rw-r--r--net/mac80211/mesh.c3
-rw-r--r--net/mac80211/mlme.c2
-rw-r--r--net/mac80211/scan.c3
-rw-r--r--net/sched/act_gact.c14
-rw-r--r--net/sched/act_ipt.c7
-rw-r--r--net/sched/act_pedit.c5
-rw-r--r--net/sched/act_simple.c5
-rw-r--r--net/wireless/reg.c19
-rw-r--r--net/xfrm/xfrm_state.c21
-rw-r--r--sound/core/sgbuf.c2
-rw-r--r--sound/pci/emu10k1/memory.c5
-rw-r--r--sound/pci/hda/hda_auto_parser.c5
-rw-r--r--sound/pci/hda/patch_conexant.c6
-rw-r--r--sound/pci/hda/patch_hdmi.c12
-rw-r--r--sound/pci/hda/patch_realtek.c8
-rw-r--r--sound/soc/codecs/ab8500-codec.c4
-rw-r--r--sound/soc/codecs/ad1980.c1
-rw-r--r--sound/soc/codecs/mc13783.c2
-rw-r--r--sound/soc/codecs/sgtl5000.c3
-rw-r--r--sound/soc/codecs/stac9766.c1
-rw-r--r--sound/soc/codecs/wm8962.c3
-rw-r--r--sound/soc/codecs/wm8994.c15
-rw-r--r--sound/soc/codecs/wm9712.c1
-rw-r--r--sound/soc/codecs/wm9713.c1
-rw-r--r--sound/soc/mxs/mxs-saif.c24
-rw-r--r--sound/soc/omap/omap-mcbsp.c1
-rw-r--r--sound/soc/omap/omap-pcm.c1
-rw-r--r--sound/soc/soc-core.c2
-rw-r--r--sound/soc/tegra/tegra_alc5632.c2
-rw-r--r--sound/soc/tegra/tegra_wm8903.c10
-rw-r--r--sound/soc/ux500/ux500_msp_dai.c2
-rw-r--r--sound/soc/ux500/ux500_msp_i2s.c2
-rw-r--r--sound/soc/ux500/ux500_msp_i2s.h2
-rw-r--r--tools/perf/Makefile7
-rw-r--r--tools/perf/builtin-record.c4
-rw-r--r--tools/perf/builtin-report.c5
-rw-r--r--tools/perf/builtin-test.c19
-rw-r--r--tools/perf/builtin-top.c23
-rw-r--r--tools/perf/util/event.h3
-rw-r--r--tools/perf/util/evlist.c7
-rw-r--r--tools/perf/util/evlist.h3
-rw-r--r--tools/perf/util/evsel.c15
-rw-r--r--tools/perf/util/evsel.h10
-rw-r--r--tools/perf/util/header.c9
-rw-r--r--tools/perf/util/intlist.c101
-rw-r--r--tools/perf/util/intlist.h75
-rw-r--r--tools/perf/util/parse-events-test.c12
-rw-r--r--tools/perf/util/parse-options.c3
-rw-r--r--tools/perf/util/python.c6
-rw-r--r--tools/perf/util/rblist.c107
-rw-r--r--tools/perf/util/rblist.h47
-rw-r--r--tools/perf/util/session.c48
-rw-r--r--tools/perf/util/session.h24
-rw-r--r--tools/perf/util/strlist.c130
-rw-r--r--tools/perf/util/strlist.h11
-rw-r--r--tools/perf/util/symbol.c14
-rw-r--r--tools/perf/util/target.c2
294 files changed, 1821 insertions, 1999 deletions
diff --git a/Documentation/DocBook/filesystems.tmpl b/Documentation/DocBook/filesystems.tmpl
index 3fca32c41927..25b58efd955d 100644
--- a/Documentation/DocBook/filesystems.tmpl
+++ b/Documentation/DocBook/filesystems.tmpl
@@ -224,8 +224,8 @@ all your transactions.
224</para> 224</para>
225 225
226<para> 226<para>
227Then at umount time , in your put_super() (2.4) or write_super() (2.5) 227Then at umount time , in your put_super() you can then call journal_destroy()
228you can then call journal_destroy() to clean up your in-core journal object. 228to clean up your in-core journal object.
229</para> 229</para>
230 230
231<para> 231<para>
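The advice above boils down to tearing the in-core journal down from ->put_super() at unmount time. As an illustrative sketch only (the myfs_sb_info structure and MYFS_SB() helper are hypothetical placeholders, not from this patch), that looks roughly like:

/* Sketch: destroying the in-core journal from ->put_super(), as the
 * updated documentation describes.  myfs_sb_info and MYFS_SB() are
 * hypothetical placeholders.
 */
static void myfs_put_super(struct super_block *sb)
{
	struct myfs_sb_info *sbi = MYFS_SB(sb);

	if (sbi->s_journal) {
		journal_destroy(sbi->s_journal);	/* free the in-core journal */
		sbi->s_journal = NULL;
	}
	kfree(sbi);
	sb->s_fs_info = NULL;
}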
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 0f103e39b4f6..e540a24e5d06 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -114,7 +114,6 @@ prototypes:
114 int (*drop_inode) (struct inode *); 114 int (*drop_inode) (struct inode *);
115 void (*evict_inode) (struct inode *); 115 void (*evict_inode) (struct inode *);
116 void (*put_super) (struct super_block *); 116 void (*put_super) (struct super_block *);
117 void (*write_super) (struct super_block *);
118 int (*sync_fs)(struct super_block *sb, int wait); 117 int (*sync_fs)(struct super_block *sb, int wait);
119 int (*freeze_fs) (struct super_block *); 118 int (*freeze_fs) (struct super_block *);
120 int (*unfreeze_fs) (struct super_block *); 119 int (*unfreeze_fs) (struct super_block *);
@@ -136,7 +135,6 @@ write_inode:
136drop_inode: !!!inode->i_lock!!! 135drop_inode: !!!inode->i_lock!!!
137evict_inode: 136evict_inode:
138put_super: write 137put_super: write
139write_super: read
140sync_fs: read 138sync_fs: read
141freeze_fs: write 139freeze_fs: write
142unfreeze_fs: write 140unfreeze_fs: write
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 2bef2b3843d1..0742feebc6e2 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -94,9 +94,8 @@ protected.
94--- 94---
95[mandatory] 95[mandatory]
96 96
97BKL is also moved from around sb operations. ->write_super() Is now called 97BKL is also moved from around sb operations. BKL should have been shifted into
98without BKL held. BKL should have been shifted into individual fs sb_op 98individual fs sb_op functions. If you don't need it, remove it.
99functions. If you don't need it, remove it.
100 99
101--- 100---
102[informational] 101[informational]
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 065aa2dc0835..2ee133e030c3 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -216,7 +216,6 @@ struct super_operations {
216 void (*drop_inode) (struct inode *); 216 void (*drop_inode) (struct inode *);
217 void (*delete_inode) (struct inode *); 217 void (*delete_inode) (struct inode *);
218 void (*put_super) (struct super_block *); 218 void (*put_super) (struct super_block *);
219 void (*write_super) (struct super_block *);
220 int (*sync_fs)(struct super_block *sb, int wait); 219 int (*sync_fs)(struct super_block *sb, int wait);
221 int (*freeze_fs) (struct super_block *); 220 int (*freeze_fs) (struct super_block *);
222 int (*unfreeze_fs) (struct super_block *); 221 int (*unfreeze_fs) (struct super_block *);
@@ -273,9 +272,6 @@ or bottom half).
273 put_super: called when the VFS wishes to free the superblock 272 put_super: called when the VFS wishes to free the superblock
274 (i.e. unmount). This is called with the superblock lock held 273 (i.e. unmount). This is called with the superblock lock held
275 274
276 write_super: called when the VFS superblock needs to be written to
277 disc. This method is optional
278
279 sync_fs: called when VFS is writing out all dirty data associated with 275 sync_fs: called when VFS is writing out all dirty data associated with
280 a superblock. The second parameter indicates whether the method 276 a superblock. The second parameter indicates whether the method
281 should wait until the write out has been completed. Optional. 277 should wait until the write out has been completed. Optional.
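Since the vfs.txt hunk above drops ->write_super from the documented super_operations, a hedged sketch of what an operations table looks like afterwards may help; every myfs_* callback name below is a placeholder, not something defined by this patch:

/* Sketch: super_operations once ->write_super is gone.  Periodic or
 * forced superblock writeback is driven through ->sync_fs (or the
 * filesystem's own delayed work), not a separate VFS callback.
 */
static const struct super_operations myfs_sops = {
	.alloc_inode	= myfs_alloc_inode,
	.destroy_inode	= myfs_destroy_inode,
	.write_inode	= myfs_write_inode,
	.evict_inode	= myfs_evict_inode,
	.put_super	= myfs_put_super,
	.sync_fs	= myfs_sync_fs,		/* wait == 1: block until clean */
	.freeze_fs	= myfs_freeze,
	.unfreeze_fs	= myfs_unfreeze,
	.statfs		= myfs_statfs,
	.remount_fs	= myfs_remount,
	/* no .write_super entry any more */
};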
diff --git a/Documentation/laptops/laptop-mode.txt b/Documentation/laptops/laptop-mode.txt
index 0bf25eebce94..4ebbfc3f1c6e 100644
--- a/Documentation/laptops/laptop-mode.txt
+++ b/Documentation/laptops/laptop-mode.txt
@@ -262,9 +262,9 @@ MINIMUM_BATTERY_MINUTES=10
262 262
263# 263#
264# Allowed dirty background ratio, in percent. Once DIRTY_RATIO has been 264# Allowed dirty background ratio, in percent. Once DIRTY_RATIO has been
265# exceeded, the kernel will wake pdflush which will then reduce the amount 265# exceeded, the kernel will wake flusher threads which will then reduce the
266# of dirty memory to dirty_background_ratio. Set this nice and low, so once 266# amount of dirty memory to dirty_background_ratio. Set this nice and low,
267# some writeout has commenced, we do a lot of it. 267# so once some writeout has commenced, we do a lot of it.
268# 268#
269#DIRTY_BACKGROUND_RATIO=5 269#DIRTY_BACKGROUND_RATIO=5
270 270
@@ -384,9 +384,9 @@ CPU_MAXFREQ=${CPU_MAXFREQ:-'slowest'}
384 384
385# 385#
386# Allowed dirty background ratio, in percent. Once DIRTY_RATIO has been 386# Allowed dirty background ratio, in percent. Once DIRTY_RATIO has been
387# exceeded, the kernel will wake pdflush which will then reduce the amount 387# exceeded, the kernel will wake flusher threads which will then reduce the
388# of dirty memory to dirty_background_ratio. Set this nice and low, so once 388# amount of dirty memory to dirty_background_ratio. Set this nice and low,
389# some writeout has commenced, we do a lot of it. 389# so once some writeout has commenced, we do a lot of it.
390# 390#
391DIRTY_BACKGROUND_RATIO=${DIRTY_BACKGROUND_RATIO:-'5'} 391DIRTY_BACKGROUND_RATIO=${DIRTY_BACKGROUND_RATIO:-'5'}
392 392
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index dcc2a94ae34e..078701fdbd4d 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -76,8 +76,8 @@ huge pages although processes will also directly compact memory as required.
76 76
77dirty_background_bytes 77dirty_background_bytes
78 78
79Contains the amount of dirty memory at which the pdflush background writeback 79Contains the amount of dirty memory at which the background kernel
80daemon will start writeback. 80flusher threads will start writeback.
81 81
82Note: dirty_background_bytes is the counterpart of dirty_background_ratio. Only 82Note: dirty_background_bytes is the counterpart of dirty_background_ratio. Only
83one of them may be specified at a time. When one sysctl is written it is 83one of them may be specified at a time. When one sysctl is written it is
@@ -89,7 +89,7 @@ other appears as 0 when read.
89dirty_background_ratio 89dirty_background_ratio
90 90
91Contains, as a percentage of total system memory, the number of pages at which 91Contains, as a percentage of total system memory, the number of pages at which
92the pdflush background writeback daemon will start writing out dirty data. 92the background kernel flusher threads will start writing out dirty data.
93 93
94============================================================== 94==============================================================
95 95
@@ -112,9 +112,9 @@ retained.
112dirty_expire_centisecs 112dirty_expire_centisecs
113 113
114This tunable is used to define when dirty data is old enough to be eligible 114This tunable is used to define when dirty data is old enough to be eligible
115for writeout by the pdflush daemons. It is expressed in 100'ths of a second. 115for writeout by the kernel flusher threads. It is expressed in 100'ths
116Data which has been dirty in-memory for longer than this interval will be 116of a second. Data which has been dirty in-memory for longer than this
117written out next time a pdflush daemon wakes up. 117interval will be written out next time a flusher thread wakes up.
118 118
119============================================================== 119==============================================================
120 120
@@ -128,7 +128,7 @@ data.
128 128
129dirty_writeback_centisecs 129dirty_writeback_centisecs
130 130
131The pdflush writeback daemons will periodically wake up and write `old' data 131The kernel flusher threads will periodically wake up and write `old' data
132out to disk. This tunable expresses the interval between those wakeups, in 132out to disk. This tunable expresses the interval between those wakeups, in
133100'ths of a second. 133100'ths of a second.
134 134
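For a concrete feel of the tunables documented above, here is a minimal user-space C sketch (illustrative only; the values chosen are arbitrary examples, not recommendations) that adjusts them through /proc/sys/vm:

/* Illustrative user-space sketch: set the flusher-thread knobs
 * documented above.  The chosen values are examples, not advice.
 */
#include <stdio.h>

static int write_sysctl(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* start background writeback once 5% of memory is dirty */
	write_sysctl("/proc/sys/vm/dirty_background_ratio", "5");
	/* data counts as "old" after 3000 centisecs (30 s) */
	write_sysctl("/proc/sys/vm/dirty_expire_centisecs", "3000");
	/* wake the flusher threads every 500 centisecs (5 s) */
	write_sysctl("/proc/sys/vm/dirty_writeback_centisecs", "500");
	return 0;
}

The same settings can of course be made with sysctl(8) or by writing the files from a shell; the C form is only to make the paths explicit.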
diff --git a/MAINTAINERS b/MAINTAINERS
index 94b823f71e94..63ce3a38b332 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5329,14 +5329,15 @@ PIN CONTROL SUBSYSTEM
5329M: Linus Walleij <linus.walleij@linaro.org> 5329M: Linus Walleij <linus.walleij@linaro.org>
5330S: Maintained 5330S: Maintained
5331F: drivers/pinctrl/ 5331F: drivers/pinctrl/
5332F: include/linux/pinctrl/
5332 5333
5333PIN CONTROLLER - ST SPEAR 5334PIN CONTROLLER - ST SPEAR
5334M: Viresh Kumar <viresh.linux@gmail.com> 5335M: Viresh Kumar <viresh.linux@gmail.com>
5335L: spear-devel@list.st.com 5336L: spear-devel@list.st.com
5336L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 5337L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
5337W: http://www.st.com/spear 5338W: http://www.st.com/spear
5338S: Maintained 5339S: Maintained
5339F: driver/pinctrl/spear/ 5340F: drivers/pinctrl/spear/
5340 5341
5341PKTCDVD DRIVER 5342PKTCDVD DRIVER
5342M: Peter Osterlund <petero2@telia.com> 5343M: Peter Osterlund <petero2@telia.com>
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index ada8f0fc71e4..fb96e607adcf 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -52,7 +52,6 @@ EXPORT_SYMBOL(reserved_mem_dcache_on);
52#ifdef CONFIG_MTD_UCLINUX 52#ifdef CONFIG_MTD_UCLINUX
53extern struct map_info uclinux_ram_map; 53extern struct map_info uclinux_ram_map;
54unsigned long memory_mtd_end, memory_mtd_start, mtd_size; 54unsigned long memory_mtd_end, memory_mtd_start, mtd_size;
55unsigned long _ebss;
56EXPORT_SYMBOL(memory_mtd_end); 55EXPORT_SYMBOL(memory_mtd_end);
57EXPORT_SYMBOL(memory_mtd_start); 56EXPORT_SYMBOL(memory_mtd_start);
58EXPORT_SYMBOL(mtd_size); 57EXPORT_SYMBOL(mtd_size);
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 6f38b6120d96..440578850ae5 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -497,7 +497,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
497 srat_num_cpus++; 497 srat_num_cpus++;
498} 498}
499 499
500void __init 500int __init
501acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) 501acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
502{ 502{
503 unsigned long paddr, size; 503 unsigned long paddr, size;
@@ -512,7 +512,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
512 512
513 /* Ignore disabled entries */ 513 /* Ignore disabled entries */
514 if (!(ma->flags & ACPI_SRAT_MEM_ENABLED)) 514 if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
515 return; 515 return -1;
516 516
517 /* record this node in proximity bitmap */ 517 /* record this node in proximity bitmap */
518 pxm_bit_set(pxm); 518 pxm_bit_set(pxm);
@@ -531,6 +531,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
531 p->size = size; 531 p->size = size;
532 p->nid = pxm; 532 p->nid = pxm;
533 num_node_memblks++; 533 num_node_memblks++;
534 return 0;
534} 535}
535 536
536void __init acpi_numa_arch_fixup(void) 537void __init acpi_numa_arch_fixup(void)
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 0b0f8b8c4a26..4a469907f04a 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -54,18 +54,6 @@ config ZONE_DMA
54 bool 54 bool
55 default y 55 default y
56 56
57config CPU_HAS_NO_BITFIELDS
58 bool
59
60config CPU_HAS_NO_MULDIV64
61 bool
62
63config CPU_HAS_ADDRESS_SPACES
64 bool
65
66config FPU
67 bool
68
69config HZ 57config HZ
70 int 58 int
71 default 1000 if CLEOPATRA 59 default 1000 if CLEOPATRA
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index 43a9f8f1b8eb..82068349a2bb 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -37,6 +37,7 @@ config M68000
37 bool 37 bool
38 select CPU_HAS_NO_BITFIELDS 38 select CPU_HAS_NO_BITFIELDS
39 select CPU_HAS_NO_MULDIV64 39 select CPU_HAS_NO_MULDIV64
40 select CPU_HAS_NO_UNALIGNED
40 select GENERIC_CSUM 41 select GENERIC_CSUM
41 help 42 help
42 The Freescale (was Motorola) 68000 CPU is the first generation of 43 The Freescale (was Motorola) 68000 CPU is the first generation of
@@ -48,6 +49,7 @@ config M68000
48config MCPU32 49config MCPU32
49 bool 50 bool
50 select CPU_HAS_NO_BITFIELDS 51 select CPU_HAS_NO_BITFIELDS
52 select CPU_HAS_NO_UNALIGNED
51 help 53 help
52 The Freescale (was then Motorola) CPU32 is a CPU core that is 54 The Freescale (was then Motorola) CPU32 is a CPU core that is
53 based on the 68020 processor. For the most part it is used in 55 based on the 68020 processor. For the most part it is used in
@@ -376,6 +378,18 @@ config NODES_SHIFT
376 default "3" 378 default "3"
377 depends on !SINGLE_MEMORY_CHUNK 379 depends on !SINGLE_MEMORY_CHUNK
378 380
381config CPU_HAS_NO_BITFIELDS
382 bool
383
384config CPU_HAS_NO_MULDIV64
385 bool
386
387config CPU_HAS_NO_UNALIGNED
388 bool
389
390config CPU_HAS_ADDRESS_SPACES
391 bool
392
379config FPU 393config FPU
380 bool 394 bool
381 395
diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c
index 0a30406b9442..f5565d6eeb8e 100644
--- a/arch/m68k/apollo/config.c
+++ b/arch/m68k/apollo/config.c
@@ -177,8 +177,8 @@ irqreturn_t dn_timer_int(int irq, void *dev_id)
177 177
178 timer_handler(irq, dev_id); 178 timer_handler(irq, dev_id);
179 179
180 x=*(volatile unsigned char *)(timer+3); 180 x = *(volatile unsigned char *)(apollo_timer + 3);
181 x=*(volatile unsigned char *)(timer+5); 181 x = *(volatile unsigned char *)(apollo_timer + 5);
182 182
183 return IRQ_HANDLED; 183 return IRQ_HANDLED;
184} 184}
@@ -186,17 +186,17 @@ irqreturn_t dn_timer_int(int irq, void *dev_id)
186void dn_sched_init(irq_handler_t timer_routine) 186void dn_sched_init(irq_handler_t timer_routine)
187{ 187{
188 /* program timer 1 */ 188 /* program timer 1 */
189 *(volatile unsigned char *)(timer+3)=0x01; 189 *(volatile unsigned char *)(apollo_timer + 3) = 0x01;
190 *(volatile unsigned char *)(timer+1)=0x40; 190 *(volatile unsigned char *)(apollo_timer + 1) = 0x40;
191 *(volatile unsigned char *)(timer+5)=0x09; 191 *(volatile unsigned char *)(apollo_timer + 5) = 0x09;
192 *(volatile unsigned char *)(timer+7)=0xc4; 192 *(volatile unsigned char *)(apollo_timer + 7) = 0xc4;
193 193
194 /* enable IRQ of PIC B */ 194 /* enable IRQ of PIC B */
195 *(volatile unsigned char *)(pica+1)&=(~8); 195 *(volatile unsigned char *)(pica+1)&=(~8);
196 196
197#if 0 197#if 0
198 printk("*(0x10803) %02x\n",*(volatile unsigned char *)(timer+0x3)); 198 printk("*(0x10803) %02x\n",*(volatile unsigned char *)(apollo_timer + 0x3));
199 printk("*(0x10803) %02x\n",*(volatile unsigned char *)(timer+0x3)); 199 printk("*(0x10803) %02x\n",*(volatile unsigned char *)(apollo_timer + 0x3));
200#endif 200#endif
201 201
202 if (request_irq(IRQ_APOLLO, dn_timer_int, 0, "time", timer_routine)) 202 if (request_irq(IRQ_APOLLO, dn_timer_int, 0, "time", timer_routine))
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index eafa2539a8ee..a74e5d95c384 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -1,4 +1,29 @@
1include include/asm-generic/Kbuild.asm 1include include/asm-generic/Kbuild.asm
2header-y += cachectl.h 2header-y += cachectl.h
3 3
4generic-y += bitsperlong.h
5generic-y += cputime.h
6generic-y += device.h
7generic-y += emergency-restart.h
8generic-y += errno.h
9generic-y += futex.h
10generic-y += ioctl.h
11generic-y += ipcbuf.h
12generic-y += irq_regs.h
13generic-y += kdebug.h
14generic-y += kmap_types.h
15generic-y += kvm_para.h
16generic-y += local64.h
17generic-y += local.h
18generic-y += mman.h
19generic-y += mutex.h
20generic-y += percpu.h
21generic-y += resource.h
22generic-y += scatterlist.h
23generic-y += sections.h
24generic-y += siginfo.h
25generic-y += statfs.h
26generic-y += topology.h
27generic-y += types.h
4generic-y += word-at-a-time.h 28generic-y += word-at-a-time.h
29generic-y += xor.h
diff --git a/arch/m68k/include/asm/MC68332.h b/arch/m68k/include/asm/MC68332.h
deleted file mode 100644
index 6bb8f02685a2..000000000000
--- a/arch/m68k/include/asm/MC68332.h
+++ /dev/null
@@ -1,152 +0,0 @@
1
2/* include/asm-m68knommu/MC68332.h: '332 control registers
3 *
4 * Copyright (C) 1998 Kenneth Albanowski <kjahds@kjahds.com>,
5 *
6 */
7
8#ifndef _MC68332_H_
9#define _MC68332_H_
10
11#define BYTE_REF(addr) (*((volatile unsigned char*)addr))
12#define WORD_REF(addr) (*((volatile unsigned short*)addr))
13
14#define PORTE_ADDR 0xfffa11
15#define PORTE BYTE_REF(PORTE_ADDR)
16#define DDRE_ADDR 0xfffa15
17#define DDRE BYTE_REF(DDRE_ADDR)
18#define PEPAR_ADDR 0xfffa17
19#define PEPAR BYTE_REF(PEPAR_ADDR)
20
21#define PORTF_ADDR 0xfffa19
22#define PORTF BYTE_REF(PORTF_ADDR)
23#define DDRF_ADDR 0xfffa1d
24#define DDRF BYTE_REF(DDRF_ADDR)
25#define PFPAR_ADDR 0xfffa1f
26#define PFPAR BYTE_REF(PFPAR_ADDR)
27
28#define PORTQS_ADDR 0xfffc15
29#define PORTQS BYTE_REF(PORTQS_ADDR)
30#define DDRQS_ADDR 0xfffc17
31#define DDRQS BYTE_REF(DDRQS_ADDR)
32#define PQSPAR_ADDR 0xfffc16
33#define PQSPAR BYTE_REF(PQSPAR_ADDR)
34
35#define CSPAR0_ADDR 0xFFFA44
36#define CSPAR0 WORD_REF(CSPAR0_ADDR)
37#define CSPAR1_ADDR 0xFFFA46
38#define CSPAR1 WORD_REF(CSPAR1_ADDR)
39#define CSARBT_ADDR 0xFFFA48
40#define CSARBT WORD_REF(CSARBT_ADDR)
41#define CSOPBT_ADDR 0xFFFA4A
42#define CSOPBT WORD_REF(CSOPBT_ADDR)
43#define CSBAR0_ADDR 0xFFFA4C
44#define CSBAR0 WORD_REF(CSBAR0_ADDR)
45#define CSOR0_ADDR 0xFFFA4E
46#define CSOR0 WORD_REF(CSOR0_ADDR)
47#define CSBAR1_ADDR 0xFFFA50
48#define CSBAR1 WORD_REF(CSBAR1_ADDR)
49#define CSOR1_ADDR 0xFFFA52
50#define CSOR1 WORD_REF(CSOR1_ADDR)
51#define CSBAR2_ADDR 0xFFFA54
52#define CSBAR2 WORD_REF(CSBAR2_ADDR)
53#define CSOR2_ADDR 0xFFFA56
54#define CSOR2 WORD_REF(CSOR2_ADDR)
55#define CSBAR3_ADDR 0xFFFA58
56#define CSBAR3 WORD_REF(CSBAR3_ADDR)
57#define CSOR3_ADDR 0xFFFA5A
58#define CSOR3 WORD_REF(CSOR3_ADDR)
59#define CSBAR4_ADDR 0xFFFA5C
60#define CSBAR4 WORD_REF(CSBAR4_ADDR)
61#define CSOR4_ADDR 0xFFFA5E
62#define CSOR4 WORD_REF(CSOR4_ADDR)
63#define CSBAR5_ADDR 0xFFFA60
64#define CSBAR5 WORD_REF(CSBAR5_ADDR)
65#define CSOR5_ADDR 0xFFFA62
66#define CSOR5 WORD_REF(CSOR5_ADDR)
67#define CSBAR6_ADDR 0xFFFA64
68#define CSBAR6 WORD_REF(CSBAR6_ADDR)
69#define CSOR6_ADDR 0xFFFA66
70#define CSOR6 WORD_REF(CSOR6_ADDR)
71#define CSBAR7_ADDR 0xFFFA68
72#define CSBAR7 WORD_REF(CSBAR7_ADDR)
73#define CSOR7_ADDR 0xFFFA6A
74#define CSOR7 WORD_REF(CSOR7_ADDR)
75#define CSBAR8_ADDR 0xFFFA6C
76#define CSBAR8 WORD_REF(CSBAR8_ADDR)
77#define CSOR8_ADDR 0xFFFA6E
78#define CSOR8 WORD_REF(CSOR8_ADDR)
79#define CSBAR9_ADDR 0xFFFA70
80#define CSBAR9 WORD_REF(CSBAR9_ADDR)
81#define CSOR9_ADDR 0xFFFA72
82#define CSOR9 WORD_REF(CSOR9_ADDR)
83#define CSBAR10_ADDR 0xFFFA74
84#define CSBAR10 WORD_REF(CSBAR10_ADDR)
85#define CSOR10_ADDR 0xFFFA76
86#define CSOR10 WORD_REF(CSOR10_ADDR)
87
88#define CSOR_MODE_ASYNC 0x0000
89#define CSOR_MODE_SYNC 0x8000
90#define CSOR_MODE_MASK 0x8000
91#define CSOR_BYTE_DISABLE 0x0000
92#define CSOR_BYTE_UPPER 0x4000
93#define CSOR_BYTE_LOWER 0x2000
94#define CSOR_BYTE_BOTH 0x6000
95#define CSOR_BYTE_MASK 0x6000
96#define CSOR_RW_RSVD 0x0000
97#define CSOR_RW_READ 0x0800
98#define CSOR_RW_WRITE 0x1000
99#define CSOR_RW_BOTH 0x1800
100#define CSOR_RW_MASK 0x1800
101#define CSOR_STROBE_DS 0x0400
102#define CSOR_STROBE_AS 0x0000
103#define CSOR_STROBE_MASK 0x0400
104#define CSOR_DSACK_WAIT(x) (wait << 6)
105#define CSOR_DSACK_FTERM (14 << 6)
106#define CSOR_DSACK_EXTERNAL (15 << 6)
107#define CSOR_DSACK_MASK 0x03c0
108#define CSOR_SPACE_CPU 0x0000
109#define CSOR_SPACE_USER 0x0010
110#define CSOR_SPACE_SU 0x0020
111#define CSOR_SPACE_BOTH 0x0030
112#define CSOR_SPACE_MASK 0x0030
113#define CSOR_IPL_ALL 0x0000
114#define CSOR_IPL_PRIORITY(x) (x << 1)
115#define CSOR_IPL_MASK 0x000e
116#define CSOR_AVEC_ON 0x0001
117#define CSOR_AVEC_OFF 0x0000
118#define CSOR_AVEC_MASK 0x0001
119
120#define CSBAR_ADDR(x) ((addr >> 11) << 3)
121#define CSBAR_ADDR_MASK 0xfff8
122#define CSBAR_BLKSIZE_2K 0x0000
123#define CSBAR_BLKSIZE_8K 0x0001
124#define CSBAR_BLKSIZE_16K 0x0002
125#define CSBAR_BLKSIZE_64K 0x0003
126#define CSBAR_BLKSIZE_128K 0x0004
127#define CSBAR_BLKSIZE_256K 0x0005
128#define CSBAR_BLKSIZE_512K 0x0006
129#define CSBAR_BLKSIZE_1M 0x0007
130#define CSBAR_BLKSIZE_MASK 0x0007
131
132#define CSPAR_DISC 0
133#define CSPAR_ALT 1
134#define CSPAR_CS8 2
135#define CSPAR_CS16 3
136#define CSPAR_MASK 3
137
138#define CSPAR0_CSBOOT(x) (x << 0)
139#define CSPAR0_CS0(x) (x << 2)
140#define CSPAR0_CS1(x) (x << 4)
141#define CSPAR0_CS2(x) (x << 6)
142#define CSPAR0_CS3(x) (x << 8)
143#define CSPAR0_CS4(x) (x << 10)
144#define CSPAR0_CS5(x) (x << 12)
145
146#define CSPAR1_CS6(x) (x << 0)
147#define CSPAR1_CS7(x) (x << 2)
148#define CSPAR1_CS8(x) (x << 4)
149#define CSPAR1_CS9(x) (x << 6)
150#define CSPAR1_CS10(x) (x << 8)
151
152#endif
diff --git a/arch/m68k/include/asm/apollodma.h b/arch/m68k/include/asm/apollodma.h
deleted file mode 100644
index 954adc851adb..000000000000
--- a/arch/m68k/include/asm/apollodma.h
+++ /dev/null
@@ -1,248 +0,0 @@
1/*
2 * linux/include/asm/dma.h: Defines for using and allocating dma channels.
3 * Written by Hennus Bergman, 1992.
4 * High DMA channel support & info by Hannu Savolainen
5 * and John Boyd, Nov. 1992.
6 */
7
8#ifndef _ASM_APOLLO_DMA_H
9#define _ASM_APOLLO_DMA_H
10
11#include <asm/apollohw.h> /* need byte IO */
12#include <linux/spinlock.h> /* And spinlocks */
13#include <linux/delay.h>
14
15
16#define dma_outb(val,addr) (*((volatile unsigned char *)(addr+IO_BASE)) = (val))
17#define dma_inb(addr) (*((volatile unsigned char *)(addr+IO_BASE)))
18
19/*
20 * NOTES about DMA transfers:
21 *
22 * controller 1: channels 0-3, byte operations, ports 00-1F
23 * controller 2: channels 4-7, word operations, ports C0-DF
24 *
25 * - ALL registers are 8 bits only, regardless of transfer size
26 * - channel 4 is not used - cascades 1 into 2.
27 * - channels 0-3 are byte - addresses/counts are for physical bytes
28 * - channels 5-7 are word - addresses/counts are for physical words
29 * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
30 * - transfer count loaded to registers is 1 less than actual count
31 * - controller 2 offsets are all even (2x offsets for controller 1)
32 * - page registers for 5-7 don't use data bit 0, represent 128K pages
33 * - page registers for 0-3 use bit 0, represent 64K pages
34 *
35 * DMA transfers are limited to the lower 16MB of _physical_ memory.
36 * Note that addresses loaded into registers must be _physical_ addresses,
37 * not logical addresses (which may differ if paging is active).
38 *
39 * Address mapping for channels 0-3:
40 *
41 * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
42 * | ... | | ... | | ... |
43 * | ... | | ... | | ... |
44 * | ... | | ... | | ... |
45 * P7 ... P0 A7 ... A0 A7 ... A0
46 * | Page | Addr MSB | Addr LSB | (DMA registers)
47 *
48 * Address mapping for channels 5-7:
49 *
50 * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
51 * | ... | \ \ ... \ \ \ ... \ \
52 * | ... | \ \ ... \ \ \ ... \ (not used)
53 * | ... | \ \ ... \ \ \ ... \
54 * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
55 * | Page | Addr MSB | Addr LSB | (DMA registers)
56 *
57 * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
58 * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
59 * the hardware level, so odd-byte transfers aren't possible).
60 *
61 * Transfer count (_not # bytes_) is limited to 64K, represented as actual
62 * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
63 * and up to 128K bytes may be transferred on channels 5-7 in one operation.
64 *
65 */
66
67#define MAX_DMA_CHANNELS 8
68
69/* The maximum address that we can perform a DMA transfer to on this platform */#define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000)
70
71/* 8237 DMA controllers */
72#define IO_DMA1_BASE 0x10C00 /* 8 bit slave DMA, channels 0..3 */
73#define IO_DMA2_BASE 0x10D00 /* 16 bit master DMA, ch 4(=slave input)..7 */
74
75/* DMA controller registers */
76#define DMA1_CMD_REG (IO_DMA1_BASE+0x08) /* command register (w) */
77#define DMA1_STAT_REG (IO_DMA1_BASE+0x08) /* status register (r) */
78#define DMA1_REQ_REG (IO_DMA1_BASE+0x09) /* request register (w) */
79#define DMA1_MASK_REG (IO_DMA1_BASE+0x0A) /* single-channel mask (w) */
80#define DMA1_MODE_REG (IO_DMA1_BASE+0x0B) /* mode register (w) */
81#define DMA1_CLEAR_FF_REG (IO_DMA1_BASE+0x0C) /* clear pointer flip-flop (w) */
82#define DMA1_TEMP_REG (IO_DMA1_BASE+0x0D) /* Temporary Register (r) */
83#define DMA1_RESET_REG (IO_DMA1_BASE+0x0D) /* Master Clear (w) */
84#define DMA1_CLR_MASK_REG (IO_DMA1_BASE+0x0E) /* Clear Mask */
85#define DMA1_MASK_ALL_REG (IO_DMA1_BASE+0x0F) /* all-channels mask (w) */
86
87#define DMA2_CMD_REG (IO_DMA2_BASE+0x10) /* command register (w) */
88#define DMA2_STAT_REG (IO_DMA2_BASE+0x10) /* status register (r) */
89#define DMA2_REQ_REG (IO_DMA2_BASE+0x12) /* request register (w) */
90#define DMA2_MASK_REG (IO_DMA2_BASE+0x14) /* single-channel mask (w) */
91#define DMA2_MODE_REG (IO_DMA2_BASE+0x16) /* mode register (w) */
92#define DMA2_CLEAR_FF_REG (IO_DMA2_BASE+0x18) /* clear pointer flip-flop (w) */
93#define DMA2_TEMP_REG (IO_DMA2_BASE+0x1A) /* Temporary Register (r) */
94#define DMA2_RESET_REG (IO_DMA2_BASE+0x1A) /* Master Clear (w) */
95#define DMA2_CLR_MASK_REG (IO_DMA2_BASE+0x1C) /* Clear Mask */
96#define DMA2_MASK_ALL_REG (IO_DMA2_BASE+0x1E) /* all-channels mask (w) */
97
98#define DMA_ADDR_0 (IO_DMA1_BASE+0x00) /* DMA address registers */
99#define DMA_ADDR_1 (IO_DMA1_BASE+0x02)
100#define DMA_ADDR_2 (IO_DMA1_BASE+0x04)
101#define DMA_ADDR_3 (IO_DMA1_BASE+0x06)
102#define DMA_ADDR_4 (IO_DMA2_BASE+0x00)
103#define DMA_ADDR_5 (IO_DMA2_BASE+0x04)
104#define DMA_ADDR_6 (IO_DMA2_BASE+0x08)
105#define DMA_ADDR_7 (IO_DMA2_BASE+0x0C)
106
107#define DMA_CNT_0 (IO_DMA1_BASE+0x01) /* DMA count registers */
108#define DMA_CNT_1 (IO_DMA1_BASE+0x03)
109#define DMA_CNT_2 (IO_DMA1_BASE+0x05)
110#define DMA_CNT_3 (IO_DMA1_BASE+0x07)
111#define DMA_CNT_4 (IO_DMA2_BASE+0x02)
112#define DMA_CNT_5 (IO_DMA2_BASE+0x06)
113#define DMA_CNT_6 (IO_DMA2_BASE+0x0A)
114#define DMA_CNT_7 (IO_DMA2_BASE+0x0E)
115
116#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
117#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
118#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
119
120#define DMA_AUTOINIT 0x10
121
122#define DMA_8BIT 0
123#define DMA_16BIT 1
124#define DMA_BUSMASTER 2
125
126extern spinlock_t dma_spin_lock;
127
128static __inline__ unsigned long claim_dma_lock(void)
129{
130 unsigned long flags;
131 spin_lock_irqsave(&dma_spin_lock, flags);
132 return flags;
133}
134
135static __inline__ void release_dma_lock(unsigned long flags)
136{
137 spin_unlock_irqrestore(&dma_spin_lock, flags);
138}
139
140/* enable/disable a specific DMA channel */
141static __inline__ void enable_dma(unsigned int dmanr)
142{
143 if (dmanr<=3)
144 dma_outb(dmanr, DMA1_MASK_REG);
145 else
146 dma_outb(dmanr & 3, DMA2_MASK_REG);
147}
148
149static __inline__ void disable_dma(unsigned int dmanr)
150{
151 if (dmanr<=3)
152 dma_outb(dmanr | 4, DMA1_MASK_REG);
153 else
154 dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
155}
156
157/* Clear the 'DMA Pointer Flip Flop'.
158 * Write 0 for LSB/MSB, 1 for MSB/LSB access.
159 * Use this once to initialize the FF to a known state.
160 * After that, keep track of it. :-)
161 * --- In order to do that, the DMA routines below should ---
162 * --- only be used while holding the DMA lock ! ---
163 */
164static __inline__ void clear_dma_ff(unsigned int dmanr)
165{
166 if (dmanr<=3)
167 dma_outb(0, DMA1_CLEAR_FF_REG);
168 else
169 dma_outb(0, DMA2_CLEAR_FF_REG);
170}
171
172/* set mode (above) for a specific DMA channel */
173static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
174{
175 if (dmanr<=3)
176 dma_outb(mode | dmanr, DMA1_MODE_REG);
177 else
178 dma_outb(mode | (dmanr&3), DMA2_MODE_REG);
179}
180
181/* Set transfer address & page bits for specific DMA channel.
182 * Assumes dma flipflop is clear.
183 */
184static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
185{
186 if (dmanr <= 3) {
187 dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
188 dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
189 } else {
190 dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
191 dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
192 }
193}
194
195
196/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
197 * a specific DMA channel.
198 * You must ensure the parameters are valid.
199 * NOTE: from a manual: "the number of transfers is one more
200 * than the initial word count"! This is taken into account.
201 * Assumes dma flip-flop is clear.
202 * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
203 */
204static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
205{
206 count--;
207 if (dmanr <= 3) {
208 dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
209 dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
210 } else {
211 dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
212 dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
213 }
214}
215
216
217/* Get DMA residue count. After a DMA transfer, this
218 * should return zero. Reading this while a DMA transfer is
219 * still in progress will return unpredictable results.
220 * If called before the channel has been used, it may return 1.
221 * Otherwise, it returns the number of _bytes_ left to transfer.
222 *
223 * Assumes DMA flip-flop is clear.
224 */
225static __inline__ int get_dma_residue(unsigned int dmanr)
226{
227 unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
228 : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
229
230 /* using short to get 16-bit wrap around */
231 unsigned short count;
232
233 count = 1 + dma_inb(io_port);
234 count += dma_inb(io_port) << 8;
235
236 return (dmanr<=3)? count : (count<<1);
237}
238
239
240/* These are in kernel/dma.c: */
241extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
242extern void free_dma(unsigned int dmanr); /* release it again */
243
244/* These are in arch/m68k/apollo/dma.c: */
245extern unsigned short dma_map_page(unsigned long phys_addr,int count,int type);
246extern void dma_unmap_page(unsigned short dma_addr);
247
248#endif /* _ASM_APOLLO_DMA_H */
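Although the header above is being deleted, its NOTES block is where the Apollo 8237 programming model was spelled out, so a short hedged sketch of how those helpers were meant to be combined is worth having; the channel number, direction and length below are arbitrary example values, and this is not taken from any in-tree caller:

/* Hypothetical sketch of arming one transfer with the helpers from the
 * removed apollodma.h.
 */
static void example_start_dma(unsigned int chan, unsigned long phys, int len)
{
	unsigned long flags = claim_dma_lock();	/* serialize 8237 access */

	disable_dma(chan);
	clear_dma_ff(chan);			/* reset the address flip-flop */
	set_dma_mode(chan, DMA_MODE_READ);	/* device -> memory */
	set_dma_addr(chan, phys);		/* physical, not virtual, address */
	set_dma_count(chan, len);		/* bytes; must be even on ch 5-7 */
	enable_dma(chan);

	release_dma_lock(flags);
}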
diff --git a/arch/m68k/include/asm/apollohw.h b/arch/m68k/include/asm/apollohw.h
index a1373b9aa281..635ef4f89010 100644
--- a/arch/m68k/include/asm/apollohw.h
+++ b/arch/m68k/include/asm/apollohw.h
@@ -98,7 +98,7 @@ extern u_long timer_physaddr;
98#define cpuctrl (*(volatile unsigned int *)(IO_BASE + cpuctrl_physaddr)) 98#define cpuctrl (*(volatile unsigned int *)(IO_BASE + cpuctrl_physaddr))
99#define pica (IO_BASE + pica_physaddr) 99#define pica (IO_BASE + pica_physaddr)
100#define picb (IO_BASE + picb_physaddr) 100#define picb (IO_BASE + picb_physaddr)
101#define timer (IO_BASE + timer_physaddr) 101#define apollo_timer (IO_BASE + timer_physaddr)
102#define addr_xlat_map ((unsigned short *)(IO_BASE + 0x17000)) 102#define addr_xlat_map ((unsigned short *)(IO_BASE + 0x17000))
103 103
104#define isaIO2mem(x) (((((x) & 0x3f8) << 7) | (((x) & 0xfc00) >> 6) | ((x) & 0x7)) + 0x40000 + IO_BASE) 104#define isaIO2mem(x) (((((x) & 0x3f8) << 7) | (((x) & 0xfc00) >> 6) | ((x) & 0x7)) + 0x40000 + IO_BASE)
diff --git a/arch/m68k/include/asm/bitsperlong.h b/arch/m68k/include/asm/bitsperlong.h
deleted file mode 100644
index 6dc0bb0c13b2..000000000000
--- a/arch/m68k/include/asm/bitsperlong.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/bitsperlong.h>
diff --git a/arch/m68k/include/asm/cputime.h b/arch/m68k/include/asm/cputime.h
deleted file mode 100644
index c79c5e892305..000000000000
--- a/arch/m68k/include/asm/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __M68K_CPUTIME_H
2#define __M68K_CPUTIME_H
3
4#include <asm-generic/cputime.h>
5
6#endif /* __M68K_CPUTIME_H */
diff --git a/arch/m68k/include/asm/delay.h b/arch/m68k/include/asm/delay.h
index 9c09becfd4c9..12d8fe4f1d30 100644
--- a/arch/m68k/include/asm/delay.h
+++ b/arch/m68k/include/asm/delay.h
@@ -43,7 +43,7 @@ static inline void __delay(unsigned long loops)
43extern void __bad_udelay(void); 43extern void __bad_udelay(void);
44 44
45 45
46#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE) 46#ifdef CONFIG_CPU_HAS_NO_MULDIV64
47/* 47/*
48 * The simpler m68k and ColdFire processors do not have a 32*32->64 48 * The simpler m68k and ColdFire processors do not have a 32*32->64
49 * multiply instruction. So we need to handle them a little differently. 49 * multiply instruction. So we need to handle them a little differently.
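The comment in that hunk is the whole argument: without a 32*32->64 multiply instruction the delay math has to be decomposed. As a sketch of the idea only (this is not the kernel's actual delay code), the 64-bit product can be assembled from 16*16->32 partial products:

/* Illustrative only: building a 32x32->64 product from 16x16->32
 * pieces, the kind of decomposition needed on CPUs that lack a
 * 64-bit-result multiply.  Not the kernel's implementation.
 */
static inline unsigned long long mul32x32_64(unsigned long a, unsigned long b)
{
	unsigned long a_hi = a >> 16, a_lo = a & 0xffff;
	unsigned long b_hi = b >> 16, b_lo = b & 0xffff;
	unsigned long long lo  = (unsigned long long)a_lo * b_lo;
	unsigned long long mid = (unsigned long long)a_lo * b_hi +
				 (unsigned long long)a_hi * b_lo;
	unsigned long long hi  = (unsigned long long)a_hi * b_hi;

	/* a*b = hi*2^32 + mid*2^16 + lo */
	return lo + (mid << 16) + (hi << 32);
}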
diff --git a/arch/m68k/include/asm/device.h b/arch/m68k/include/asm/device.h
deleted file mode 100644
index d8f9872b0e2d..000000000000
--- a/arch/m68k/include/asm/device.h
+++ /dev/null
@@ -1,7 +0,0 @@
1/*
2 * Arch specific extensions to struct device
3 *
4 * This file is released under the GPLv2
5 */
6#include <asm-generic/device.h>
7
diff --git a/arch/m68k/include/asm/emergency-restart.h b/arch/m68k/include/asm/emergency-restart.h
deleted file mode 100644
index 108d8c48e42e..000000000000
--- a/arch/m68k/include/asm/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASM_EMERGENCY_RESTART_H
2#define _ASM_EMERGENCY_RESTART_H
3
4#include <asm-generic/emergency-restart.h>
5
6#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/m68k/include/asm/errno.h b/arch/m68k/include/asm/errno.h
deleted file mode 100644
index 0d4e188d6ef6..000000000000
--- a/arch/m68k/include/asm/errno.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _M68K_ERRNO_H
2#define _M68K_ERRNO_H
3
4#include <asm-generic/errno.h>
5
6#endif /* _M68K_ERRNO_H */
diff --git a/arch/m68k/include/asm/futex.h b/arch/m68k/include/asm/futex.h
deleted file mode 100644
index 6a332a9f099c..000000000000
--- a/arch/m68k/include/asm/futex.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#include <asm-generic/futex.h>
5
6#endif
diff --git a/arch/m68k/include/asm/ioctl.h b/arch/m68k/include/asm/ioctl.h
deleted file mode 100644
index b279fe06dfe5..000000000000
--- a/arch/m68k/include/asm/ioctl.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/ioctl.h>
diff --git a/arch/m68k/include/asm/ipcbuf.h b/arch/m68k/include/asm/ipcbuf.h
deleted file mode 100644
index 84c7e51cb6d0..000000000000
--- a/arch/m68k/include/asm/ipcbuf.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/ipcbuf.h>
diff --git a/arch/m68k/include/asm/irq_regs.h b/arch/m68k/include/asm/irq_regs.h
deleted file mode 100644
index 3dd9c0b70270..000000000000
--- a/arch/m68k/include/asm/irq_regs.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/irq_regs.h>
diff --git a/arch/m68k/include/asm/kdebug.h b/arch/m68k/include/asm/kdebug.h
deleted file mode 100644
index 6ece1b037665..000000000000
--- a/arch/m68k/include/asm/kdebug.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/kdebug.h>
diff --git a/arch/m68k/include/asm/kmap_types.h b/arch/m68k/include/asm/kmap_types.h
deleted file mode 100644
index 3413cc1390ec..000000000000
--- a/arch/m68k/include/asm/kmap_types.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_M68K_KMAP_TYPES_H
2#define __ASM_M68K_KMAP_TYPES_H
3
4#include <asm-generic/kmap_types.h>
5
6#endif /* __ASM_M68K_KMAP_TYPES_H */
diff --git a/arch/m68k/include/asm/kvm_para.h b/arch/m68k/include/asm/kvm_para.h
deleted file mode 100644
index 14fab8f0b957..000000000000
--- a/arch/m68k/include/asm/kvm_para.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/kvm_para.h>
diff --git a/arch/m68k/include/asm/local.h b/arch/m68k/include/asm/local.h
deleted file mode 100644
index 6c259263e1f0..000000000000
--- a/arch/m68k/include/asm/local.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASM_M68K_LOCAL_H
2#define _ASM_M68K_LOCAL_H
3
4#include <asm-generic/local.h>
5
6#endif /* _ASM_M68K_LOCAL_H */
diff --git a/arch/m68k/include/asm/local64.h b/arch/m68k/include/asm/local64.h
deleted file mode 100644
index 36c93b5cc239..000000000000
--- a/arch/m68k/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/local64.h>
diff --git a/arch/m68k/include/asm/mac_mouse.h b/arch/m68k/include/asm/mac_mouse.h
deleted file mode 100644
index 39a5c292eaee..000000000000
--- a/arch/m68k/include/asm/mac_mouse.h
+++ /dev/null
@@ -1,23 +0,0 @@
1#ifndef _ASM_MAC_MOUSE_H
2#define _ASM_MAC_MOUSE_H
3
4/*
5 * linux/include/asm-m68k/mac_mouse.h
6 * header file for Macintosh ADB mouse driver
7 * 27-10-97 Michael Schmitz
8 * copied from:
9 * header file for Atari Mouse driver
10 * by Robert de Vries (robert@and.nl) on 19Jul93
11 */
12
13struct mouse_status {
14 char buttons;
15 short dx;
16 short dy;
17 int ready;
18 int active;
19 wait_queue_head_t wait;
20 struct fasync_struct *fasyncptr;
21};
22
23#endif
diff --git a/arch/m68k/include/asm/mcfmbus.h b/arch/m68k/include/asm/mcfmbus.h
deleted file mode 100644
index 319899c47a2c..000000000000
--- a/arch/m68k/include/asm/mcfmbus.h
+++ /dev/null
@@ -1,77 +0,0 @@
1/****************************************************************************/
2
3/*
4 * mcfmbus.h -- Coldfire MBUS support defines.
5 *
6 * (C) Copyright 1999, Martin Floeer (mfloeer@axcent.de)
7 */
8
9/****************************************************************************/
10
11
12#ifndef mcfmbus_h
13#define mcfmbus_h
14
15
16#define MCFMBUS_BASE 0x280
17#define MCFMBUS_IRQ_VECTOR 0x19
18#define MCFMBUS_IRQ 0x1
19#define MCFMBUS_CLK 0x3f
20#define MCFMBUS_IRQ_LEVEL 0x07 /*IRQ Level 1*/
21#define MCFMBUS_ADDRESS 0x01
22
23
24/*
25* Define the 5307 MBUS register set addresses
26*/
27
28#define MCFMBUS_MADR 0x00
29#define MCFMBUS_MFDR 0x04
30#define MCFMBUS_MBCR 0x08
31#define MCFMBUS_MBSR 0x0C
32#define MCFMBUS_MBDR 0x10
33
34
35#define MCFMBUS_MADR_ADDR(a) (((a)&0x7F)<<0x01) /*Slave Address*/
36
37#define MCFMBUS_MFDR_MBC(a) ((a)&0x3F) /*M-Bus Clock*/
38
39/*
40* Define bit flags in Control Register
41*/
42
43#define MCFMBUS_MBCR_MEN (0x80) /* M-Bus Enable */
44#define MCFMBUS_MBCR_MIEN (0x40) /* M-Bus Interrupt Enable */
45#define MCFMBUS_MBCR_MSTA (0x20) /* Master/Slave Mode Select Bit */
46#define MCFMBUS_MBCR_MTX (0x10) /* Transmit/Rcv Mode Select Bit */
47#define MCFMBUS_MBCR_TXAK (0x08) /* Transmit Acknowledge Enable */
48#define MCFMBUS_MBCR_RSTA (0x04) /* Repeat Start */
49
50/*
51* Define bit flags in Status Register
52*/
53
54#define MCFMBUS_MBSR_MCF (0x80) /* Data Transfer Complete */
55#define MCFMBUS_MBSR_MAAS (0x40) /* Addressed as a Slave */
56#define MCFMBUS_MBSR_MBB (0x20) /* Bus Busy */
57#define MCFMBUS_MBSR_MAL (0x10) /* Arbitration Lost */
58#define MCFMBUS_MBSR_SRW (0x04) /* Slave Transmit */
59#define MCFMBUS_MBSR_MIF (0x02) /* M-Bus Interrupt */
60#define MCFMBUS_MBSR_RXAK (0x01) /* No Acknowledge Received */
61
62/*
63* Define bit flags in DATA I/O Register
64*/
65
66#define MCFMBUS_MBDR_READ (0x01) /* 1=read 0=write MBUS */
67
68#define MBUSIOCSCLOCK 1
69#define MBUSIOCGCLOCK 2
70#define MBUSIOCSADDR 3
71#define MBUSIOCGADDR 4
72#define MBUSIOCSSLADDR 5
73#define MBUSIOCGSLADDR 6
74#define MBUSIOCSSUBADDR 7
75#define MBUSIOCGSUBADDR 8
76
77#endif
diff --git a/arch/m68k/include/asm/mman.h b/arch/m68k/include/asm/mman.h
deleted file mode 100644
index 8eebf89f5ab1..000000000000
--- a/arch/m68k/include/asm/mman.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/mman.h>
diff --git a/arch/m68k/include/asm/mutex.h b/arch/m68k/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/m68k/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
1/*
2 * Pull in the generic implementation for the mutex fastpath.
3 *
4 * TODO: implement optimized primitives instead, or leave the generic
5 * implementation in place, or pick the atomic_xchg() based generic
6 * implementation. (see asm-generic/mutex-xchg.h for details)
7 */
8
9#include <asm-generic/mutex-dec.h>
diff --git a/arch/m68k/include/asm/percpu.h b/arch/m68k/include/asm/percpu.h
deleted file mode 100644
index 0859d048faf5..000000000000
--- a/arch/m68k/include/asm/percpu.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_M68K_PERCPU_H
2#define __ASM_M68K_PERCPU_H
3
4#include <asm-generic/percpu.h>
5
6#endif /* __ASM_M68K_PERCPU_H */
diff --git a/arch/m68k/include/asm/resource.h b/arch/m68k/include/asm/resource.h
deleted file mode 100644
index e7d35019f337..000000000000
--- a/arch/m68k/include/asm/resource.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _M68K_RESOURCE_H
2#define _M68K_RESOURCE_H
3
4#include <asm-generic/resource.h>
5
6#endif /* _M68K_RESOURCE_H */
diff --git a/arch/m68k/include/asm/sbus.h b/arch/m68k/include/asm/sbus.h
deleted file mode 100644
index bfe3ba147f2e..000000000000
--- a/arch/m68k/include/asm/sbus.h
+++ /dev/null
@@ -1,45 +0,0 @@
1/*
2 * some sbus structures and macros to make usage of sbus drivers possible
3 */
4
5#ifndef __M68K_SBUS_H
6#define __M68K_SBUS_H
7
8struct sbus_dev {
9 struct {
10 unsigned int which_io;
11 unsigned int phys_addr;
12 } reg_addrs[1];
13};
14
15/* sbus IO functions stolen from include/asm-sparc/io.h for the serial driver */
16/* No SBUS on the Sun3, kludge -- sam */
17
18static inline void _sbus_writeb(unsigned char val, unsigned long addr)
19{
20 *(volatile unsigned char *)addr = val;
21}
22
23static inline unsigned char _sbus_readb(unsigned long addr)
24{
25 return *(volatile unsigned char *)addr;
26}
27
28static inline void _sbus_writel(unsigned long val, unsigned long addr)
29{
30 *(volatile unsigned long *)addr = val;
31
32}
33
34extern inline unsigned long _sbus_readl(unsigned long addr)
35{
36 return *(volatile unsigned long *)addr;
37}
38
39
40#define sbus_readb(a) _sbus_readb((unsigned long)a)
41#define sbus_writeb(v, a) _sbus_writeb(v, (unsigned long)a)
42#define sbus_readl(a) _sbus_readl((unsigned long)a)
43#define sbus_writel(v, a) _sbus_writel(v, (unsigned long)a)
44
45#endif
diff --git a/arch/m68k/include/asm/scatterlist.h b/arch/m68k/include/asm/scatterlist.h
deleted file mode 100644
index 312505452a1e..000000000000
--- a/arch/m68k/include/asm/scatterlist.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _M68K_SCATTERLIST_H
2#define _M68K_SCATTERLIST_H
3
4#include <asm-generic/scatterlist.h>
5
6#endif /* !(_M68K_SCATTERLIST_H) */
diff --git a/arch/m68k/include/asm/sections.h b/arch/m68k/include/asm/sections.h
deleted file mode 100644
index 5277e52715ec..000000000000
--- a/arch/m68k/include/asm/sections.h
+++ /dev/null
@@ -1,8 +0,0 @@
1#ifndef _ASM_M68K_SECTIONS_H
2#define _ASM_M68K_SECTIONS_H
3
4#include <asm-generic/sections.h>
5
6extern char _sbss[], _ebss[];
7
8#endif /* _ASM_M68K_SECTIONS_H */
diff --git a/arch/m68k/include/asm/shm.h b/arch/m68k/include/asm/shm.h
deleted file mode 100644
index fa56ec84a126..000000000000
--- a/arch/m68k/include/asm/shm.h
+++ /dev/null
@@ -1,31 +0,0 @@
1#ifndef _M68K_SHM_H
2#define _M68K_SHM_H
3
4
5/* format of page table entries that correspond to shared memory pages
6 currently out in swap space (see also mm/swap.c):
7 bits 0-1 (PAGE_PRESENT) is = 0
8 bits 8..2 (SWP_TYPE) are = SHM_SWP_TYPE
9 bits 31..9 are used like this:
10 bits 15..9 (SHM_ID) the id of the shared memory segment
11 bits 30..16 (SHM_IDX) the index of the page within the shared memory segment
12 (actually only bits 25..16 get used since SHMMAX is so low)
13 bit 31 (SHM_READ_ONLY) flag whether the page belongs to a read-only attach
14*/
15/* on the m68k both bits 0 and 1 must be zero */
16/* format on the sun3 is similar, but bits 30, 31 are set to zero and all
17 others are reduced by 2. --m */
18
19#ifndef CONFIG_SUN3
20#define SHM_ID_SHIFT 9
21#else
22#define SHM_ID_SHIFT 7
23#endif
24#define _SHM_ID_BITS 7
25#define SHM_ID_MASK ((1<<_SHM_ID_BITS)-1)
26
27#define SHM_IDX_SHIFT (SHM_ID_SHIFT+_SHM_ID_BITS)
28#define _SHM_IDX_BITS 15
29#define SHM_IDX_MASK ((1<<_SHM_IDX_BITS)-1)
30
31#endif /* _M68K_SHM_H */
diff --git a/arch/m68k/include/asm/siginfo.h b/arch/m68k/include/asm/siginfo.h
deleted file mode 100644
index 851d3d784b53..000000000000
--- a/arch/m68k/include/asm/siginfo.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _M68K_SIGINFO_H
2#define _M68K_SIGINFO_H
3
4#include <asm-generic/siginfo.h>
5
6#endif
diff --git a/arch/m68k/include/asm/statfs.h b/arch/m68k/include/asm/statfs.h
deleted file mode 100644
index 08d93f14e061..000000000000
--- a/arch/m68k/include/asm/statfs.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _M68K_STATFS_H
2#define _M68K_STATFS_H
3
4#include <asm-generic/statfs.h>
5
6#endif /* _M68K_STATFS_H */
diff --git a/arch/m68k/include/asm/topology.h b/arch/m68k/include/asm/topology.h
deleted file mode 100644
index ca173e9f26ff..000000000000
--- a/arch/m68k/include/asm/topology.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASM_M68K_TOPOLOGY_H
2#define _ASM_M68K_TOPOLOGY_H
3
4#include <asm-generic/topology.h>
5
6#endif /* _ASM_M68K_TOPOLOGY_H */
diff --git a/arch/m68k/include/asm/types.h b/arch/m68k/include/asm/types.h
deleted file mode 100644
index 89705adcbd52..000000000000
--- a/arch/m68k/include/asm/types.h
+++ /dev/null
@@ -1,22 +0,0 @@
1#ifndef _M68K_TYPES_H
2#define _M68K_TYPES_H
3
4/*
5 * This file is never included by application software unless
6 * explicitly requested (e.g., via linux/types.h) in which case the
7 * application is Linux specific so (user-) name space pollution is
8 * not a major issue. However, for interoperability, libraries still
9 * need to be careful to avoid a name clashes.
10 */
11#include <asm-generic/int-ll64.h>
12
13/*
14 * These aren't exported outside the kernel to avoid name space clashes
15 */
16#ifdef __KERNEL__
17
18#define BITS_PER_LONG 32
19
20#endif /* __KERNEL__ */
21
22#endif /* _M68K_TYPES_H */
diff --git a/arch/m68k/include/asm/unaligned.h b/arch/m68k/include/asm/unaligned.h
index f4043ae63db1..2b3ca0bf7a0d 100644
--- a/arch/m68k/include/asm/unaligned.h
+++ b/arch/m68k/include/asm/unaligned.h
@@ -2,7 +2,7 @@
2#define _ASM_M68K_UNALIGNED_H 2#define _ASM_M68K_UNALIGNED_H
3 3
4 4
5#if defined(CONFIG_COLDFIRE) || defined(CONFIG_M68000) 5#ifdef CONFIG_CPU_HAS_NO_UNALIGNED
6#include <linux/unaligned/be_struct.h> 6#include <linux/unaligned/be_struct.h>
7#include <linux/unaligned/le_byteshift.h> 7#include <linux/unaligned/le_byteshift.h>
8#include <linux/unaligned/generic.h> 8#include <linux/unaligned/generic.h>
@@ -12,7 +12,7 @@
12 12
13#else 13#else
14/* 14/*
15 * The m68k can do unaligned accesses itself. 15 * The m68k can do unaligned accesses itself.
16 */ 16 */
17#include <linux/unaligned/access_ok.h> 17#include <linux/unaligned/access_ok.h>
18#include <linux/unaligned/generic.h> 18#include <linux/unaligned/generic.h>
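
The hunk above only changes the predicate (the affected CPU families are now summarized by CONFIG_CPU_HAS_NO_UNALIGNED), but it is worth spelling out what the two branches mean: cores without hardware unaligned support must reassemble multi-byte values one byte at a time, while 68020 and later can dereference the pointer directly. A rough illustration (plain C, big-endian layout as on m68k; a sketch, not the kernel helpers themselves):

#include <stdint.h>

/* Safe on any core: build the value byte by byte (what the be_struct /
 * le_byteshift style helpers boil down to). */
static uint32_t load_be32_byteshift(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

/* Only valid where the CPU tolerates unaligned loads (the access_ok.h path). */
static uint32_t load_be32_direct(const void *p)
{
	return *(const uint32_t *)p;
}
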
diff --git a/arch/m68k/include/asm/xor.h b/arch/m68k/include/asm/xor.h
deleted file mode 100644
index c82eb12a5b18..000000000000
--- a/arch/m68k/include/asm/xor.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/xor.h>
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index 7dc186b7a85f..71fb29938dba 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -218,13 +218,10 @@ void __init setup_arch(char **cmdline_p)
218 printk(KERN_INFO "Motorola M5235EVB support (C)2005 Syn-tech Systems, Inc. (Jate Sujjavanich)\n"); 218 printk(KERN_INFO "Motorola M5235EVB support (C)2005 Syn-tech Systems, Inc. (Jate Sujjavanich)\n");
219#endif 219#endif
220 220
221 pr_debug("KERNEL -> TEXT=0x%06x-0x%06x DATA=0x%06x-0x%06x " 221 pr_debug("KERNEL -> TEXT=0x%p-0x%p DATA=0x%p-0x%p BSS=0x%p-0x%p\n",
222 "BSS=0x%06x-0x%06x\n", (int) &_stext, (int) &_etext, 222 _stext, _etext, _sdata, _edata, __bss_start, __bss_stop);
223 (int) &_sdata, (int) &_edata, 223 pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ",
224 (int) &_sbss, (int) &_ebss); 224 __bss_stop, memory_start, memory_start, memory_end);
225 pr_debug("MEMORY -> ROMFS=0x%06x-0x%06x MEM=0x%06x-0x%06x\n ",
226 (int) &_ebss, (int) memory_start,
227 (int) memory_start, (int) memory_end);
228 225
229 /* Keep a copy of command line */ 226 /* Keep a copy of command line */
230 *cmdline_p = &command_line[0]; 227 *cmdline_p = &command_line[0];
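
The rewritten pr_debug() lines lean on two things: the generic __bss_start/__bss_stop markers replace the m68k-private _sbss/_ebss, and printing the section symbols with %p drops the (int) casts that could truncate or sign-extend addresses. A hedged sketch of the pattern (kernel context assumed):

/* Section markers are declared as char arrays, so they can be printed
 * directly with %p and subtracted to get sizes. */
extern char _stext[], _etext[], _sdata[], _edata[];
extern char __bss_start[], __bss_stop[];

static void __init report_layout(void)
{
	pr_debug("TEXT=0x%p-0x%p DATA=0x%p-0x%p BSS=0x%p-0x%p (%lu bytes of bss)\n",
		 _stext, _etext, _sdata, _edata, __bss_start, __bss_stop,
		 (unsigned long)(__bss_stop - __bss_start));
}
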
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
index 8623f8dc16f8..9a5932ec3689 100644
--- a/arch/m68k/kernel/sys_m68k.c
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -479,9 +479,13 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
479 goto bad_access; 479 goto bad_access;
480 } 480 }
481 481
482 mem_value = *mem; 482 /*
483 * No need to check for EFAULT; we know that the page is
484 * present and writable.
485 */
486 __get_user(mem_value, mem);
483 if (mem_value == oldval) 487 if (mem_value == oldval)
484 *mem = newval; 488 __put_user(newval, mem);
485 489
486 pte_unmap_unlock(pte, ptl); 490 pte_unmap_unlock(pte, ptl);
487 up_read(&mm->mmap_sem); 491 up_read(&mm->mmap_sem);
diff --git a/arch/m68k/kernel/vmlinux-nommu.lds b/arch/m68k/kernel/vmlinux-nommu.lds
index 40e02d9c38b4..06a763f49fd3 100644
--- a/arch/m68k/kernel/vmlinux-nommu.lds
+++ b/arch/m68k/kernel/vmlinux-nommu.lds
@@ -78,9 +78,7 @@ SECTIONS {
78 __init_end = .; 78 __init_end = .;
79 } 79 }
80 80
81 _sbss = .;
82 BSS_SECTION(0, 0, 0) 81 BSS_SECTION(0, 0, 0)
83 _ebss = .;
84 82
85 _end = .; 83 _end = .;
86 84
diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds
index 63407c836826..d0993594f558 100644
--- a/arch/m68k/kernel/vmlinux-std.lds
+++ b/arch/m68k/kernel/vmlinux-std.lds
@@ -31,9 +31,7 @@ SECTIONS
31 31
32 RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE) 32 RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE)
33 33
34 _sbss = .;
35 BSS_SECTION(0, 0, 0) 34 BSS_SECTION(0, 0, 0)
36 _ebss = .;
37 35
38 _edata = .; /* End of data section */ 36 _edata = .; /* End of data section */
39 37
diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds
index ad0f46d64c0b..8080469ee6c1 100644
--- a/arch/m68k/kernel/vmlinux-sun3.lds
+++ b/arch/m68k/kernel/vmlinux-sun3.lds
@@ -44,9 +44,7 @@ __init_begin = .;
44 . = ALIGN(PAGE_SIZE); 44 . = ALIGN(PAGE_SIZE);
45 __init_end = .; 45 __init_end = .;
46 46
47 _sbss = .;
48 BSS_SECTION(0, 0, 0) 47 BSS_SECTION(0, 0, 0)
49 _ebss = .;
50 48
51 _end = . ; 49 _end = . ;
52 50
diff --git a/arch/m68k/lib/muldi3.c b/arch/m68k/lib/muldi3.c
index 79e928a525d0..ee5f0b1b5c5d 100644
--- a/arch/m68k/lib/muldi3.c
+++ b/arch/m68k/lib/muldi3.c
@@ -19,7 +19,7 @@ along with GNU CC; see the file COPYING. If not, write to
19the Free Software Foundation, 59 Temple Place - Suite 330, 19the Free Software Foundation, 59 Temple Place - Suite 330,
20Boston, MA 02111-1307, USA. */ 20Boston, MA 02111-1307, USA. */
21 21
22#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE) 22#ifdef CONFIG_CPU_HAS_NO_MULDIV64
23 23
24#define SI_TYPE_SIZE 32 24#define SI_TYPE_SIZE 32
25#define __BITS4 (SI_TYPE_SIZE / 4) 25#define __BITS4 (SI_TYPE_SIZE / 4)
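
Only the guard changes here (the affected cores are now described by CONFIG_CPU_HAS_NO_MULDIV64), but the file itself is the interesting part: it synthesizes a 32x32->64 multiply from 16-bit halves for CPUs that lack a 64-bit-result mulu.l. The decomposition it implements, written out as ordinary C (an illustrative sketch, not the kernel's macro-heavy version):

#include <stdint.h>

static uint64_t mul32x32(uint32_t u, uint32_t v)
{
	uint32_t ul = u & 0xffff, uh = u >> 16;
	uint32_t vl = v & 0xffff, vh = v >> 16;

	uint32_t ll = ul * vl;		/* low  x low                 */
	uint32_t lh = ul * vh;		/* low  x high, shifted by 16 */
	uint32_t hl = uh * vl;		/* high x low,  shifted by 16 */
	uint32_t hh = uh * vh;		/* high x high, shifted by 32 */

	uint64_t mid = (uint64_t)lh + hl;	/* keep the carry into bit 32 */

	return ((uint64_t)hh << 32) + (mid << 16) + ll;
}
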
diff --git a/arch/m68k/mm/init_mm.c b/arch/m68k/mm/init_mm.c
index f77f258dce3a..282f9de68966 100644
--- a/arch/m68k/mm/init_mm.c
+++ b/arch/m68k/mm/init_mm.c
@@ -104,7 +104,7 @@ void __init print_memmap(void)
104 MLK_ROUNDUP(__init_begin, __init_end), 104 MLK_ROUNDUP(__init_begin, __init_end),
105 MLK_ROUNDUP(_stext, _etext), 105 MLK_ROUNDUP(_stext, _etext),
106 MLK_ROUNDUP(_sdata, _edata), 106 MLK_ROUNDUP(_sdata, _edata),
107 MLK_ROUNDUP(_sbss, _ebss)); 107 MLK_ROUNDUP(__bss_start, __bss_stop));
108} 108}
109 109
110void __init mem_init(void) 110void __init mem_init(void)
diff --git a/arch/m68k/mm/init_no.c b/arch/m68k/mm/init_no.c
index 345ec0d83e3d..688e3664aea0 100644
--- a/arch/m68k/mm/init_no.c
+++ b/arch/m68k/mm/init_no.c
@@ -91,7 +91,7 @@ void __init mem_init(void)
91 totalram_pages = free_all_bootmem(); 91 totalram_pages = free_all_bootmem();
92 92
93 codek = (_etext - _stext) >> 10; 93 codek = (_etext - _stext) >> 10;
94 datak = (_ebss - _sdata) >> 10; 94 datak = (__bss_stop - _sdata) >> 10;
95 initk = (__init_begin - __init_end) >> 10; 95 initk = (__init_begin - __init_end) >> 10;
96 96
97 tmp = nr_free_pages() << PAGE_SHIFT; 97 tmp = nr_free_pages() << PAGE_SHIFT;
diff --git a/arch/m68k/platform/68328/head-de2.S b/arch/m68k/platform/68328/head-de2.S
index f632fdcb93e9..537d3245b539 100644
--- a/arch/m68k/platform/68328/head-de2.S
+++ b/arch/m68k/platform/68328/head-de2.S
@@ -60,8 +60,8 @@ _start:
60 * Move ROM filesystem above bss :-) 60 * Move ROM filesystem above bss :-)
61 */ 61 */
62 62
63 moveal #_sbss, %a0 /* romfs at the start of bss */ 63 moveal #__bss_start, %a0 /* romfs at the start of bss */
64 moveal #_ebss, %a1 /* Set up destination */ 64 moveal #__bss_stop, %a1 /* Set up destination */
65 movel %a0, %a2 /* Copy of bss start */ 65 movel %a0, %a2 /* Copy of bss start */
66 66
67 movel 8(%a0), %d1 /* Get size of ROMFS */ 67 movel 8(%a0), %d1 /* Get size of ROMFS */
@@ -84,8 +84,8 @@ _start:
84 * Initialize BSS segment to 0 84 * Initialize BSS segment to 0
85 */ 85 */
86 86
87 lea _sbss, %a0 87 lea __bss_start, %a0
88 lea _ebss, %a1 88 lea __bss_stop, %a1
89 89
90 /* Copy 0 to %a0 until %a0 == %a1 */ 90 /* Copy 0 to %a0 until %a0 == %a1 */
912: cmpal %a0, %a1 912: cmpal %a0, %a1
diff --git a/arch/m68k/platform/68328/head-pilot.S b/arch/m68k/platform/68328/head-pilot.S
index 2ebfd6420818..45a9dad29e3d 100644
--- a/arch/m68k/platform/68328/head-pilot.S
+++ b/arch/m68k/platform/68328/head-pilot.S
@@ -110,7 +110,7 @@ L0:
110 movel #CONFIG_VECTORBASE, %d7 110 movel #CONFIG_VECTORBASE, %d7
111 addl #16, %d7 111 addl #16, %d7
112 moveal %d7, %a0 112 moveal %d7, %a0
113 moveal #_ebss, %a1 113 moveal #__bss_stop, %a1
114 lea %a1@(512), %a2 114 lea %a1@(512), %a2
115 115
116 DBG_PUTC('C') 116 DBG_PUTC('C')
@@ -138,8 +138,8 @@ LD1:
138 138
139 DBG_PUTC('E') 139 DBG_PUTC('E')
140 140
141 moveal #_sbss, %a0 141 moveal #__bss_start, %a0
142 moveal #_ebss, %a1 142 moveal #__bss_stop, %a1
143 143
144 /* Copy 0 to %a0 until %a0 == %a1 */ 144 /* Copy 0 to %a0 until %a0 == %a1 */
145L1: 145L1:
@@ -150,7 +150,7 @@ L1:
150 DBG_PUTC('F') 150 DBG_PUTC('F')
151 151
152 /* Copy command line from end of bss to command line */ 152 /* Copy command line from end of bss to command line */
153 moveal #_ebss, %a0 153 moveal #__bss_stop, %a0
154 moveal #command_line, %a1 154 moveal #command_line, %a1
155 lea %a1@(512), %a2 155 lea %a1@(512), %a2
156 156
@@ -165,7 +165,7 @@ L3:
165 165
166 movel #_sdata, %d0 166 movel #_sdata, %d0
167 movel %d0, _rambase 167 movel %d0, _rambase
168 movel #_ebss, %d0 168 movel #__bss_stop, %d0
169 movel %d0, _ramstart 169 movel %d0, _ramstart
170 170
171 movel %a4, %d0 171 movel %a4, %d0
diff --git a/arch/m68k/platform/68328/head-ram.S b/arch/m68k/platform/68328/head-ram.S
index 7f1aeeacb219..5189ef926098 100644
--- a/arch/m68k/platform/68328/head-ram.S
+++ b/arch/m68k/platform/68328/head-ram.S
@@ -76,8 +76,8 @@ pclp3:
76 beq pclp3 76 beq pclp3
77#endif /* DEBUG */ 77#endif /* DEBUG */
78 moveal #0x007ffff0, %ssp 78 moveal #0x007ffff0, %ssp
79 moveal #_sbss, %a0 79 moveal #__bss_start, %a0
80 moveal #_ebss, %a1 80 moveal #__bss_stop, %a1
81 81
82 /* Copy 0 to %a0 until %a0 >= %a1 */ 82 /* Copy 0 to %a0 until %a0 >= %a1 */
83L1: 83L1:
diff --git a/arch/m68k/platform/68328/head-rom.S b/arch/m68k/platform/68328/head-rom.S
index a5ff96d0295f..3dff98ba2e97 100644
--- a/arch/m68k/platform/68328/head-rom.S
+++ b/arch/m68k/platform/68328/head-rom.S
@@ -59,8 +59,8 @@ _stext: movew #0x2700,%sr
59 cmpal %a1, %a2 59 cmpal %a1, %a2
60 bhi 1b 60 bhi 1b
61 61
62 moveal #_sbss, %a0 62 moveal #__bss_start, %a0
63 moveal #_ebss, %a1 63 moveal #__bss_stop, %a1
64 /* Copy 0 to %a0 until %a0 == %a1 */ 64 /* Copy 0 to %a0 until %a0 == %a1 */
65 65
661: 661:
@@ -70,7 +70,7 @@ _stext: movew #0x2700,%sr
70 70
71 movel #_sdata, %d0 71 movel #_sdata, %d0
72 movel %d0, _rambase 72 movel %d0, _rambase
73 movel #_ebss, %d0 73 movel #__bss_stop, %d0
74 movel %d0, _ramstart 74 movel %d0, _ramstart
75 movel #RAMEND-CONFIG_MEMORY_RESERVE*0x100000, %d0 75 movel #RAMEND-CONFIG_MEMORY_RESERVE*0x100000, %d0
76 movel %d0, _ramend 76 movel %d0, _ramend
diff --git a/arch/m68k/platform/68360/head-ram.S b/arch/m68k/platform/68360/head-ram.S
index 8eb94fb6b971..acd213170d80 100644
--- a/arch/m68k/platform/68360/head-ram.S
+++ b/arch/m68k/platform/68360/head-ram.S
@@ -219,8 +219,8 @@ LD1:
219 cmp.l #_edata, %a1 219 cmp.l #_edata, %a1
220 blt LD1 220 blt LD1
221 221
222 moveal #_sbss, %a0 222 moveal #__bss_start, %a0
223 moveal #_ebss, %a1 223 moveal #__bss_stop, %a1
224 224
225 /* Copy 0 to %a0 until %a0 == %a1 */ 225 /* Copy 0 to %a0 until %a0 == %a1 */
226L1: 226L1:
@@ -234,7 +234,7 @@ load_quicc:
234store_ram_size: 234store_ram_size:
235 /* Set ram size information */ 235 /* Set ram size information */
236 move.l #_sdata, _rambase 236 move.l #_sdata, _rambase
237 move.l #_ebss, _ramstart 237 move.l #__bss_stop, _ramstart
238 move.l #RAMEND, %d0 238 move.l #RAMEND, %d0
239 sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/ 239 sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/
240 move.l %d0, _ramend /* Different from RAMEND.*/ 240 move.l %d0, _ramend /* Different from RAMEND.*/
diff --git a/arch/m68k/platform/68360/head-rom.S b/arch/m68k/platform/68360/head-rom.S
index 97510e55b802..dfc756d99886 100644
--- a/arch/m68k/platform/68360/head-rom.S
+++ b/arch/m68k/platform/68360/head-rom.S
@@ -13,7 +13,7 @@
13 */ 13 */
14 14
15.global _stext 15.global _stext
16.global _sbss 16.global __bss_start
17.global _start 17.global _start
18 18
19.global _rambase 19.global _rambase
@@ -229,8 +229,8 @@ LD1:
229 cmp.l #_edata, %a1 229 cmp.l #_edata, %a1
230 blt LD1 230 blt LD1
231 231
232 moveal #_sbss, %a0 232 moveal #__bss_start, %a0
233 moveal #_ebss, %a1 233 moveal #__bss_stop, %a1
234 234
235 /* Copy 0 to %a0 until %a0 == %a1 */ 235 /* Copy 0 to %a0 until %a0 == %a1 */
236L1: 236L1:
@@ -244,7 +244,7 @@ load_quicc:
244store_ram_size: 244store_ram_size:
245 /* Set ram size information */ 245 /* Set ram size information */
246 move.l #_sdata, _rambase 246 move.l #_sdata, _rambase
247 move.l #_ebss, _ramstart 247 move.l #__bss_stop, _ramstart
248 move.l #RAMEND, %d0 248 move.l #RAMEND, %d0
249 sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/ 249 sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/
250 move.l %d0, _ramend /* Different from RAMEND.*/ 250 move.l %d0, _ramend /* Different from RAMEND.*/
diff --git a/arch/m68k/platform/coldfire/head.S b/arch/m68k/platform/coldfire/head.S
index 4e0c9eb3bd1f..b88f5716f357 100644
--- a/arch/m68k/platform/coldfire/head.S
+++ b/arch/m68k/platform/coldfire/head.S
@@ -230,8 +230,8 @@ _vstart:
230 /* 230 /*
231 * Move ROM filesystem above bss :-) 231 * Move ROM filesystem above bss :-)
232 */ 232 */
233 lea _sbss,%a0 /* get start of bss */ 233 lea __bss_start,%a0 /* get start of bss */
234 lea _ebss,%a1 /* set up destination */ 234 lea __bss_stop,%a1 /* set up destination */
235 movel %a0,%a2 /* copy of bss start */ 235 movel %a0,%a2 /* copy of bss start */
236 236
237 movel 8(%a0),%d0 /* get size of ROMFS */ 237 movel 8(%a0),%d0 /* get size of ROMFS */
@@ -249,7 +249,7 @@ _copy_romfs:
249 bne _copy_romfs 249 bne _copy_romfs
250 250
251#else /* CONFIG_ROMFS_FS */ 251#else /* CONFIG_ROMFS_FS */
252 lea _ebss,%a1 252 lea __bss_stop,%a1
253 movel %a1,_ramstart 253 movel %a1,_ramstart
254#endif /* CONFIG_ROMFS_FS */ 254#endif /* CONFIG_ROMFS_FS */
255 255
@@ -257,8 +257,8 @@ _copy_romfs:
257 /* 257 /*
258 * Zero out the bss region. 258 * Zero out the bss region.
259 */ 259 */
260 lea _sbss,%a0 /* get start of bss */ 260 lea __bss_start,%a0 /* get start of bss */
261 lea _ebss,%a1 /* get end of bss */ 261 lea __bss_stop,%a1 /* get end of bss */
262 clrl %d0 /* set value */ 262 clrl %d0 /* set value */
263_clear_bss: 263_clear_bss:
264 movel %d0,(%a0)+ /* clear each word */ 264 movel %d0,(%a0)+ /* clear each word */
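
All of the head.S variants touched above do the same two early-boot jobs against the renamed symbols: move the appended ROMFS image out of the BSS window, then zero BSS before any C code runs. The zeroing loop is nothing more than a memset over the linker-provided range; in C it would read roughly as follows (sketch only — the real code runs before a C environment exists, which is why it is assembly):

#include <linux/string.h>

extern char __bss_start[], __bss_stop[];

static void clear_bss(void)
{
	memset(__bss_start, 0, __bss_stop - __bss_start);
}
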
diff --git a/arch/m68k/sun3/prom/init.c b/arch/m68k/sun3/prom/init.c
index d8e6349336b4..eeba067d565f 100644
--- a/arch/m68k/sun3/prom/init.c
+++ b/arch/m68k/sun3/prom/init.c
@@ -22,57 +22,13 @@ int prom_root_node;
22struct linux_nodeops *prom_nodeops; 22struct linux_nodeops *prom_nodeops;
23 23
24/* You must call prom_init() before you attempt to use any of the 24/* You must call prom_init() before you attempt to use any of the
25 * routines in the prom library. It returns 0 on success, 1 on 25 * routines in the prom library.
26 * failure. It gets passed the pointer to the PROM vector. 26 * It gets passed the pointer to the PROM vector.
27 */ 27 */
28 28
29extern void prom_meminit(void);
30extern void prom_ranges_init(void);
31
32void __init prom_init(struct linux_romvec *rp) 29void __init prom_init(struct linux_romvec *rp)
33{ 30{
34 romvec = rp; 31 romvec = rp;
35#ifndef CONFIG_SUN3
36 switch(romvec->pv_romvers) {
37 case 0:
38 prom_vers = PROM_V0;
39 break;
40 case 2:
41 prom_vers = PROM_V2;
42 break;
43 case 3:
44 prom_vers = PROM_V3;
45 break;
46 case 4:
47 prom_vers = PROM_P1275;
48 prom_printf("PROMLIB: Sun IEEE Prom not supported yet\n");
49 prom_halt();
50 break;
51 default:
52 prom_printf("PROMLIB: Bad PROM version %d\n",
53 romvec->pv_romvers);
54 prom_halt();
55 break;
56 };
57
58 prom_rev = romvec->pv_plugin_revision;
59 prom_prev = romvec->pv_printrev;
60 prom_nodeops = romvec->pv_nodeops;
61
62 prom_root_node = prom_getsibling(0);
63 if((prom_root_node == 0) || (prom_root_node == -1))
64 prom_halt();
65
66 if((((unsigned long) prom_nodeops) == 0) ||
67 (((unsigned long) prom_nodeops) == -1))
68 prom_halt();
69
70 prom_meminit();
71
72 prom_ranges_init();
73#endif
74// printk("PROMLIB: Sun Boot Prom Version %d Revision %d\n",
75// romvec->pv_romvers, prom_rev);
76 32
77 /* Initialization successful. */ 33 /* Initialization successful. */
78 return; 34 return;
diff --git a/arch/microblaze/include/asm/sections.h b/arch/microblaze/include/asm/sections.h
index 4487e150b455..c07ed5d2a820 100644
--- a/arch/microblaze/include/asm/sections.h
+++ b/arch/microblaze/include/asm/sections.h
@@ -18,10 +18,6 @@ extern char _ssbss[], _esbss[];
18extern unsigned long __ivt_start[], __ivt_end[]; 18extern unsigned long __ivt_start[], __ivt_end[];
19extern char _etext[], _stext[]; 19extern char _etext[], _stext[];
20 20
21# ifdef CONFIG_MTD_UCLINUX
22extern char *_ebss;
23# endif
24
25extern u32 _fdt_start[], _fdt_end[]; 21extern u32 _fdt_start[], _fdt_end[];
26 22
27# endif /* !__ASSEMBLY__ */ 23# endif /* !__ASSEMBLY__ */
diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c
index bb4907c828dc..2b25bcf05c00 100644
--- a/arch/microblaze/kernel/microblaze_ksyms.c
+++ b/arch/microblaze/kernel/microblaze_ksyms.c
@@ -21,9 +21,6 @@
21#include <linux/ftrace.h> 21#include <linux/ftrace.h>
22#include <linux/uaccess.h> 22#include <linux/uaccess.h>
23 23
24extern char *_ebss;
25EXPORT_SYMBOL_GPL(_ebss);
26
27#ifdef CONFIG_FUNCTION_TRACER 24#ifdef CONFIG_FUNCTION_TRACER
28extern void _mcount(void); 25extern void _mcount(void);
29EXPORT_SYMBOL(_mcount); 26EXPORT_SYMBOL(_mcount);
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 16d8dfd9094b..4da971d4392f 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -121,7 +121,7 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
121 121
122 /* Move ROMFS out of BSS before clearing it */ 122 /* Move ROMFS out of BSS before clearing it */
123 if (romfs_size > 0) { 123 if (romfs_size > 0) {
124 memmove(&_ebss, (int *)romfs_base, romfs_size); 124 memmove(&__bss_stop, (int *)romfs_base, romfs_size);
125 klimit += romfs_size; 125 klimit += romfs_size;
126 } 126 }
127#endif 127#endif
@@ -165,7 +165,7 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
165 BUG_ON(romfs_size < 0); /* What else can we do? */ 165 BUG_ON(romfs_size < 0); /* What else can we do? */
166 166
167 printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n", 167 printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
168 romfs_size, romfs_base, (unsigned)&_ebss); 168 romfs_size, romfs_base, (unsigned)&__bss_stop);
169 169
170 printk("New klimit: 0x%08x\n", (unsigned)klimit); 170 printk("New klimit: 0x%08x\n", (unsigned)klimit);
171#endif 171#endif
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index 109e9d86ade4..936d01a689d7 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -131,7 +131,6 @@ SECTIONS {
131 *(COMMON) 131 *(COMMON)
132 . = ALIGN (4) ; 132 . = ALIGN (4) ;
133 __bss_stop = . ; 133 __bss_stop = . ;
134 _ebss = . ;
135 } 134 }
136 . = ALIGN(PAGE_SIZE); 135 . = ALIGN(PAGE_SIZE);
137 _end = .; 136 _end = .;
diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h
index 4a5350037c8f..1b6199740e98 100644
--- a/arch/sh/include/asm/sections.h
+++ b/arch/sh/include/asm/sections.h
@@ -6,7 +6,6 @@
6extern long __nosave_begin, __nosave_end; 6extern long __nosave_begin, __nosave_end;
7extern long __machvec_start, __machvec_end; 7extern long __machvec_start, __machvec_end;
8extern char __uncached_start, __uncached_end; 8extern char __uncached_start, __uncached_end;
9extern char _ebss[];
10extern char __start_eh_frame[], __stop_eh_frame[]; 9extern char __start_eh_frame[], __stop_eh_frame[];
11 10
12#endif /* __ASM_SH_SECTIONS_H */ 11#endif /* __ASM_SH_SECTIONS_H */
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 7b57bf1dc855..ebe7a7d97215 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -273,7 +273,7 @@ void __init setup_arch(char **cmdline_p)
273 data_resource.start = virt_to_phys(_etext); 273 data_resource.start = virt_to_phys(_etext);
274 data_resource.end = virt_to_phys(_edata)-1; 274 data_resource.end = virt_to_phys(_edata)-1;
275 bss_resource.start = virt_to_phys(__bss_start); 275 bss_resource.start = virt_to_phys(__bss_start);
276 bss_resource.end = virt_to_phys(_ebss)-1; 276 bss_resource.end = virt_to_phys(__bss_stop)-1;
277 277
278#ifdef CONFIG_CMDLINE_OVERWRITE 278#ifdef CONFIG_CMDLINE_OVERWRITE
279 strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line)); 279 strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line));
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
index 3896f26efa4a..2a0a596ebf67 100644
--- a/arch/sh/kernel/sh_ksyms_32.c
+++ b/arch/sh/kernel/sh_ksyms_32.c
@@ -19,7 +19,6 @@ EXPORT_SYMBOL(csum_partial);
19EXPORT_SYMBOL(csum_partial_copy_generic); 19EXPORT_SYMBOL(csum_partial_copy_generic);
20EXPORT_SYMBOL(copy_page); 20EXPORT_SYMBOL(copy_page);
21EXPORT_SYMBOL(__clear_user); 21EXPORT_SYMBOL(__clear_user);
22EXPORT_SYMBOL(_ebss);
23EXPORT_SYMBOL(empty_zero_page); 22EXPORT_SYMBOL(empty_zero_page);
24 23
25#define DECLARE_EXPORT(name) \ 24#define DECLARE_EXPORT(name) \
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index c98905f71e28..db88cbf9eafd 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -78,7 +78,6 @@ SECTIONS
78 . = ALIGN(PAGE_SIZE); 78 . = ALIGN(PAGE_SIZE);
79 __init_end = .; 79 __init_end = .;
80 BSS_SECTION(0, PAGE_SIZE, 4) 80 BSS_SECTION(0, PAGE_SIZE, 4)
81 _ebss = .; /* uClinux MTD sucks */
82 _end = . ; 81 _end = . ;
83 82
84 STABS_DEBUG 83 STABS_DEBUG
diff --git a/arch/sh/lib/mcount.S b/arch/sh/lib/mcount.S
index 84a57761f17e..60164e65d665 100644
--- a/arch/sh/lib/mcount.S
+++ b/arch/sh/lib/mcount.S
@@ -39,7 +39,7 @@
39 * 39 *
40 * Make sure the stack pointer contains a valid address. Valid 40 * Make sure the stack pointer contains a valid address. Valid
41 * addresses for kernel stacks are anywhere after the bss 41 * addresses for kernel stacks are anywhere after the bss
42 * (after _ebss) and anywhere in init_thread_union (init_stack). 42 * (after __bss_stop) and anywhere in init_thread_union (init_stack).
43 */ 43 */
44#define STACK_CHECK() \ 44#define STACK_CHECK() \
45 mov #(THREAD_SIZE >> 10), r0; \ 45 mov #(THREAD_SIZE >> 10), r0; \
@@ -60,7 +60,7 @@
60 cmp/hi r2, r1; \ 60 cmp/hi r2, r1; \
61 bf stack_panic; \ 61 bf stack_panic; \
62 \ 62 \
63 /* If sp > _ebss then we're OK. */ \ 63 /* If sp > __bss_stop then we're OK. */ \
64 mov.l .L_ebss, r1; \ 64 mov.l .L_ebss, r1; \
65 cmp/hi r1, r15; \ 65 cmp/hi r1, r15; \
66 bt 1f; \ 66 bt 1f; \
@@ -70,7 +70,7 @@
70 cmp/hs r1, r15; \ 70 cmp/hs r1, r15; \
71 bf stack_panic; \ 71 bf stack_panic; \
72 \ 72 \
73 /* If sp > init_stack && sp < _ebss, not OK. */ \ 73 /* If sp > init_stack && sp < __bss_stop, not OK. */ \
74 add r0, r1; \ 74 add r0, r1; \
75 cmp/hs r1, r15; \ 75 cmp/hs r1, r15; \
76 bt stack_panic; \ 76 bt stack_panic; \
@@ -292,8 +292,6 @@ stack_panic:
292 nop 292 nop
293 293
294 .align 2 294 .align 2
295.L_ebss:
296 .long _ebss
297.L_init_thread_union: 295.L_init_thread_union:
298 .long init_thread_union 296 .long init_thread_union
299.Lpanic: 297.Lpanic:
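
STACK_CHECK() above is the sh ftrace entry guard: before following a frame it verifies that the stack pointer is plausible, and with _ebss gone the boundary it tests against is the generic __bss_stop symbol. A rough C rendering of the test (hedged; the assembly performs the equivalent comparisons with cmp/hi and cmp/hs):

extern char __bss_stop[];
extern union thread_union init_thread_union;

static int stack_ok(unsigned long sp)
{
	unsigned long init_lo = (unsigned long)&init_thread_union;
	unsigned long init_hi = init_lo + THREAD_SIZE;

	if (sp > (unsigned long)__bss_stop)	/* normal kernel stacks live above bss */
		return 1;
	return sp >= init_lo && sp < init_hi;	/* or on the boot-time init stack */
}
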
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ba2657c49217..8ec3a1aa4abd 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1527,7 +1527,7 @@ config SECCOMP
1527 If unsure, say Y. Only embedded should say N here. 1527 If unsure, say Y. Only embedded should say N here.
1528 1528
1529config CC_STACKPROTECTOR 1529config CC_STACKPROTECTOR
1530 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" 1530 bool "Enable -fstack-protector buffer overflow detection"
1531 ---help--- 1531 ---help---
1532 This option turns on the -fstack-protector GCC feature. This 1532 This option turns on the -fstack-protector GCC feature. This
1533 feature puts, at the beginning of functions, a canary value on 1533 feature puts, at the beginning of functions, a canary value on
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 441520e4174f..a3ac52b29cbf 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -33,6 +33,14 @@
33#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */ 33#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
34#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */ 34#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
35#define MCI_STATUS_AR (1ULL<<55) /* Action required */ 35#define MCI_STATUS_AR (1ULL<<55) /* Action required */
36#define MCACOD 0xffff /* MCA Error Code */
37
38/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
39#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
40#define MCACOD_SCRUBMSK 0xfff0
41#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
42#define MCACOD_DATA 0x0134 /* Data Load */
43#define MCACOD_INSTR 0x0150 /* Instruction Fetch */
36 44
37/* MCi_MISC register defines */ 45/* MCi_MISC register defines */
38#define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f) 46#define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f)
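
Moving MCACOD and the architectural sub-codes into <asm/mce.h> lets more than one file test them: the severity table keeps using them (the mce-severity.c hunk below only deletes its private copies), and the new Sandy Bridge quirk in mce.c compares against MCACOD_INSTR. Typical usage looks like this (sketch; kernel types assumed, function name illustrative):

static bool is_memory_scrub_error(u64 status)
{
	u16 mcacod = status & MCACOD;	/* low 16 bits of MCi_STATUS hold the MCA error code */

	/* 0xC0-0xCF is the architecturally defined memory-scrubbing range */
	return (mcacod & MCACOD_SCRUBMSK) == MCACOD_SCRUB;
}
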
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index dab39350e51e..cb4e43bce98a 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -196,11 +196,16 @@ static inline u32 get_ibs_caps(void) { return 0; }
196extern void perf_events_lapic_init(void); 196extern void perf_events_lapic_init(void);
197 197
198/* 198/*
199 * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups. 199 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
200 * This flag is otherwise unused and ABI specified to be 0, so nobody should 200 * unused and ABI specified to be 0, so nobody should care what we do with
201 * care what we do with it. 201 * them.
202 *
203 * EXACT - the IP points to the exact instruction that triggered the
204 * event (HW bugs exempt).
205 * VM - original X86_VM_MASK; see set_linear_ip().
202 */ 206 */
203#define PERF_EFLAGS_EXACT (1UL << 3) 207#define PERF_EFLAGS_EXACT (1UL << 3)
208#define PERF_EFLAGS_VM (1UL << 5)
204 209
205struct pt_regs; 210struct pt_regs;
206extern unsigned long perf_instruction_pointer(struct pt_regs *regs); 211extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 95bf99de9058..1b8e5a03d942 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -25,10 +25,6 @@ unsigned long acpi_realmode_flags;
25static char temp_stack[4096]; 25static char temp_stack[4096];
26#endif 26#endif
27 27
28asmlinkage void acpi_enter_s3(void)
29{
30 acpi_enter_sleep_state(3, wake_sleep_flags);
31}
32/** 28/**
33 * acpi_suspend_lowlevel - save kernel state 29 * acpi_suspend_lowlevel - save kernel state
34 * 30 *
diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h
index 5653a5791ec9..67f59f8c6956 100644
--- a/arch/x86/kernel/acpi/sleep.h
+++ b/arch/x86/kernel/acpi/sleep.h
@@ -2,7 +2,6 @@
2 * Variables and functions used by the code in sleep.c 2 * Variables and functions used by the code in sleep.c
3 */ 3 */
4 4
5#include <linux/linkage.h>
6#include <asm/realmode.h> 5#include <asm/realmode.h>
7 6
8extern unsigned long saved_video_mode; 7extern unsigned long saved_video_mode;
@@ -11,7 +10,6 @@ extern long saved_magic;
11extern int wakeup_pmode_return; 10extern int wakeup_pmode_return;
12 11
13extern u8 wake_sleep_flags; 12extern u8 wake_sleep_flags;
14extern asmlinkage void acpi_enter_s3(void);
15 13
16extern unsigned long acpi_copy_wakeup_routine(unsigned long); 14extern unsigned long acpi_copy_wakeup_routine(unsigned long);
17extern void wakeup_long64(void); 15extern void wakeup_long64(void);
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index 72610839f03b..13ab720573e3 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -74,7 +74,9 @@ restore_registers:
74ENTRY(do_suspend_lowlevel) 74ENTRY(do_suspend_lowlevel)
75 call save_processor_state 75 call save_processor_state
76 call save_registers 76 call save_registers
77 call acpi_enter_s3 77 pushl $3
78 call acpi_enter_sleep_state
79 addl $4, %esp
78 80
79# In case of S3 failure, we'll emerge here. Jump 81# In case of S3 failure, we'll emerge here. Jump
80# to ret_point to recover 82# to ret_point to recover
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 014d1d28c397..8ea5164cbd04 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -71,7 +71,9 @@ ENTRY(do_suspend_lowlevel)
71 movq %rsi, saved_rsi 71 movq %rsi, saved_rsi
72 72
73 addq $8, %rsp 73 addq $8, %rsp
74 call acpi_enter_s3 74 movl $3, %edi
75 xorl %eax, %eax
76 call acpi_enter_sleep_state
75 /* in case something went wrong, restore the machine status and go on */ 77 /* in case something went wrong, restore the machine status and go on */
76 jmp resume_point 78 jmp resume_point
77 79
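
Both wakeup stubs stop going through the acpi_enter_s3() C helper (deleted in the sleep.c/sleep.h hunks above) and call acpi_enter_sleep_state() directly: the 32-bit path pushes the argument and pops it again afterwards, the 64-bit path passes it in %edi and clears %eax as the varargs-safe convention asks. At the C level the call now amounts to the following sketch (it assumes the one-argument acpi_enter_sleep_state() form implied by the ACPICA flags-removal hunks further down):

#include <acpi/acpi.h>

static acpi_status do_suspend_sketch(void)
{
	/* "pushl $3; call acpi_enter_sleep_state"                      (32-bit)
	 * "movl $3, %edi; xorl %eax, %eax; call acpi_enter_sleep_state" (64-bit) */
	return acpi_enter_sleep_state(ACPI_STATE_S3);
}
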
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 931280ff8299..afb7ff79a29f 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -224,7 +224,7 @@ void __init arch_init_ideal_nops(void)
224 ideal_nops = intel_nops; 224 ideal_nops = intel_nops;
225#endif 225#endif
226 } 226 }
227 227 break;
228 default: 228 default:
229#ifdef CONFIG_X86_64 229#ifdef CONFIG_X86_64
230 ideal_nops = k8_nops; 230 ideal_nops = k8_nops;
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 406eee784684..a6c64aaddf9a 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1204,7 +1204,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
1204 BUG_ON(!cfg->vector); 1204 BUG_ON(!cfg->vector);
1205 1205
1206 vector = cfg->vector; 1206 vector = cfg->vector;
1207 for_each_cpu(cpu, cfg->domain) 1207 for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
1208 per_cpu(vector_irq, cpu)[vector] = -1; 1208 per_cpu(vector_irq, cpu)[vector] = -1;
1209 1209
1210 cfg->vector = 0; 1210 cfg->vector = 0;
@@ -1212,7 +1212,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
1212 1212
1213 if (likely(!cfg->move_in_progress)) 1213 if (likely(!cfg->move_in_progress))
1214 return; 1214 return;
1215 for_each_cpu(cpu, cfg->old_domain) { 1215 for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
1216 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; 1216 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
1217 vector++) { 1217 vector++) {
1218 if (per_cpu(vector_irq, cpu)[vector] != irq) 1218 if (per_cpu(vector_irq, cpu)[vector] != irq)
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 413c2ced887c..13017626f9a8 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -55,13 +55,6 @@ static struct severity {
55#define MCI_UC_S (MCI_STATUS_UC|MCI_STATUS_S) 55#define MCI_UC_S (MCI_STATUS_UC|MCI_STATUS_S)
56#define MCI_UC_SAR (MCI_STATUS_UC|MCI_STATUS_S|MCI_STATUS_AR) 56#define MCI_UC_SAR (MCI_STATUS_UC|MCI_STATUS_S|MCI_STATUS_AR)
57#define MCI_ADDR (MCI_STATUS_ADDRV|MCI_STATUS_MISCV) 57#define MCI_ADDR (MCI_STATUS_ADDRV|MCI_STATUS_MISCV)
58#define MCACOD 0xffff
59/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
60#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
61#define MCACOD_SCRUBMSK 0xfff0
62#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
63#define MCACOD_DATA 0x0134 /* Data Load */
64#define MCACOD_INSTR 0x0150 /* Instruction Fetch */
65 58
66 MCESEV( 59 MCESEV(
67 NO, "Invalid", 60 NO, "Invalid",
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 5e095f873e3e..292d0258311c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -103,6 +103,8 @@ DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
103 103
104static DEFINE_PER_CPU(struct work_struct, mce_work); 104static DEFINE_PER_CPU(struct work_struct, mce_work);
105 105
106static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
107
106/* 108/*
107 * CPU/chipset specific EDAC code can register a notifier call here to print 109 * CPU/chipset specific EDAC code can register a notifier call here to print
108 * MCE errors in a human-readable form. 110 * MCE errors in a human-readable form.
@@ -650,14 +652,18 @@ EXPORT_SYMBOL_GPL(machine_check_poll);
650 * Do a quick check if any of the events requires a panic. 652 * Do a quick check if any of the events requires a panic.
651 * This decides if we keep the events around or clear them. 653 * This decides if we keep the events around or clear them.
652 */ 654 */
653static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp) 655static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
656 struct pt_regs *regs)
654{ 657{
655 int i, ret = 0; 658 int i, ret = 0;
656 659
657 for (i = 0; i < banks; i++) { 660 for (i = 0; i < banks; i++) {
658 m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i)); 661 m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
659 if (m->status & MCI_STATUS_VAL) 662 if (m->status & MCI_STATUS_VAL) {
660 __set_bit(i, validp); 663 __set_bit(i, validp);
664 if (quirk_no_way_out)
665 quirk_no_way_out(i, m, regs);
666 }
661 if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY) 667 if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY)
662 ret = 1; 668 ret = 1;
663 } 669 }
@@ -1040,7 +1046,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
1040 *final = m; 1046 *final = m;
1041 1047
1042 memset(valid_banks, 0, sizeof(valid_banks)); 1048 memset(valid_banks, 0, sizeof(valid_banks));
1043 no_way_out = mce_no_way_out(&m, &msg, valid_banks); 1049 no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);
1044 1050
1045 barrier(); 1051 barrier();
1046 1052
@@ -1418,6 +1424,34 @@ static void __mcheck_cpu_init_generic(void)
1418 } 1424 }
1419} 1425}
1420 1426
1427/*
1428 * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
1429 * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
1430 * Vol 3B Table 15-20). But this confuses both the code that determines
1431 * whether the machine check occurred in kernel or user mode, and also
1432 * the severity assessment code. Pretend that EIPV was set, and take the
1433 * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
1434 */
1435static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
1436{
1437 if (bank != 0)
1438 return;
1439 if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
1440 return;
1441 if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
1442 MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
1443 MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
1444 MCACOD)) !=
1445 (MCI_STATUS_UC|MCI_STATUS_EN|
1446 MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
1447 MCI_STATUS_AR|MCACOD_INSTR))
1448 return;
1449
1450 m->mcgstatus |= MCG_STATUS_EIPV;
1451 m->ip = regs->ip;
1452 m->cs = regs->cs;
1453}
1454
1421/* Add per CPU specific workarounds here */ 1455/* Add per CPU specific workarounds here */
1422static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) 1456static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
1423{ 1457{
@@ -1515,6 +1549,9 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
1515 */ 1549 */
1516 if (c->x86 == 6 && c->x86_model <= 13 && mce_bootlog < 0) 1550 if (c->x86 == 6 && c->x86_model <= 13 && mce_bootlog < 0)
1517 mce_bootlog = 0; 1551 mce_bootlog = 0;
1552
1553 if (c->x86 == 6 && c->x86_model == 45)
1554 quirk_no_way_out = quirk_sandybridge_ifu;
1518 } 1555 }
1519 if (monarch_timeout < 0) 1556 if (monarch_timeout < 0)
1520 monarch_timeout = 0; 1557 monarch_timeout = 0;
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 29557aa06dda..915b876edd1e 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -32,6 +32,8 @@
32#include <asm/smp.h> 32#include <asm/smp.h>
33#include <asm/alternative.h> 33#include <asm/alternative.h>
34#include <asm/timer.h> 34#include <asm/timer.h>
35#include <asm/desc.h>
36#include <asm/ldt.h>
35 37
36#include "perf_event.h" 38#include "perf_event.h"
37 39
@@ -1738,6 +1740,29 @@ valid_user_frame(const void __user *fp, unsigned long size)
1738 return (__range_not_ok(fp, size, TASK_SIZE) == 0); 1740 return (__range_not_ok(fp, size, TASK_SIZE) == 0);
1739} 1741}
1740 1742
1743static unsigned long get_segment_base(unsigned int segment)
1744{
1745 struct desc_struct *desc;
1746 int idx = segment >> 3;
1747
1748 if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
1749 if (idx > LDT_ENTRIES)
1750 return 0;
1751
1752 if (idx > current->active_mm->context.size)
1753 return 0;
1754
1755 desc = current->active_mm->context.ldt;
1756 } else {
1757 if (idx > GDT_ENTRIES)
1758 return 0;
1759
1760 desc = __this_cpu_ptr(&gdt_page.gdt[0]);
1761 }
1762
1763 return get_desc_base(desc + idx);
1764}
1765
1741#ifdef CONFIG_COMPAT 1766#ifdef CONFIG_COMPAT
1742 1767
1743#include <asm/compat.h> 1768#include <asm/compat.h>
@@ -1746,13 +1771,17 @@ static inline int
1746perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) 1771perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
1747{ 1772{
1748 /* 32-bit process in 64-bit kernel. */ 1773 /* 32-bit process in 64-bit kernel. */
1774 unsigned long ss_base, cs_base;
1749 struct stack_frame_ia32 frame; 1775 struct stack_frame_ia32 frame;
1750 const void __user *fp; 1776 const void __user *fp;
1751 1777
1752 if (!test_thread_flag(TIF_IA32)) 1778 if (!test_thread_flag(TIF_IA32))
1753 return 0; 1779 return 0;
1754 1780
1755 fp = compat_ptr(regs->bp); 1781 cs_base = get_segment_base(regs->cs);
1782 ss_base = get_segment_base(regs->ss);
1783
1784 fp = compat_ptr(ss_base + regs->bp);
1756 while (entry->nr < PERF_MAX_STACK_DEPTH) { 1785 while (entry->nr < PERF_MAX_STACK_DEPTH) {
1757 unsigned long bytes; 1786 unsigned long bytes;
1758 frame.next_frame = 0; 1787 frame.next_frame = 0;
@@ -1765,8 +1794,8 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
1765 if (!valid_user_frame(fp, sizeof(frame))) 1794 if (!valid_user_frame(fp, sizeof(frame)))
1766 break; 1795 break;
1767 1796
1768 perf_callchain_store(entry, frame.return_address); 1797 perf_callchain_store(entry, cs_base + frame.return_address);
1769 fp = compat_ptr(frame.next_frame); 1798 fp = compat_ptr(ss_base + frame.next_frame);
1770 } 1799 }
1771 return 1; 1800 return 1;
1772} 1801}
@@ -1789,6 +1818,12 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
1789 return; 1818 return;
1790 } 1819 }
1791 1820
1821 /*
1822 * We don't know what to do with VM86 stacks.. ignore them for now.
1823 */
1824 if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
1825 return;
1826
1792 fp = (void __user *)regs->bp; 1827 fp = (void __user *)regs->bp;
1793 1828
1794 perf_callchain_store(entry, regs->ip); 1829 perf_callchain_store(entry, regs->ip);
@@ -1816,16 +1851,50 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
1816 } 1851 }
1817} 1852}
1818 1853
1819unsigned long perf_instruction_pointer(struct pt_regs *regs) 1854/*
1855 * Deal with code segment offsets for the various execution modes:
1856 *
1857 * VM86 - the good olde 16 bit days, where the linear address is
1858 * 20 bits and we use regs->ip + 0x10 * regs->cs.
1859 *
1860 * IA32 - Where we need to look at GDT/LDT segment descriptor tables
1861 * to figure out what the 32bit base address is.
1862 *
1863 * X32 - has TIF_X32 set, but is running in x86_64
1864 *
1865 * X86_64 - CS,DS,SS,ES are all zero based.
1866 */
1867static unsigned long code_segment_base(struct pt_regs *regs)
1820{ 1868{
1821 unsigned long ip; 1869 /*
1870 * If we are in VM86 mode, add the segment offset to convert to a
1871 * linear address.
1872 */
1873 if (regs->flags & X86_VM_MASK)
1874 return 0x10 * regs->cs;
1875
1876 /*
1877 * For IA32 we look at the GDT/LDT segment base to convert the
1878 * effective IP to a linear address.
1879 */
1880#ifdef CONFIG_X86_32
1881 if (user_mode(regs) && regs->cs != __USER_CS)
1882 return get_segment_base(regs->cs);
1883#else
1884 if (test_thread_flag(TIF_IA32)) {
1885 if (user_mode(regs) && regs->cs != __USER32_CS)
1886 return get_segment_base(regs->cs);
1887 }
1888#endif
1889 return 0;
1890}
1822 1891
1892unsigned long perf_instruction_pointer(struct pt_regs *regs)
1893{
1823 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) 1894 if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
1824 ip = perf_guest_cbs->get_guest_ip(); 1895 return perf_guest_cbs->get_guest_ip();
1825 else
1826 ip = instruction_pointer(regs);
1827 1896
1828 return ip; 1897 return regs->ip + code_segment_base(regs);
1829} 1898}
1830 1899
1831unsigned long perf_misc_flags(struct pt_regs *regs) 1900unsigned long perf_misc_flags(struct pt_regs *regs)
@@ -1838,7 +1907,7 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
1838 else 1907 else
1839 misc |= PERF_RECORD_MISC_GUEST_KERNEL; 1908 misc |= PERF_RECORD_MISC_GUEST_KERNEL;
1840 } else { 1909 } else {
1841 if (!kernel_ip(regs->ip)) 1910 if (user_mode(regs))
1842 misc |= PERF_RECORD_MISC_USER; 1911 misc |= PERF_RECORD_MISC_USER;
1843 else 1912 else
1844 misc |= PERF_RECORD_MISC_KERNEL; 1913 misc |= PERF_RECORD_MISC_KERNEL;
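
The thread running through this perf_event.c hunk is that a sampled IP is only an offset into the code segment; to report a linear address, the segment base has to be added back. get_segment_base() (new here) looks the base up in the GDT or LDT, code_segment_base() picks the right rule per execution mode, and the 32-bit callchain walker applies the same correction to the %ebp-chained frames. The arithmetic itself is tiny (illustrative C, not kernel code):

#include <stdint.h>

/* VM86: the 16-bit segment value is just scaled by 16. */
static uint32_t vm86_base(uint16_t cs)
{
	return 0x10u * cs;
}

/* Everywhere else: linear address = descriptor base + effective IP
 * (zero base for the flat 64-bit and __USER_CS/__USER32_CS cases). */
static uint64_t linear_ip(uint64_t seg_base, uint64_t ip)
{
	return seg_base + ip;
}
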
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 821d53b696d1..6605a81ba339 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -516,6 +516,26 @@ static inline bool kernel_ip(unsigned long ip)
516#endif 516#endif
517} 517}
518 518
519/*
520 * Not all PMUs provide the right context information to place the reported IP
521 * into full context. Specifically segment registers are typically not
522 * supplied.
523 *
524 * Assuming the address is a linear address (it is for IBS), we fake the CS and
525 * vm86 mode using the known zero-based code segment and 'fix up' the registers
526 * to reflect this.
527 *
528 * Intel PEBS/LBR appear to typically provide the effective address, nothing
529 * much we can do about that but pray and treat it like a linear address.
530 */
531static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
532{
533 regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
534 if (regs->flags & X86_VM_MASK)
535 regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
536 regs->ip = ip;
537}
538
519#ifdef CONFIG_CPU_SUP_AMD 539#ifdef CONFIG_CPU_SUP_AMD
520 540
521int amd_pmu_init(void); 541int amd_pmu_init(void);
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index da9bcdcd9856..7bfb5bec8630 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -13,6 +13,8 @@
13 13
14#include <asm/apic.h> 14#include <asm/apic.h>
15 15
16#include "perf_event.h"
17
16static u32 ibs_caps; 18static u32 ibs_caps;
17 19
18#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) 20#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
@@ -536,7 +538,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
536 if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) { 538 if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
537 regs.flags &= ~PERF_EFLAGS_EXACT; 539 regs.flags &= ~PERF_EFLAGS_EXACT;
538 } else { 540 } else {
539 instruction_pointer_set(&regs, ibs_data.regs[1]); 541 set_linear_ip(&regs, ibs_data.regs[1]);
540 regs.flags |= PERF_EFLAGS_EXACT; 542 regs.flags |= PERF_EFLAGS_EXACT;
541 } 543 }
542 544
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 629ae0b7ad90..e38d97bf4259 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -499,7 +499,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
499 * We sampled a branch insn, rewind using the LBR stack 499 * We sampled a branch insn, rewind using the LBR stack
500 */ 500 */
501 if (ip == to) { 501 if (ip == to) {
502 regs->ip = from; 502 set_linear_ip(regs, from);
503 return 1; 503 return 1;
504 } 504 }
505 505
@@ -529,7 +529,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
529 } while (to < ip); 529 } while (to < ip);
530 530
531 if (to == ip) { 531 if (to == ip) {
532 regs->ip = old_to; 532 set_linear_ip(regs, old_to);
533 return 1; 533 return 1;
534 } 534 }
535 535
@@ -569,7 +569,8 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
569 * A possible PERF_SAMPLE_REGS will have to transfer all regs. 569 * A possible PERF_SAMPLE_REGS will have to transfer all regs.
570 */ 570 */
571 regs = *iregs; 571 regs = *iregs;
572 regs.ip = pebs->ip; 572 regs.flags = pebs->flags;
573 set_linear_ip(&regs, pebs->ip);
573 regs.bp = pebs->bp; 574 regs.bp = pebs->bp;
574 regs.sp = pebs->sp; 575 regs.sp = pebs->sp;
575 576
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index f3851892e077..c9e5dc56630a 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -5,7 +5,7 @@
5#include "perf_event.h" 5#include "perf_event.h"
6 6
7#define UNCORE_PMU_NAME_LEN 32 7#define UNCORE_PMU_NAME_LEN 32
8#define UNCORE_PMU_HRTIMER_INTERVAL (60 * NSEC_PER_SEC) 8#define UNCORE_PMU_HRTIMER_INTERVAL (60LL * NSEC_PER_SEC)
9 9
10#define UNCORE_FIXED_EVENT 0xff 10#define UNCORE_FIXED_EVENT 0xff
11#define UNCORE_PMC_IDX_MAX_GENERIC 8 11#define UNCORE_PMC_IDX_MAX_GENERIC 8
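
The one-character uncore change is an integer-promotion fix: NSEC_PER_SEC is a long, so on a 32-bit build 60 * NSEC_PER_SEC is evaluated in 32-bit arithmetic and overflows before it is ever widened to the 64-bit hrtimer interval; the LL suffix forces the whole product to 64 bits. A stand-alone illustration (plain C; the overflow is technically undefined and only bites on ILP32 targets where long is 32 bits):

#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

int main(void)
{
	long long bad  = 60 * NSEC_PER_SEC;	/* multiplied as 32-bit long on ILP32, widened too late */
	long long good = 60LL * NSEC_PER_SEC;	/* promoted to long long before the multiply */

	printf("bad=%lld good=%lld\n", bad, good);
	return 0;
}
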
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 1f5f1d5d2a02..7ad683d78645 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -328,6 +328,7 @@ void fixup_irqs(void)
328 chip->irq_retrigger(data); 328 chip->irq_retrigger(data);
329 raw_spin_unlock(&desc->lock); 329 raw_spin_unlock(&desc->lock);
330 } 330 }
331 __this_cpu_write(vector_irq[vector], -1);
331 } 332 }
332} 333}
333#endif 334#endif
diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
index 1d5d31ea686b..dc1404bf8e4b 100644
--- a/arch/x86/kernel/kdebugfs.c
+++ b/arch/x86/kernel/kdebugfs.c
@@ -107,7 +107,7 @@ static int __init create_setup_data_nodes(struct dentry *parent)
107{ 107{
108 struct setup_data_node *node; 108 struct setup_data_node *node;
109 struct setup_data *data; 109 struct setup_data *data;
110 int error = -ENOMEM; 110 int error;
111 struct dentry *d; 111 struct dentry *d;
112 struct page *pg; 112 struct page *pg;
113 u64 pa_data; 113 u64 pa_data;
@@ -121,8 +121,10 @@ static int __init create_setup_data_nodes(struct dentry *parent)
121 121
122 while (pa_data) { 122 while (pa_data) {
123 node = kmalloc(sizeof(*node), GFP_KERNEL); 123 node = kmalloc(sizeof(*node), GFP_KERNEL);
124 if (!node) 124 if (!node) {
125 error = -ENOMEM;
125 goto err_dir; 126 goto err_dir;
127 }
126 128
127 pg = pfn_to_page((pa_data+sizeof(*data)-1) >> PAGE_SHIFT); 129 pg = pfn_to_page((pa_data+sizeof(*data)-1) >> PAGE_SHIFT);
128 if (PageHighMem(pg)) { 130 if (PageHighMem(pg)) {
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 1df8fb9e1d5d..e498b18f010c 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -316,6 +316,11 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val)
316 addr &= 1; 316 addr &= 1;
317 if (addr == 0) { 317 if (addr == 0) {
318 if (val & 0x10) { 318 if (val & 0x10) {
319 u8 edge_irr = s->irr & ~s->elcr;
320 int i;
321 bool found;
322 struct kvm_vcpu *vcpu;
323
319 s->init4 = val & 1; 324 s->init4 = val & 1;
320 s->last_irr = 0; 325 s->last_irr = 0;
321 s->irr &= s->elcr; 326 s->irr &= s->elcr;
@@ -333,6 +338,18 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val)
333 if (val & 0x08) 338 if (val & 0x08)
334 pr_pic_unimpl( 339 pr_pic_unimpl(
335 "level sensitive irq not supported"); 340 "level sensitive irq not supported");
341
342 kvm_for_each_vcpu(i, vcpu, s->pics_state->kvm)
343 if (kvm_apic_accept_pic_intr(vcpu)) {
344 found = true;
345 break;
346 }
347
348
349 if (found)
350 for (irq = 0; irq < PIC_NUM_PINS/2; irq++)
351 if (edge_irr & (1 << irq))
352 pic_clear_isr(s, irq);
336 } else if (val & 0x08) { 353 } else if (val & 0x08) {
337 if (val & 0x04) 354 if (val & 0x04)
338 s->poll = 1; 355 s->poll = 1;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c39b60707e02..c00f03de1b79 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1488,13 +1488,6 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
1488 loadsegment(ds, vmx->host_state.ds_sel); 1488 loadsegment(ds, vmx->host_state.ds_sel);
1489 loadsegment(es, vmx->host_state.es_sel); 1489 loadsegment(es, vmx->host_state.es_sel);
1490 } 1490 }
1491#else
1492 /*
1493 * The sysexit path does not restore ds/es, so we must set them to
1494 * a reasonable value ourselves.
1495 */
1496 loadsegment(ds, __USER_DS);
1497 loadsegment(es, __USER_DS);
1498#endif 1491#endif
1499 reload_tss(); 1492 reload_tss();
1500#ifdef CONFIG_X86_64 1493#ifdef CONFIG_X86_64
@@ -6370,6 +6363,19 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
6370#endif 6363#endif
6371 ); 6364 );
6372 6365
6366#ifndef CONFIG_X86_64
6367 /*
6368 * The sysexit path does not restore ds/es, so we must set them to
6369 * a reasonable value ourselves.
6370 *
6371 * We can't defer this to vmx_load_host_state() since that function
6372 * may be executed in interrupt context, which saves and restore segments
6373 * around it, nullifying its effect.
6374 */
6375 loadsegment(ds, __USER_DS);
6376 loadsegment(es, __USER_DS);
6377#endif
6378
6373 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP) 6379 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
6374 | (1 << VCPU_EXREG_RFLAGS) 6380 | (1 << VCPU_EXREG_RFLAGS)
6375 | (1 << VCPU_EXREG_CPL) 6381 | (1 << VCPU_EXREG_CPL)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 59b59508ff07..42bce48f6928 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -925,6 +925,10 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
925 */ 925 */
926 getboottime(&boot); 926 getboottime(&boot);
927 927
928 if (kvm->arch.kvmclock_offset) {
929 struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset);
930 boot = timespec_sub(boot, ts);
931 }
928 wc.sec = boot.tv_sec; 932 wc.sec = boot.tv_sec;
929 wc.nsec = boot.tv_nsec; 933 wc.nsec = boot.tv_nsec;
930 wc.version = version; 934 wc.version = version;
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 4599c3e8bcb6..4ddf497ca65b 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -142,23 +142,23 @@ static inline int save_add_info(void) {return 0;}
142#endif 142#endif
143 143
144/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */ 144/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
145void __init 145int __init
146acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) 146acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
147{ 147{
148 u64 start, end; 148 u64 start, end;
149 int node, pxm; 149 int node, pxm;
150 150
151 if (srat_disabled()) 151 if (srat_disabled())
152 return; 152 return -1;
153 if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) { 153 if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
154 bad_srat(); 154 bad_srat();
155 return; 155 return -1;
156 } 156 }
157 if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0) 157 if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
158 return; 158 return -1;
159 159
160 if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info()) 160 if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
161 return; 161 return -1;
162 start = ma->base_address; 162 start = ma->base_address;
163 end = start + ma->length; 163 end = start + ma->length;
164 pxm = ma->proximity_domain; 164 pxm = ma->proximity_domain;
@@ -168,12 +168,12 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
168 if (node < 0) { 168 if (node < 0) {
169 printk(KERN_ERR "SRAT: Too many proximity domains.\n"); 169 printk(KERN_ERR "SRAT: Too many proximity domains.\n");
170 bad_srat(); 170 bad_srat();
171 return; 171 return -1;
172 } 172 }
173 173
174 if (numa_add_memblk(node, start, end) < 0) { 174 if (numa_add_memblk(node, start, end) < 0) {
175 bad_srat(); 175 bad_srat();
176 return; 176 return -1;
177 } 177 }
178 178
179 node_set(node, numa_nodes_parsed); 179 node_set(node, numa_nodes_parsed);
@@ -181,6 +181,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
181 printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n", 181 printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
182 node, pxm, 182 node, pxm,
183 (unsigned long long) start, (unsigned long long) end - 1); 183 (unsigned long long) start, (unsigned long long) end - 1);
184 return 0;
184} 185}
185 186
186void __init acpi_numa_arch_fixup(void) {} 187void __init acpi_numa_arch_fixup(void) {}
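
acpi_numa_memory_affinity_init() previously reported nothing, so a caller could not tell whether an SRAT memory entry was accepted or silently ignored; the 0/-1 return fixes that. A hypothetical caller (the callback name and counter are illustrative, not taken from this patch) could now do:

static int __initdata parsed_numa_memblks;	/* illustrative counter */

static int __init srat_memory_cb(struct acpi_srat_mem_affinity *ma)
{
	if (!acpi_numa_memory_affinity_init(ma))	/* 0 == accepted, -1 == ignored */
		parsed_numa_memblks++;
	return 0;
}
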
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
index 51171aeff0dc..29aed7ac2c02 100644
--- a/arch/x86/syscalls/syscall_64.tbl
+++ b/arch/x86/syscalls/syscall_64.tbl
@@ -318,7 +318,7 @@
318309 common getcpu sys_getcpu 318309 common getcpu sys_getcpu
319310 64 process_vm_readv sys_process_vm_readv 319310 64 process_vm_readv sys_process_vm_readv
320311 64 process_vm_writev sys_process_vm_writev 320311 64 process_vm_writev sys_process_vm_writev
321312 64 kcmp sys_kcmp 321312 common kcmp sys_kcmp
322 322
323# 323#
324# x32-specific system call numbers start at 512 to avoid cache impact 324# x32-specific system call numbers start at 512 to avoid cache impact
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 5ccb99ae3a6f..5de4ec72766d 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -83,22 +83,22 @@ acpi_status acpi_hw_clear_acpi_status(void);
83/* 83/*
84 * hwsleep - sleep/wake support (Legacy sleep registers) 84 * hwsleep - sleep/wake support (Legacy sleep registers)
85 */ 85 */
86acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags); 86acpi_status acpi_hw_legacy_sleep(u8 sleep_state);
87 87
88acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags); 88acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state);
89 89
90acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags); 90acpi_status acpi_hw_legacy_wake(u8 sleep_state);
91 91
92/* 92/*
93 * hwesleep - sleep/wake support (Extended FADT-V5 sleep registers) 93 * hwesleep - sleep/wake support (Extended FADT-V5 sleep registers)
94 */ 94 */
95void acpi_hw_execute_sleep_method(char *method_name, u32 integer_argument); 95void acpi_hw_execute_sleep_method(char *method_name, u32 integer_argument);
96 96
97acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags); 97acpi_status acpi_hw_extended_sleep(u8 sleep_state);
98 98
99acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags); 99acpi_status acpi_hw_extended_wake_prep(u8 sleep_state);
100 100
101acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags); 101acpi_status acpi_hw_extended_wake(u8 sleep_state);
102 102
103/* 103/*
104 * hwvalid - Port I/O with validation 104 * hwvalid - Port I/O with validation
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index 48518dac5342..94996f9ae3ad 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -90,7 +90,6 @@ void acpi_hw_execute_sleep_method(char *method_pathname, u32 integer_argument)
90 * FUNCTION: acpi_hw_extended_sleep 90 * FUNCTION: acpi_hw_extended_sleep
91 * 91 *
92 * PARAMETERS: sleep_state - Which sleep state to enter 92 * PARAMETERS: sleep_state - Which sleep state to enter
93 * flags - ACPI_EXECUTE_GTS to run optional method
94 * 93 *
95 * RETURN: Status 94 * RETURN: Status
96 * 95 *
@@ -100,7 +99,7 @@ void acpi_hw_execute_sleep_method(char *method_pathname, u32 integer_argument)
100 * 99 *
101 ******************************************************************************/ 100 ******************************************************************************/
102 101
103acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags) 102acpi_status acpi_hw_extended_sleep(u8 sleep_state)
104{ 103{
105 acpi_status status; 104 acpi_status status;
106 u8 sleep_type_value; 105 u8 sleep_type_value;
@@ -125,12 +124,6 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
125 124
126 acpi_gbl_system_awake_and_running = FALSE; 125 acpi_gbl_system_awake_and_running = FALSE;
127 126
128 /* Optionally execute _GTS (Going To Sleep) */
129
130 if (flags & ACPI_EXECUTE_GTS) {
131 acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state);
132 }
133
134 /* Flush caches, as per ACPI specification */ 127 /* Flush caches, as per ACPI specification */
135 128
136 ACPI_FLUSH_CPU_CACHE(); 129 ACPI_FLUSH_CPU_CACHE();
@@ -172,7 +165,6 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
172 * FUNCTION: acpi_hw_extended_wake_prep 165 * FUNCTION: acpi_hw_extended_wake_prep
173 * 166 *
174 * PARAMETERS: sleep_state - Which sleep state we just exited 167 * PARAMETERS: sleep_state - Which sleep state we just exited
175 * flags - ACPI_EXECUTE_BFS to run optional method
176 * 168 *
177 * RETURN: Status 169 * RETURN: Status
178 * 170 *
@@ -181,7 +173,7 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
181 * 173 *
182 ******************************************************************************/ 174 ******************************************************************************/
183 175
184acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags) 176acpi_status acpi_hw_extended_wake_prep(u8 sleep_state)
185{ 177{
186 acpi_status status; 178 acpi_status status;
187 u8 sleep_type_value; 179 u8 sleep_type_value;
@@ -200,11 +192,6 @@ acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags)
200 &acpi_gbl_FADT.sleep_control); 192 &acpi_gbl_FADT.sleep_control);
201 } 193 }
202 194
203 /* Optionally execute _BFS (Back From Sleep) */
204
205 if (flags & ACPI_EXECUTE_BFS) {
206 acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state);
207 }
208 return_ACPI_STATUS(AE_OK); 195 return_ACPI_STATUS(AE_OK);
209} 196}
210 197
@@ -222,7 +209,7 @@ acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags)
222 * 209 *
223 ******************************************************************************/ 210 ******************************************************************************/
224 211
225acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags) 212acpi_status acpi_hw_extended_wake(u8 sleep_state)
226{ 213{
227 ACPI_FUNCTION_TRACE(hw_extended_wake); 214 ACPI_FUNCTION_TRACE(hw_extended_wake);
228 215
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 9960fe9ef533..3fddde056a5e 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -56,7 +56,6 @@ ACPI_MODULE_NAME("hwsleep")
56 * FUNCTION: acpi_hw_legacy_sleep 56 * FUNCTION: acpi_hw_legacy_sleep
57 * 57 *
58 * PARAMETERS: sleep_state - Which sleep state to enter 58 * PARAMETERS: sleep_state - Which sleep state to enter
59 * flags - ACPI_EXECUTE_GTS to run optional method
60 * 59 *
61 * RETURN: Status 60 * RETURN: Status
62 * 61 *
@@ -64,7 +63,7 @@ ACPI_MODULE_NAME("hwsleep")
64 * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED 63 * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
65 * 64 *
66 ******************************************************************************/ 65 ******************************************************************************/
67acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags) 66acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
68{ 67{
69 struct acpi_bit_register_info *sleep_type_reg_info; 68 struct acpi_bit_register_info *sleep_type_reg_info;
70 struct acpi_bit_register_info *sleep_enable_reg_info; 69 struct acpi_bit_register_info *sleep_enable_reg_info;
@@ -110,12 +109,6 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
110 return_ACPI_STATUS(status); 109 return_ACPI_STATUS(status);
111 } 110 }
112 111
113 /* Optionally execute _GTS (Going To Sleep) */
114
115 if (flags & ACPI_EXECUTE_GTS) {
116 acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state);
117 }
118
119 /* Get current value of PM1A control */ 112 /* Get current value of PM1A control */
120 113
121 status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL, 114 status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL,
@@ -214,7 +207,6 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
214 * FUNCTION: acpi_hw_legacy_wake_prep 207 * FUNCTION: acpi_hw_legacy_wake_prep
215 * 208 *
216 * PARAMETERS: sleep_state - Which sleep state we just exited 209 * PARAMETERS: sleep_state - Which sleep state we just exited
217 * flags - ACPI_EXECUTE_BFS to run optional method
218 * 210 *
219 * RETURN: Status 211 * RETURN: Status
220 * 212 *
@@ -224,7 +216,7 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
224 * 216 *
225 ******************************************************************************/ 217 ******************************************************************************/
226 218
227acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags) 219acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state)
228{ 220{
229 acpi_status status; 221 acpi_status status;
230 struct acpi_bit_register_info *sleep_type_reg_info; 222 struct acpi_bit_register_info *sleep_type_reg_info;
@@ -275,11 +267,6 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags)
275 } 267 }
276 } 268 }
277 269
278 /* Optionally execute _BFS (Back From Sleep) */
279
280 if (flags & ACPI_EXECUTE_BFS) {
281 acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state);
282 }
283 return_ACPI_STATUS(status); 270 return_ACPI_STATUS(status);
284} 271}
285 272
@@ -288,7 +275,6 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags)
288 * FUNCTION: acpi_hw_legacy_wake 275 * FUNCTION: acpi_hw_legacy_wake
289 * 276 *
290 * PARAMETERS: sleep_state - Which sleep state we just exited 277 * PARAMETERS: sleep_state - Which sleep state we just exited
291 * flags - Reserved, set to zero
292 * 278 *
293 * RETURN: Status 279 * RETURN: Status
294 * 280 *
@@ -297,7 +283,7 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags)
297 * 283 *
298 ******************************************************************************/ 284 ******************************************************************************/
299 285
300acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags) 286acpi_status acpi_hw_legacy_wake(u8 sleep_state)
301{ 287{
302 acpi_status status; 288 acpi_status status;
303 289
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index f8684bfe7907..1f165a750ae2 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -50,7 +50,7 @@ ACPI_MODULE_NAME("hwxfsleep")
50 50
51/* Local prototypes */ 51/* Local prototypes */
52static acpi_status 52static acpi_status
53acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id); 53acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
54 54
55/* 55/*
56 * Dispatch table used to efficiently branch to the various sleep 56 * Dispatch table used to efficiently branch to the various sleep
@@ -235,7 +235,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios)
235 * 235 *
236 ******************************************************************************/ 236 ******************************************************************************/
237static acpi_status 237static acpi_status
238acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id) 238acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id)
239{ 239{
240 acpi_status status; 240 acpi_status status;
241 struct acpi_sleep_functions *sleep_functions = 241 struct acpi_sleep_functions *sleep_functions =
@@ -248,11 +248,11 @@ acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id)
248 * use the extended sleep registers 248 * use the extended sleep registers
249 */ 249 */
250 if (acpi_gbl_reduced_hardware || acpi_gbl_FADT.sleep_control.address) { 250 if (acpi_gbl_reduced_hardware || acpi_gbl_FADT.sleep_control.address) {
251 status = sleep_functions->extended_function(sleep_state, flags); 251 status = sleep_functions->extended_function(sleep_state);
252 } else { 252 } else {
253 /* Legacy sleep */ 253 /* Legacy sleep */
254 254
255 status = sleep_functions->legacy_function(sleep_state, flags); 255 status = sleep_functions->legacy_function(sleep_state);
256 } 256 }
257 257
258 return (status); 258 return (status);
@@ -262,7 +262,7 @@ acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id)
262 * For the case where reduced-hardware-only code is being generated, 262 * For the case where reduced-hardware-only code is being generated,
263 * we know that only the extended sleep registers are available 263 * we know that only the extended sleep registers are available
264 */ 264 */
265 status = sleep_functions->extended_function(sleep_state, flags); 265 status = sleep_functions->extended_function(sleep_state);
266 return (status); 266 return (status);
267 267
268#endif /* !ACPI_REDUCED_HARDWARE */ 268#endif /* !ACPI_REDUCED_HARDWARE */
@@ -349,7 +349,6 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
349 * FUNCTION: acpi_enter_sleep_state 349 * FUNCTION: acpi_enter_sleep_state
350 * 350 *
351 * PARAMETERS: sleep_state - Which sleep state to enter 351 * PARAMETERS: sleep_state - Which sleep state to enter
352 * flags - ACPI_EXECUTE_GTS to run optional method
353 * 352 *
354 * RETURN: Status 353 * RETURN: Status
355 * 354 *
@@ -357,7 +356,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
357 * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED 356 * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
358 * 357 *
359 ******************************************************************************/ 358 ******************************************************************************/
360acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags) 359acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
361{ 360{
362 acpi_status status; 361 acpi_status status;
363 362
@@ -371,7 +370,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags)
371 } 370 }
372 371
373 status = 372 status =
374 acpi_hw_sleep_dispatch(sleep_state, flags, ACPI_SLEEP_FUNCTION_ID); 373 acpi_hw_sleep_dispatch(sleep_state, ACPI_SLEEP_FUNCTION_ID);
375 return_ACPI_STATUS(status); 374 return_ACPI_STATUS(status);
376} 375}
377 376
@@ -391,14 +390,14 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)
391 * Called with interrupts DISABLED. 390 * Called with interrupts DISABLED.
392 * 391 *
393 ******************************************************************************/ 392 ******************************************************************************/
394acpi_status acpi_leave_sleep_state_prep(u8 sleep_state, u8 flags) 393acpi_status acpi_leave_sleep_state_prep(u8 sleep_state)
395{ 394{
396 acpi_status status; 395 acpi_status status;
397 396
398 ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep); 397 ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep);
399 398
400 status = 399 status =
401 acpi_hw_sleep_dispatch(sleep_state, flags, 400 acpi_hw_sleep_dispatch(sleep_state,
402 ACPI_WAKE_PREP_FUNCTION_ID); 401 ACPI_WAKE_PREP_FUNCTION_ID);
403 return_ACPI_STATUS(status); 402 return_ACPI_STATUS(status);
404} 403}
@@ -423,8 +422,7 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
423 422
424 ACPI_FUNCTION_TRACE(acpi_leave_sleep_state); 423 ACPI_FUNCTION_TRACE(acpi_leave_sleep_state);
425 424
426 425 status = acpi_hw_sleep_dispatch(sleep_state, ACPI_WAKE_FUNCTION_ID);
427 status = acpi_hw_sleep_dispatch(sleep_state, 0, ACPI_WAKE_FUNCTION_ID);
428 return_ACPI_STATUS(status); 426 return_ACPI_STATUS(status);
429} 427}
430 428
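
A minimal sketch of the dispatch pattern used in hwxfsleep.c above, after the flags argument is dropped: each table entry carries a legacy and an extended handler, both now taking only the sleep state. Types and names here are simplified stand-ins, not the real ACPICA definitions.

typedef int acpi_status;
typedef unsigned char u8;

struct sleep_functions {
	acpi_status (*legacy_function)(u8 sleep_state);
	acpi_status (*extended_function)(u8 sleep_state);
};

static acpi_status sleep_dispatch(const struct sleep_functions *funcs,
				  u8 sleep_state, int use_extended)
{
	/* Extended (FADT-V5) sleep registers when available, legacy otherwise */
	return use_extended ? funcs->extended_function(sleep_state)
			    : funcs->legacy_function(sleep_state);
}
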
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index e56f3be7b07d..cb31298ca684 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -237,6 +237,8 @@ acpi_parse_processor_affinity(struct acpi_subtable_header *header,
237 return 0; 237 return 0;
238} 238}
239 239
240static int __initdata parsed_numa_memblks;
241
240static int __init 242static int __init
241acpi_parse_memory_affinity(struct acpi_subtable_header * header, 243acpi_parse_memory_affinity(struct acpi_subtable_header * header,
242 const unsigned long end) 244 const unsigned long end)
@@ -250,8 +252,8 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header,
250 acpi_table_print_srat_entry(header); 252 acpi_table_print_srat_entry(header);
251 253
252 /* let architecture-dependent part to do it */ 254 /* let architecture-dependent part to do it */
253 acpi_numa_memory_affinity_init(memory_affinity); 255 if (!acpi_numa_memory_affinity_init(memory_affinity))
254 256 parsed_numa_memblks++;
255 return 0; 257 return 0;
256} 258}
257 259
@@ -304,8 +306,10 @@ int __init acpi_numa_init(void)
304 306
305 acpi_numa_arch_fixup(); 307 acpi_numa_arch_fixup();
306 308
307 if (cnt <= 0) 309 if (cnt < 0)
308 return cnt ?: -ENOENT; 310 return cnt;
311 else if (!parsed_numa_memblks)
312 return -ENOENT;
309 return 0; 313 return 0;
310} 314}
311 315
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index ec54014c321c..72a2c98bc429 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -573,8 +573,15 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
573 OSC_CLOCK_PWR_CAPABILITY_SUPPORT; 573 OSC_CLOCK_PWR_CAPABILITY_SUPPORT;
574 if (pci_msi_enabled()) 574 if (pci_msi_enabled())
575 flags |= OSC_MSI_SUPPORT; 575 flags |= OSC_MSI_SUPPORT;
576 if (flags != base_flags) 576 if (flags != base_flags) {
577 acpi_pci_osc_support(root, flags); 577 status = acpi_pci_osc_support(root, flags);
578 if (ACPI_FAILURE(status)) {
579 dev_info(root->bus->bridge, "ACPI _OSC support "
580 "notification failed, disabling PCIe ASPM\n");
581 pcie_no_aspm();
582 flags = base_flags;
583 }
584 }
578 585
579 if (!pcie_ports_disabled 586 if (!pcie_ports_disabled
580 && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) { 587 && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) {
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index ff8e04f2fab4..bfc31cb0dd3e 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -437,7 +437,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
437 /* Normal CPU soft online event */ 437 /* Normal CPU soft online event */
438 } else { 438 } else {
439 acpi_processor_ppc_has_changed(pr, 0); 439 acpi_processor_ppc_has_changed(pr, 0);
440 acpi_processor_cst_has_changed(pr); 440 acpi_processor_hotplug(pr);
441 acpi_processor_reevaluate_tstate(pr, action); 441 acpi_processor_reevaluate_tstate(pr, action);
442 acpi_processor_tstate_has_changed(pr); 442 acpi_processor_tstate_has_changed(pr);
443 } 443 }
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 7a7a9c929247..fdcdbb652915 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -28,36 +28,7 @@
28#include "internal.h" 28#include "internal.h"
29#include "sleep.h" 29#include "sleep.h"
30 30
31u8 wake_sleep_flags = ACPI_NO_OPTIONAL_METHODS;
32static unsigned int gts, bfs;
33static int set_param_wake_flag(const char *val, struct kernel_param *kp)
34{
35 int ret = param_set_int(val, kp);
36
37 if (ret)
38 return ret;
39
40 if (kp->arg == (const char *)&gts) {
41 if (gts)
42 wake_sleep_flags |= ACPI_EXECUTE_GTS;
43 else
44 wake_sleep_flags &= ~ACPI_EXECUTE_GTS;
45 }
46 if (kp->arg == (const char *)&bfs) {
47 if (bfs)
48 wake_sleep_flags |= ACPI_EXECUTE_BFS;
49 else
50 wake_sleep_flags &= ~ACPI_EXECUTE_BFS;
51 }
52 return ret;
53}
54module_param_call(gts, set_param_wake_flag, param_get_int, &gts, 0644);
55module_param_call(bfs, set_param_wake_flag, param_get_int, &bfs, 0644);
56MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
57MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".);
58
59static u8 sleep_states[ACPI_S_STATE_COUNT]; 31static u8 sleep_states[ACPI_S_STATE_COUNT];
60static bool pwr_btn_event_pending;
61 32
62static void acpi_sleep_tts_switch(u32 acpi_state) 33static void acpi_sleep_tts_switch(u32 acpi_state)
63{ 34{
@@ -110,6 +81,7 @@ static int acpi_sleep_prepare(u32 acpi_state)
110 81
111#ifdef CONFIG_ACPI_SLEEP 82#ifdef CONFIG_ACPI_SLEEP
112static u32 acpi_target_sleep_state = ACPI_STATE_S0; 83static u32 acpi_target_sleep_state = ACPI_STATE_S0;
84static bool pwr_btn_event_pending;
113 85
114/* 86/*
115 * The ACPI specification wants us to save NVS memory regions during hibernation 87 * The ACPI specification wants us to save NVS memory regions during hibernation
@@ -305,7 +277,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
305 switch (acpi_state) { 277 switch (acpi_state) {
306 case ACPI_STATE_S1: 278 case ACPI_STATE_S1:
307 barrier(); 279 barrier();
308 status = acpi_enter_sleep_state(acpi_state, wake_sleep_flags); 280 status = acpi_enter_sleep_state(acpi_state);
309 break; 281 break;
310 282
311 case ACPI_STATE_S3: 283 case ACPI_STATE_S3:
@@ -319,8 +291,8 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
319 /* This violates the spec but is required for bug compatibility. */ 291 /* This violates the spec but is required for bug compatibility. */
320 acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1); 292 acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
321 293
322 /* Reprogram control registers and execute _BFS */ 294 /* Reprogram control registers */
323 acpi_leave_sleep_state_prep(acpi_state, wake_sleep_flags); 295 acpi_leave_sleep_state_prep(acpi_state);
324 296
325 /* ACPI 3.0 specs (P62) says that it's the responsibility 297 /* ACPI 3.0 specs (P62) says that it's the responsibility
326 * of the OSPM to clear the status bit [ implying that the 298 * of the OSPM to clear the status bit [ implying that the
@@ -603,9 +575,9 @@ static int acpi_hibernation_enter(void)
603 ACPI_FLUSH_CPU_CACHE(); 575 ACPI_FLUSH_CPU_CACHE();
604 576
605 /* This shouldn't return. If it returns, we have a problem */ 577 /* This shouldn't return. If it returns, we have a problem */
606 status = acpi_enter_sleep_state(ACPI_STATE_S4, wake_sleep_flags); 578 status = acpi_enter_sleep_state(ACPI_STATE_S4);
607 /* Reprogram control registers and execute _BFS */ 579 /* Reprogram control registers */
608 acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags); 580 acpi_leave_sleep_state_prep(ACPI_STATE_S4);
609 581
610 return ACPI_SUCCESS(status) ? 0 : -EFAULT; 582 return ACPI_SUCCESS(status) ? 0 : -EFAULT;
611} 583}
@@ -617,8 +589,8 @@ static void acpi_hibernation_leave(void)
617 * enable it here. 589 * enable it here.
618 */ 590 */
619 acpi_enable(); 591 acpi_enable();
620 /* Reprogram control registers and execute _BFS */ 592 /* Reprogram control registers */
621 acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags); 593 acpi_leave_sleep_state_prep(ACPI_STATE_S4);
622 /* Check the hardware signature */ 594 /* Check the hardware signature */
623 if (facs && s4_hardware_signature != facs->hardware_signature) { 595 if (facs && s4_hardware_signature != facs->hardware_signature) {
624 printk(KERN_EMERG "ACPI: Hardware changed while hibernated, " 596 printk(KERN_EMERG "ACPI: Hardware changed while hibernated, "
@@ -892,33 +864,7 @@ static void acpi_power_off(void)
892 /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */ 864 /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
893 printk(KERN_DEBUG "%s called\n", __func__); 865 printk(KERN_DEBUG "%s called\n", __func__);
894 local_irq_disable(); 866 local_irq_disable();
895 acpi_enter_sleep_state(ACPI_STATE_S5, wake_sleep_flags); 867 acpi_enter_sleep_state(ACPI_STATE_S5);
896}
897
898/*
899 * ACPI 2.0 created the optional _GTS and _BFS,
900 * but industry adoption has been neither rapid nor broad.
901 *
902 * Linux gets into trouble when it executes poorly validated
903 * paths through the BIOS, so disable _GTS and _BFS by default,
904 * but do speak up and offer the option to enable them.
905 */
906static void __init acpi_gts_bfs_check(void)
907{
908 acpi_handle dummy;
909
910 if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__GTS, &dummy)))
911 {
912 printk(KERN_NOTICE PREFIX "BIOS offers _GTS\n");
913 printk(KERN_NOTICE PREFIX "If \"acpi.gts=1\" improves suspend, "
914 "please notify linux-acpi@vger.kernel.org\n");
915 }
916 if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__BFS, &dummy)))
917 {
918 printk(KERN_NOTICE PREFIX "BIOS offers _BFS\n");
919 printk(KERN_NOTICE PREFIX "If \"acpi.bfs=1\" improves resume, "
920 "please notify linux-acpi@vger.kernel.org\n");
921 }
922} 868}
923 869
924int __init acpi_sleep_init(void) 870int __init acpi_sleep_init(void)
@@ -979,6 +925,5 @@ int __init acpi_sleep_init(void)
979 * object can also be evaluated when the system enters S5. 925 * object can also be evaluated when the system enters S5.
980 */ 926 */
981 register_reboot_notifier(&tts_notifier); 927 register_reboot_notifier(&tts_notifier);
982 acpi_gts_bfs_check();
983 return 0; 928 return 0;
984} 929}
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 240a24400976..7c3f98ba4afe 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -173,7 +173,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
173{ 173{
174 int result = 0; 174 int result = 0;
175 175
176 if (!strncmp(val, "enable", strlen("enable"))) { 176 if (!strncmp(val, "enable", sizeof("enable") - 1)) {
177 result = acpi_debug_trace(trace_method_name, trace_debug_level, 177 result = acpi_debug_trace(trace_method_name, trace_debug_level,
178 trace_debug_layer, 0); 178 trace_debug_layer, 0);
179 if (result) 179 if (result)
@@ -181,7 +181,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
181 goto exit; 181 goto exit;
182 } 182 }
183 183
184 if (!strncmp(val, "disable", strlen("disable"))) { 184 if (!strncmp(val, "disable", sizeof("disable") - 1)) {
185 int name = 0; 185 int name = 0;
186 result = acpi_debug_trace((char *)&name, trace_debug_level, 186 result = acpi_debug_trace((char *)&name, trace_debug_level,
187 trace_debug_layer, 0); 187 trace_debug_layer, 0);
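
The sysfs.c hunk above swaps strlen() for sizeof() on string literals. For a literal, sizeof includes the terminating NUL, so sizeof("enable") - 1 equals strlen("enable") but is a compile-time constant. A standalone check of that equivalence:

#include <assert.h>
#include <string.h>

int main(void)
{
	/* sizeof counts the trailing '\0'; subtracting 1 matches strlen() */
	assert(sizeof("enable") - 1 == strlen("enable"));   /* both 6 */
	assert(sizeof("disable") - 1 == strlen("disable")); /* both 7 */
	return 0;
}
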
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index d4386019af5d..96cce6d53195 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -2362,7 +2362,7 @@ static int __devinit ia_init(struct atm_dev *dev)
2362 { 2362 {
2363 printk(DEV_LABEL " (itf %d): can't set up page mapping\n", 2363 printk(DEV_LABEL " (itf %d): can't set up page mapping\n",
2364 dev->number); 2364 dev->number);
2365 return error; 2365 return -ENOMEM;
2366 } 2366 }
2367 IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n", 2367 IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",
2368 dev->number, iadev->pci->revision, base, iadev->irq);) 2368 dev->number, iadev->pci->revision, base, iadev->irq);)
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 11b32d2642df..a6e5672c67e7 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -272,6 +272,7 @@ static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
272 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) }, 272 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
273 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) }, 273 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
274 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) }, 274 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
275 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
275 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, 276 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
276 { 0, }, 277 { 0, },
277}; 278};
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index 26823d97fd9f..9ea4627dc0c2 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -507,7 +507,9 @@ static bool bcma_sprom_onchip_available(struct bcma_bus *bus)
507 /* for these chips OTP is always available */ 507 /* for these chips OTP is always available */
508 present = true; 508 present = true;
509 break; 509 break;
510 510 case BCMA_CHIP_ID_BCM43228:
511 present = chip_status & BCMA_CC_CHIPST_43228_OTP_PRESENT;
512 break;
511 default: 513 default:
512 present = false; 514 present = false;
513 break; 515 break;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 2e0e7fc1dbba..dbe6135a2abe 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -3537,9 +3537,9 @@ static void drbd_cleanup(void)
3537} 3537}
3538 3538
3539/** 3539/**
3540 * drbd_congested() - Callback for pdflush 3540 * drbd_congested() - Callback for the flusher thread
3541 * @congested_data: User data 3541 * @congested_data: User data
3542 * @bdi_bits: Bits pdflush is currently interested in 3542 * @bdi_bits: Bits the BDI flusher thread is currently interested in
3543 * 3543 *
3544 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested. 3544 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
3545 */ 3545 */
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 6d1cbdfc9b2a..b64502dfa9f4 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -296,8 +296,13 @@ static int iommu_init_device(struct device *dev)
296 } else 296 } else
297 dma_pdev = pci_dev_get(pdev); 297 dma_pdev = pci_dev_get(pdev);
298 298
299 /* Account for quirked devices */
299 swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev)); 300 swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
300 301
302 /*
303 * If it's a multifunction device that does not support our
304 * required ACS flags, add to the same group as function 0.
305 */
301 if (dma_pdev->multifunction && 306 if (dma_pdev->multifunction &&
302 !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) 307 !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
303 swap_pci_ref(&dma_pdev, 308 swap_pci_ref(&dma_pdev,
@@ -305,14 +310,28 @@ static int iommu_init_device(struct device *dev)
305 PCI_DEVFN(PCI_SLOT(dma_pdev->devfn), 310 PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
306 0))); 311 0)));
307 312
313 /*
314 * Devices on the root bus go through the iommu. If that's not us,
315 * find the next upstream device and test ACS up to the root bus.
316 * Finding the next device may require skipping virtual buses.
317 */
308 while (!pci_is_root_bus(dma_pdev->bus)) { 318 while (!pci_is_root_bus(dma_pdev->bus)) {
309 if (pci_acs_path_enabled(dma_pdev->bus->self, 319 struct pci_bus *bus = dma_pdev->bus;
310 NULL, REQ_ACS_FLAGS)) 320
321 while (!bus->self) {
322 if (!pci_is_root_bus(bus))
323 bus = bus->parent;
324 else
325 goto root_bus;
326 }
327
328 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
311 break; 329 break;
312 330
313 swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self)); 331 swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
314 } 332 }
315 333
334root_bus:
316 group = iommu_group_get(&dma_pdev->dev); 335 group = iommu_group_get(&dma_pdev->dev);
317 pci_dev_put(dma_pdev); 336 pci_dev_put(dma_pdev);
318 if (!group) { 337 if (!group) {
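
The amd_iommu.c hunk above (and the matching intel-iommu.c hunk below) reworks the upstream ACS walk so that buses without a bridge device (bus->self == NULL, e.g. virtual buses) are skipped by climbing to the parent. A simplified, self-contained sketch of that skip; the structures are stand-ins for the real struct pci_bus / struct pci_dev:

struct bus;
struct dev { struct bus *bus; };
struct bus { struct dev *self; struct bus *parent; int is_root; };

/* Return the first upstream bridge device whose ACS path should be
 * tested, or NULL once the root bus is reached without finding one. */
static struct dev *next_upstream(struct dev *d)
{
	struct bus *bus = d->bus;

	/* Skip buses that have no bridge device (e.g. virtual buses) */
	while (!bus->self) {
		if (bus->is_root)
			return NULL;	/* hit the root: stop the walk */
		bus = bus->parent;
	}
	return bus->self;
}
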
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 500e7f15f5c2..0a2ea317120a 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1131,9 +1131,6 @@ static int __init amd_iommu_init_pci(void)
1131 break; 1131 break;
1132 } 1132 }
1133 1133
1134 /* Make sure ACS will be enabled */
1135 pci_request_acs();
1136
1137 ret = amd_iommu_init_devices(); 1134 ret = amd_iommu_init_devices();
1138 1135
1139 print_iommu_info(); 1136 print_iommu_info();
@@ -1652,6 +1649,9 @@ static bool detect_ivrs(void)
1652 1649
1653 early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size); 1650 early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
1654 1651
1652 /* Make sure ACS will be enabled during PCI probe */
1653 pci_request_acs();
1654
1655 return true; 1655 return true;
1656} 1656}
1657 1657
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 45350ff5e93c..80bad32aa463 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -732,9 +732,9 @@ static int exynos_iommu_domain_init(struct iommu_domain *domain)
732 spin_lock_init(&priv->pgtablelock); 732 spin_lock_init(&priv->pgtablelock);
733 INIT_LIST_HEAD(&priv->clients); 733 INIT_LIST_HEAD(&priv->clients);
734 734
735 dom->geometry.aperture_start = 0; 735 domain->geometry.aperture_start = 0;
736 dom->geometry.aperture_end = ~0UL; 736 domain->geometry.aperture_end = ~0UL;
737 dom->geometry.force_aperture = true; 737 domain->geometry.force_aperture = true;
738 738
739 domain->priv = priv; 739 domain->priv = priv;
740 return 0; 740 return 0;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 7469b5346643..2297ec193eb4 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2008,6 +2008,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
2008 if (!drhd) { 2008 if (!drhd) {
2009 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n", 2009 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
2010 pci_name(pdev)); 2010 pci_name(pdev));
2011 free_domain_mem(domain);
2011 return NULL; 2012 return NULL;
2012 } 2013 }
2013 iommu = drhd->iommu; 2014 iommu = drhd->iommu;
@@ -4124,8 +4125,13 @@ static int intel_iommu_add_device(struct device *dev)
4124 } else 4125 } else
4125 dma_pdev = pci_dev_get(pdev); 4126 dma_pdev = pci_dev_get(pdev);
4126 4127
4128 /* Account for quirked devices */
4127 swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev)); 4129 swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
4128 4130
4131 /*
4132 * If it's a multifunction device that does not support our
4133 * required ACS flags, add to the same group as function 0.
4134 */
4129 if (dma_pdev->multifunction && 4135 if (dma_pdev->multifunction &&
4130 !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) 4136 !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
4131 swap_pci_ref(&dma_pdev, 4137 swap_pci_ref(&dma_pdev,
@@ -4133,14 +4139,28 @@ static int intel_iommu_add_device(struct device *dev)
4133 PCI_DEVFN(PCI_SLOT(dma_pdev->devfn), 4139 PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
4134 0))); 4140 0)));
4135 4141
4142 /*
4143 * Devices on the root bus go through the iommu. If that's not us,
4144 * find the next upstream device and test ACS up to the root bus.
4145 * Finding the next device may require skipping virtual buses.
4146 */
4136 while (!pci_is_root_bus(dma_pdev->bus)) { 4147 while (!pci_is_root_bus(dma_pdev->bus)) {
4137 if (pci_acs_path_enabled(dma_pdev->bus->self, 4148 struct pci_bus *bus = dma_pdev->bus;
4138 NULL, REQ_ACS_FLAGS)) 4149
4150 while (!bus->self) {
4151 if (!pci_is_root_bus(bus))
4152 bus = bus->parent;
4153 else
4154 goto root_bus;
4155 }
4156
4157 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
4139 break; 4158 break;
4140 4159
4141 swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self)); 4160 swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
4142 } 4161 }
4143 4162
4163root_bus:
4144 group = iommu_group_get(&dma_pdev->dev); 4164 group = iommu_group_get(&dma_pdev->dev);
4145 pci_dev_put(dma_pdev); 4165 pci_dev_put(dma_pdev);
4146 if (!group) { 4166 if (!group) {
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 4ba325ab6262..2a4bb36bc688 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -799,14 +799,14 @@ static void smmu_iommu_detach_dev(struct iommu_domain *domain,
799 goto out; 799 goto out;
800 } 800 }
801 } 801 }
802 dev_err(smmu->dev, "Couldn't find %s\n", dev_name(c->dev)); 802 dev_err(smmu->dev, "Couldn't find %s\n", dev_name(dev));
803out: 803out:
804 spin_unlock(&as->client_lock); 804 spin_unlock(&as->client_lock);
805} 805}
806 806
807static int smmu_iommu_domain_init(struct iommu_domain *domain) 807static int smmu_iommu_domain_init(struct iommu_domain *domain)
808{ 808{
809 int i, err = -ENODEV; 809 int i, err = -EAGAIN;
810 unsigned long flags; 810 unsigned long flags;
811 struct smmu_as *as; 811 struct smmu_as *as;
812 struct smmu_device *smmu = smmu_handle; 812 struct smmu_device *smmu = smmu_handle;
@@ -814,11 +814,14 @@ static int smmu_iommu_domain_init(struct iommu_domain *domain)
814 /* Look for a free AS with lock held */ 814 /* Look for a free AS with lock held */
815 for (i = 0; i < smmu->num_as; i++) { 815 for (i = 0; i < smmu->num_as; i++) {
816 as = &smmu->as[i]; 816 as = &smmu->as[i];
817 if (!as->pdir_page) { 817
818 err = alloc_pdir(as); 818 if (as->pdir_page)
819 if (!err) 819 continue;
820 goto found; 820
821 } 821 err = alloc_pdir(as);
822 if (!err)
823 goto found;
824
822 if (err != -EAGAIN) 825 if (err != -EAGAIN)
823 break; 826 break;
824 } 827 }
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
index 5405ec644db3..baf2686aa8eb 100644
--- a/drivers/isdn/isdnloop/isdnloop.c
+++ b/drivers/isdn/isdnloop/isdnloop.c
@@ -16,7 +16,6 @@
16#include <linux/sched.h> 16#include <linux/sched.h>
17#include "isdnloop.h" 17#include "isdnloop.h"
18 18
19static char *revision = "$Revision: 1.11.6.7 $";
20static char *isdnloop_id = "loop0"; 19static char *isdnloop_id = "loop0";
21 20
22MODULE_DESCRIPTION("ISDN4Linux: Pseudo Driver that simulates an ISDN card"); 21MODULE_DESCRIPTION("ISDN4Linux: Pseudo Driver that simulates an ISDN card");
@@ -1494,17 +1493,6 @@ isdnloop_addcard(char *id1)
1494static int __init 1493static int __init
1495isdnloop_init(void) 1494isdnloop_init(void)
1496{ 1495{
1497 char *p;
1498 char rev[10];
1499
1500 if ((p = strchr(revision, ':'))) {
1501 strcpy(rev, p + 1);
1502 p = strchr(rev, '$');
1503 *p = 0;
1504 } else
1505 strcpy(rev, " ??? ");
1506 printk(KERN_NOTICE "isdnloop-ISDN-driver Rev%s\n", rev);
1507
1508 if (isdnloop_id) 1496 if (isdnloop_id)
1509 return (isdnloop_addcard(isdnloop_id)); 1497 return (isdnloop_addcard(isdnloop_id));
1510 1498
diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c
index 0dc8abca1407..949cabb88f1c 100644
--- a/drivers/isdn/mISDN/layer2.c
+++ b/drivers/isdn/mISDN/layer2.c
@@ -2222,7 +2222,7 @@ create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, int tei,
2222 InitWin(l2); 2222 InitWin(l2);
2223 l2->l2m.fsm = &l2fsm; 2223 l2->l2m.fsm = &l2fsm;
2224 if (test_bit(FLG_LAPB, &l2->flag) || 2224 if (test_bit(FLG_LAPB, &l2->flag) ||
2225 test_bit(FLG_PTP, &l2->flag) || 2225 test_bit(FLG_FIXED_TEI, &l2->flag) ||
2226 test_bit(FLG_LAPD_NET, &l2->flag)) 2226 test_bit(FLG_LAPD_NET, &l2->flag))
2227 l2->l2m.state = ST_L2_4; 2227 l2->l2m.state = ST_L2_4;
2228 else 2228 else
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index cfff454f628b..c3bb304eca07 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -19,14 +19,13 @@
19#include <linux/mtd/map.h> 19#include <linux/mtd/map.h>
20#include <linux/mtd/partitions.h> 20#include <linux/mtd/partitions.h>
21#include <asm/io.h> 21#include <asm/io.h>
22#include <asm/sections.h>
22 23
23/****************************************************************************/ 24/****************************************************************************/
24 25
25extern char _ebss;
26
27struct map_info uclinux_ram_map = { 26struct map_info uclinux_ram_map = {
28 .name = "RAM", 27 .name = "RAM",
29 .phys = (unsigned long)&_ebss, 28 .phys = (unsigned long)__bss_stop,
30 .size = 0, 29 .size = 0,
31}; 30};
32 31
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 545c09ed9079..cff6f023c03a 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -996,9 +996,7 @@ static int __init cops_module_init(void)
996 printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n", 996 printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n",
997 cardname); 997 cardname);
998 cops_dev = cops_probe(-1); 998 cops_dev = cops_probe(-1);
999 if (IS_ERR(cops_dev)) 999 return PTR_RET(cops_dev);
1000 return PTR_ERR(cops_dev);
1001 return 0;
1002} 1000}
1003 1001
1004static void __exit cops_module_exit(void) 1002static void __exit cops_module_exit(void)
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index 0910dce3996d..b5782cdf0bca 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -1243,9 +1243,7 @@ static int __init ltpc_module_init(void)
1243 "ltpc: Autoprobing is not recommended for modules\n"); 1243 "ltpc: Autoprobing is not recommended for modules\n");
1244 1244
1245 dev_ltpc = ltpc_probe(); 1245 dev_ltpc = ltpc_probe();
1246 if (IS_ERR(dev_ltpc)) 1246 return PTR_RET(dev_ltpc);
1247 return PTR_ERR(dev_ltpc);
1248 return 0;
1249} 1247}
1250module_init(ltpc_module_init); 1248module_init(ltpc_module_init);
1251#endif 1249#endif
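
The cops.c and ltpc.c hunks above replace an open-coded IS_ERR/PTR_ERR check with PTR_RET(). As an illustrative equivalent (not a copy of include/linux/err.h, where the helper was later superseded by PTR_ERR_OR_ZERO), PTR_RET() collapses an ERR_PTR-style return into 0 on success or the encoded negative errno on failure:

#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

/* Illustrative equivalent of PTR_RET(): 0 for a valid pointer,
 * the negative errno for an ERR_PTR() value. */
static inline int ptr_ret_equiv(const void *ptr)
{
	if (IS_ERR_VALUE((unsigned long)ptr))
		return (int)(long)ptr;
	return 0;
}

With it, "return PTR_RET(cops_dev);" behaves like the three lines it replaces.
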
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index f0c8bd54ce29..021d69c5d9bc 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1712,7 +1712,7 @@ e100_set_network_leds(int active)
1712static void 1712static void
1713e100_netpoll(struct net_device* netdev) 1713e100_netpoll(struct net_device* netdev)
1714{ 1714{
1715 e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev, NULL); 1715 e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev);
1716} 1716}
1717#endif 1717#endif
1718 1718
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 734fd87cd990..62f754bd0dfe 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2485,6 +2485,7 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2485 break; 2485 break;
2486 2486
2487 default: 2487 default:
2488 kfree(new_cmd);
2488 BNX2X_ERR("Unknown command: %d\n", cmd); 2489 BNX2X_ERR("Unknown command: %d\n", cmd);
2489 return -EINVAL; 2490 return -EINVAL;
2490 } 2491 }
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index c60de89b6669..90a903d83d87 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1948,7 +1948,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
1948 1948
1949 if (adapter->num_rx_qs != MAX_RX_QS) 1949 if (adapter->num_rx_qs != MAX_RX_QS)
1950 dev_info(&adapter->pdev->dev, 1950 dev_info(&adapter->pdev->dev,
1951 "Created only %d receive queues", adapter->num_rx_qs); 1951 "Created only %d receive queues\n", adapter->num_rx_qs);
1952 1952
1953 return 0; 1953 return 0;
1954} 1954}
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 5e84eaac48c1..ba994fb4cec6 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -254,6 +254,14 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
254 */ 254 */
255 size += NVM_WORD_SIZE_BASE_SHIFT; 255 size += NVM_WORD_SIZE_BASE_SHIFT;
256 256
257 /*
258 * Check for invalid size
259 */
260 if ((hw->mac.type == e1000_82576) && (size > 15)) {
261 pr_notice("The NVM size is not valid, defaulting to 32K\n");
262 size = 15;
263 }
264
257 nvm->word_size = 1 << size; 265 nvm->word_size = 1 << size;
258 if (hw->mac.type < e1000_i210) { 266 if (hw->mac.type < e1000_i210) {
259 nvm->opcode_bits = 8; 267 nvm->opcode_bits = 8;
@@ -281,14 +289,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
281 } else 289 } else
282 nvm->type = e1000_nvm_flash_hw; 290 nvm->type = e1000_nvm_flash_hw;
283 291
284 /*
285 * Check for invalid size
286 */
287 if ((hw->mac.type == e1000_82576) && (size > 15)) {
288 pr_notice("The NVM size is not valid, defaulting to 32K\n");
289 size = 15;
290 }
291
292 /* NVM Function Pointers */ 292 /* NVM Function Pointers */
293 switch (hw->mac.type) { 293 switch (hw->mac.type) {
294 case e1000_82580: 294 case e1000_82580:
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index a19c84cad0e9..90550f5e3dd9 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -209,8 +209,8 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
209 /* When SoL/IDER sessions are active, autoneg/speed/duplex 209 /* When SoL/IDER sessions are active, autoneg/speed/duplex
210 * cannot be changed */ 210 * cannot be changed */
211 if (igb_check_reset_block(hw)) { 211 if (igb_check_reset_block(hw)) {
212 dev_err(&adapter->pdev->dev, "Cannot change link " 212 dev_err(&adapter->pdev->dev,
213 "characteristics when SoL/IDER is active.\n"); 213 "Cannot change link characteristics when SoL/IDER is active.\n");
214 return -EINVAL; 214 return -EINVAL;
215 } 215 }
216 216
@@ -1089,8 +1089,8 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
1089 wr32(reg, (_test[pat] & write)); 1089 wr32(reg, (_test[pat] & write));
1090 val = rd32(reg) & mask; 1090 val = rd32(reg) & mask;
1091 if (val != (_test[pat] & write & mask)) { 1091 if (val != (_test[pat] & write & mask)) {
1092 dev_err(&adapter->pdev->dev, "pattern test reg %04X " 1092 dev_err(&adapter->pdev->dev,
1093 "failed: got 0x%08X expected 0x%08X\n", 1093 "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
1094 reg, val, (_test[pat] & write & mask)); 1094 reg, val, (_test[pat] & write & mask));
1095 *data = reg; 1095 *data = reg;
1096 return 1; 1096 return 1;
@@ -1108,8 +1108,8 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
1108 wr32(reg, write & mask); 1108 wr32(reg, write & mask);
1109 val = rd32(reg); 1109 val = rd32(reg);
1110 if ((write & mask) != (val & mask)) { 1110 if ((write & mask) != (val & mask)) {
1111 dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:" 1111 dev_err(&adapter->pdev->dev,
1112 " got 0x%08X expected 0x%08X\n", reg, 1112 "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg,
1113 (val & mask), (write & mask)); 1113 (val & mask), (write & mask));
1114 *data = reg; 1114 *data = reg;
1115 return 1; 1115 return 1;
@@ -1171,8 +1171,9 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
1171 wr32(E1000_STATUS, toggle); 1171 wr32(E1000_STATUS, toggle);
1172 after = rd32(E1000_STATUS) & toggle; 1172 after = rd32(E1000_STATUS) & toggle;
1173 if (value != after) { 1173 if (value != after) {
1174 dev_err(&adapter->pdev->dev, "failed STATUS register test " 1174 dev_err(&adapter->pdev->dev,
1175 "got: 0x%08X expected: 0x%08X\n", after, value); 1175 "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
1176 after, value);
1176 *data = 1; 1177 *data = 1;
1177 return 1; 1178 return 1;
1178 } 1179 }
@@ -1777,16 +1778,14 @@ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
1777 * sessions are active */ 1778 * sessions are active */
1778 if (igb_check_reset_block(&adapter->hw)) { 1779 if (igb_check_reset_block(&adapter->hw)) {
1779 dev_err(&adapter->pdev->dev, 1780 dev_err(&adapter->pdev->dev,
1780 "Cannot do PHY loopback test " 1781 "Cannot do PHY loopback test when SoL/IDER is active.\n");
1781 "when SoL/IDER is active.\n");
1782 *data = 0; 1782 *data = 0;
1783 goto out; 1783 goto out;
1784 } 1784 }
1785 if ((adapter->hw.mac.type == e1000_i210) 1785 if ((adapter->hw.mac.type == e1000_i210)
1786 || (adapter->hw.mac.type == e1000_i210)) { 1786 || (adapter->hw.mac.type == e1000_i211)) {
1787 dev_err(&adapter->pdev->dev, 1787 dev_err(&adapter->pdev->dev,
1788 "Loopback test not supported " 1788 "Loopback test not supported on this part at this time.\n");
1789 "on this part at this time.\n");
1790 *data = 0; 1789 *data = 0;
1791 goto out; 1790 goto out;
1792 } 1791 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index f32e70300770..5aba5ecdf1e2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -614,8 +614,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
614 /* If source MAC is equal to our own MAC and not performing 614 /* If source MAC is equal to our own MAC and not performing
615 * the selftest or flb disabled - drop the packet */ 615 * the selftest or flb disabled - drop the packet */
616 if (s_mac == priv->mac && 616 if (s_mac == priv->mac &&
617 (!(dev->features & NETIF_F_LOOPBACK) || 617 !((dev->features & NETIF_F_LOOPBACK) ||
618 !priv->validate_loopback)) 618 priv->validate_loopback))
619 goto next; 619 goto next;
620 620
621 /* 621 /*
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 019d856b1334..10bba09c44ea 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -164,7 +164,6 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
164 ring->cons = 0xffffffff; 164 ring->cons = 0xffffffff;
165 ring->last_nr_txbb = 1; 165 ring->last_nr_txbb = 1;
166 ring->poll_cnt = 0; 166 ring->poll_cnt = 0;
167 ring->blocked = 0;
168 memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info)); 167 memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
169 memset(ring->buf, 0, ring->buf_size); 168 memset(ring->buf, 0, ring->buf_size);
170 169
@@ -365,14 +364,13 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
365 ring->cons += txbbs_skipped; 364 ring->cons += txbbs_skipped;
366 netdev_tx_completed_queue(ring->tx_queue, packets, bytes); 365 netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
367 366
368 /* Wakeup Tx queue if this ring stopped it */ 367 /*
369 if (unlikely(ring->blocked)) { 368 * Wakeup Tx queue if this stopped, and at least 1 packet
370 if ((u32) (ring->prod - ring->cons) <= 369 * was completed
371 ring->size - HEADROOM - MAX_DESC_TXBBS) { 370 */
372 ring->blocked = 0; 371 if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) {
373 netif_tx_wake_queue(ring->tx_queue); 372 netif_tx_wake_queue(ring->tx_queue);
374 priv->port_stats.wake_queue++; 373 priv->port_stats.wake_queue++;
375 }
376 } 374 }
377} 375}
378 376
@@ -592,7 +590,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
592 ring->size - HEADROOM - MAX_DESC_TXBBS)) { 590 ring->size - HEADROOM - MAX_DESC_TXBBS)) {
593 /* every full Tx ring stops queue */ 591 /* every full Tx ring stops queue */
594 netif_tx_stop_queue(ring->tx_queue); 592 netif_tx_stop_queue(ring->tx_queue);
595 ring->blocked = 1;
596 priv->port_stats.queue_stopped++; 593 priv->port_stats.queue_stopped++;
597 594
598 return NETDEV_TX_BUSY; 595 return NETDEV_TX_BUSY;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 48d0e90194cb..827b72dfce99 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -157,9 +157,6 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
157 "on this HCA, aborting.\n"); 157 "on this HCA, aborting.\n");
158 return -EINVAL; 158 return -EINVAL;
159 } 159 }
160 if (port_type[i] == MLX4_PORT_TYPE_ETH &&
161 port_type[i + 1] == MLX4_PORT_TYPE_IB)
162 return -EINVAL;
163 } 160 }
164 } 161 }
165 162
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 5f1ab105debc..9d27e42264e2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -248,7 +248,6 @@ struct mlx4_en_tx_ring {
248 u32 doorbell_qpn; 248 u32 doorbell_qpn;
249 void *buf; 249 void *buf;
250 u16 poll_cnt; 250 u16 poll_cnt;
251 int blocked;
252 struct mlx4_en_tx_info *tx_info; 251 struct mlx4_en_tx_info *tx_info;
253 u8 *bounce_buf; 252 u8 *bounce_buf;
254 u32 last_nr_txbb; 253 u32 last_nr_txbb;
diff --git a/drivers/net/ethernet/mellanox/mlx4/sense.c b/drivers/net/ethernet/mellanox/mlx4/sense.c
index 802498293528..34ee09bae36e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/sense.c
+++ b/drivers/net/ethernet/mellanox/mlx4/sense.c
@@ -81,20 +81,6 @@ void mlx4_do_sense_ports(struct mlx4_dev *dev,
81 } 81 }
82 82
83 /* 83 /*
84 * Adjust port configuration:
85 * If port 1 sensed nothing and port 2 is IB, set both as IB
86 * If port 2 sensed nothing and port 1 is Eth, set both as Eth
87 */
88 if (stype[0] == MLX4_PORT_TYPE_ETH) {
89 for (i = 1; i < dev->caps.num_ports; i++)
90 stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_ETH;
91 }
92 if (stype[dev->caps.num_ports - 1] == MLX4_PORT_TYPE_IB) {
93 for (i = 0; i < dev->caps.num_ports - 1; i++)
94 stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_IB;
95 }
96
97 /*
98 * If sensed nothing, remain in current configuration. 84 * If sensed nothing, remain in current configuration.
99 */ 85 */
100 for (i = 0; i < dev->caps.num_ports; i++) 86 for (i = 0; i < dev->caps.num_ports; i++)
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 70554a1b2b02..65a8d49106a4 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1503,6 +1503,11 @@ static int efx_probe_all(struct efx_nic *efx)
1503 goto fail2; 1503 goto fail2;
1504 } 1504 }
1505 1505
1506 BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
1507 if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
1508 rc = -EINVAL;
1509 goto fail3;
1510 }
1506 efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE; 1511 efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
1507 1512
1508 rc = efx_probe_filters(efx); 1513 rc = efx_probe_filters(efx);
@@ -2070,6 +2075,7 @@ static int efx_register_netdev(struct efx_nic *efx)
2070 net_dev->irq = efx->pci_dev->irq; 2075 net_dev->irq = efx->pci_dev->irq;
2071 net_dev->netdev_ops = &efx_netdev_ops; 2076 net_dev->netdev_ops = &efx_netdev_ops;
2072 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); 2077 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
2078 net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
2073 2079
2074 rtnl_lock(); 2080 rtnl_lock();
2075 2081
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index be8f9158a714..70755c97251a 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -30,6 +30,7 @@ extern netdev_tx_t
30efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); 30efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
31extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); 31extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
32extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc); 32extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
33extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
33 34
34/* RX */ 35/* RX */
35extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); 36extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
@@ -52,10 +53,15 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
52#define EFX_MAX_EVQ_SIZE 16384UL 53#define EFX_MAX_EVQ_SIZE 16384UL
53#define EFX_MIN_EVQ_SIZE 512UL 54#define EFX_MIN_EVQ_SIZE 512UL
54 55
55/* The smallest [rt]xq_entries that the driver supports. Callers of 56/* Maximum number of TCP segments we support for soft-TSO */
56 * efx_wake_queue() assume that they can subsequently send at least one 57#define EFX_TSO_MAX_SEGS 100
57 * skb. Falcon/A1 may require up to three descriptors per skb_frag. */ 58
58#define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS)) 59/* The smallest [rt]xq_entries that the driver supports. RX minimum
60 * is a bit arbitrary. For TX, we must have space for at least 2
61 * TSO skbs.
62 */
63#define EFX_RXQ_MIN_ENT 128U
64#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx))
59 65
60/* Filters */ 66/* Filters */
61extern int efx_probe_filters(struct efx_nic *efx); 67extern int efx_probe_filters(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 10536f93b561..8cba2df82b18 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -680,21 +680,27 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev,
680 struct ethtool_ringparam *ring) 680 struct ethtool_ringparam *ring)
681{ 681{
682 struct efx_nic *efx = netdev_priv(net_dev); 682 struct efx_nic *efx = netdev_priv(net_dev);
683 u32 txq_entries;
683 684
684 if (ring->rx_mini_pending || ring->rx_jumbo_pending || 685 if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
685 ring->rx_pending > EFX_MAX_DMAQ_SIZE || 686 ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
686 ring->tx_pending > EFX_MAX_DMAQ_SIZE) 687 ring->tx_pending > EFX_MAX_DMAQ_SIZE)
687 return -EINVAL; 688 return -EINVAL;
688 689
689 if (ring->rx_pending < EFX_MIN_RING_SIZE || 690 if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
690 ring->tx_pending < EFX_MIN_RING_SIZE) {
691 netif_err(efx, drv, efx->net_dev, 691 netif_err(efx, drv, efx->net_dev,
692 "TX and RX queues cannot be smaller than %ld\n", 692 "RX queues cannot be smaller than %u\n",
693 EFX_MIN_RING_SIZE); 693 EFX_RXQ_MIN_ENT);
694 return -EINVAL; 694 return -EINVAL;
695 } 695 }
696 696
697 return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending); 697 txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx));
698 if (txq_entries != ring->tx_pending)
699 netif_warn(efx, drv, efx->net_dev,
700 "increasing TX queue size to minimum of %u\n",
701 txq_entries);
702
703 return efx_realloc_channels(efx, ring->rx_pending, txq_entries);
698} 704}
699 705
700static int efx_ethtool_set_pauseparam(struct net_device *net_dev, 706static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 9b225a7769f7..18713436b443 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -119,6 +119,25 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
119 return len; 119 return len;
120} 120}
121 121
122unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
123{
124 /* Header and payload descriptor for each output segment, plus
125 * one for every input fragment boundary within a segment
126 */
127 unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
128
129 /* Possibly one more per segment for the alignment workaround */
130 if (EFX_WORKAROUND_5391(efx))
131 max_descs += EFX_TSO_MAX_SEGS;
132
133 /* Possibly more for PCIe page boundaries within input fragments */
134 if (PAGE_SIZE > EFX_PAGE_SIZE)
135 max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
136 DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
137
138 return max_descs;
139}
140
122/* 141/*
123 * Add a socket buffer to a TX queue 142 * Add a socket buffer to a TX queue
124 * 143 *
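
A worked example of the new TX ring minimum, under the assumption of 4 KiB pages where MAX_SKB_FRAGS is 17 and neither the 5391 workaround nor the oversized-page adjustment applies (assumed values for illustration, not taken from this patch):

#include <stdio.h>

#define EFX_TSO_MAX_SEGS 100	/* from efx.h above */
#define MAX_SKB_FRAGS    17	/* assumed: 4 KiB pages */

int main(void)
{
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS; /* 217 */
	unsigned int txq_min   = 2 * max_descs;                        /* 434 */

	printf("max descriptors per TSO skb: %u, TX ring minimum: %u\n",
	       max_descs, txq_min);
	return 0;
}

Under these assumptions, efx_ethtool_set_ringparam() would round a requested TX ring smaller than 434 entries up to that minimum.
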
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 482648fcf0b6..98934bdf6acf 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1003,6 +1003,7 @@ static int ixp4xx_nway_reset(struct net_device *dev)
1003} 1003}
1004 1004
1005int ixp46x_phc_index = -1; 1005int ixp46x_phc_index = -1;
1006EXPORT_SYMBOL_GPL(ixp46x_phc_index);
1006 1007
1007static int ixp4xx_get_ts_info(struct net_device *dev, 1008static int ixp4xx_get_ts_info(struct net_device *dev,
1008 struct ethtool_ts_info *info) 1009 struct ethtool_ts_info *info)
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 6cee2917eb02..4a1a5f58fa73 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -383,13 +383,6 @@ int netvsc_device_remove(struct hv_device *device)
383 unsigned long flags; 383 unsigned long flags;
384 384
385 net_device = hv_get_drvdata(device); 385 net_device = hv_get_drvdata(device);
386 spin_lock_irqsave(&device->channel->inbound_lock, flags);
387 net_device->destroy = true;
388 spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
389
390 /* Wait for all send completions */
391 wait_event(net_device->wait_drain,
392 atomic_read(&net_device->num_outstanding_sends) == 0);
393 386
394 netvsc_disconnect_vsp(net_device); 387 netvsc_disconnect_vsp(net_device);
395 388
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index e5d6146937fa..1e88a1095934 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -718,6 +718,9 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
718{ 718{
719 struct rndis_request *request; 719 struct rndis_request *request;
720 struct rndis_halt_request *halt; 720 struct rndis_halt_request *halt;
721 struct netvsc_device *nvdev = dev->net_dev;
722 struct hv_device *hdev = nvdev->dev;
723 ulong flags;
721 724
722 /* Attempt to do a rndis device halt */ 725 /* Attempt to do a rndis device halt */
723 request = get_rndis_request(dev, RNDIS_MSG_HALT, 726 request = get_rndis_request(dev, RNDIS_MSG_HALT,
@@ -735,6 +738,14 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
735 dev->state = RNDIS_DEV_UNINITIALIZED; 738 dev->state = RNDIS_DEV_UNINITIALIZED;
736 739
737cleanup: 740cleanup:
741 spin_lock_irqsave(&hdev->channel->inbound_lock, flags);
742 nvdev->destroy = true;
743 spin_unlock_irqrestore(&hdev->channel->inbound_lock, flags);
744
745 /* Wait for all send completions */
746 wait_event(nvdev->wait_drain,
747 atomic_read(&nvdev->num_outstanding_sends) == 0);
748
738 if (request) 749 if (request)
739 put_rndis_request(dev, request); 750 put_rndis_request(dev, request);
740 return; 751 return;
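The two hyperv hunks above move the shutdown ordering into rndis_filter_halt_device(): first publish the destroy flag under the channel's inbound_lock so the completion path can see it, then sleep until every outstanding send has completed. A minimal userspace analogue of that flag-then-drain pattern, purely illustrative and not the driver's code, looks like this:

    /* Sketch of "set the destroy flag, then wait for outstanding work to
     * drain", using pthread primitives in place of the kernel ones.
     */
    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
    static bool destroy;
    static unsigned int outstanding;

    /* Submit path: new work is refused once destroy is set. */
    bool op_submit(void)
    {
            bool ok;

            pthread_mutex_lock(&lock);
            ok = !destroy;
            if (ok)
                    outstanding++;
            pthread_mutex_unlock(&lock);
            return ok;
    }

    /* Completion path: one in-flight operation finished. */
    void op_complete(void)
    {
            pthread_mutex_lock(&lock);
            if (--outstanding == 0)
                    pthread_cond_broadcast(&drained);
            pthread_mutex_unlock(&lock);
    }

    /* Halt path: refuse new work, then wait for in-flight work to finish. */
    void halt_and_drain(void)
    {
            pthread_mutex_lock(&lock);
            destroy = true;
            while (outstanding != 0)
                    pthread_cond_wait(&drained, &lock);
            pthread_mutex_unlock(&lock);
    }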
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index e0cc4ef33dee..eefe49e8713c 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -101,7 +101,6 @@ err:
101 n--; 101 n--;
102 gpio_free(s->gpio[n]); 102 gpio_free(s->gpio[n]);
103 } 103 }
104 devm_kfree(&pdev->dev, s);
105 return r; 104 return r;
106} 105}
107 106
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index f4ce5957df32..4cd582a4f625 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1225,6 +1225,26 @@ static const struct usb_device_id cdc_devs[] = {
1225 .driver_info = (unsigned long) &wwan_info, 1225 .driver_info = (unsigned long) &wwan_info,
1226 }, 1226 },
1227 1227
1228 /* Dell branded MBM devices like DW5550 */
1229 { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
1230 | USB_DEVICE_ID_MATCH_VENDOR,
1231 .idVendor = 0x413c,
1232 .bInterfaceClass = USB_CLASS_COMM,
1233 .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM,
1234 .bInterfaceProtocol = USB_CDC_PROTO_NONE,
1235 .driver_info = (unsigned long) &wwan_info,
1236 },
1237
1238 /* Toshiba branded MBM devices */
1239 { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
1240 | USB_DEVICE_ID_MATCH_VENDOR,
1241 .idVendor = 0x0930,
1242 .bInterfaceClass = USB_CLASS_COMM,
1243 .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM,
1244 .bInterfaceProtocol = USB_CDC_PROTO_NONE,
1245 .driver_info = (unsigned long) &wwan_info,
1246 },
1247
1228 /* Generic CDC-NCM devices */ 1248 /* Generic CDC-NCM devices */
1229 { USB_INTERFACE_INFO(USB_CLASS_COMM, 1249 { USB_INTERFACE_INFO(USB_CLASS_COMM,
1230 USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), 1250 USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index cfa91ab7acf8..60b6a9daff7e 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -730,6 +730,7 @@ int ath9k_hw_init(struct ath_hw *ah)
730 case AR9300_DEVID_QCA955X: 730 case AR9300_DEVID_QCA955X:
731 case AR9300_DEVID_AR9580: 731 case AR9300_DEVID_AR9580:
732 case AR9300_DEVID_AR9462: 732 case AR9300_DEVID_AR9462:
733 case AR9485_DEVID_AR1111:
733 break; 734 break;
734 default: 735 default:
735 if (common->bus_ops->ath_bus_type == ATH_USB) 736 if (common->bus_ops->ath_bus_type == ATH_USB)
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index dd0c146d81dc..ce7332c64efb 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -49,6 +49,7 @@
49#define AR9300_DEVID_AR9462 0x0034 49#define AR9300_DEVID_AR9462 0x0034
50#define AR9300_DEVID_AR9330 0x0035 50#define AR9300_DEVID_AR9330 0x0035
51#define AR9300_DEVID_QCA955X 0x0038 51#define AR9300_DEVID_QCA955X 0x0038
52#define AR9485_DEVID_AR1111 0x0037
52 53
53#define AR5416_AR9100_DEVID 0x000b 54#define AR5416_AR9100_DEVID 0x000b
54 55
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 87b89d55e637..d455de9162ec 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -37,6 +37,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
37 { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */ 37 { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */
38 { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */ 38 { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */
39 { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */ 39 { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */
40 { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */
40 { 0 } 41 { 0 }
41}; 42};
42 43
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index b80352b308d5..a140165dfee0 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2719,32 +2719,37 @@ static int b43_gpio_init(struct b43_wldev *dev)
2719 if (dev->dev->chip_id == 0x4301) { 2719 if (dev->dev->chip_id == 0x4301) {
2720 mask |= 0x0060; 2720 mask |= 0x0060;
2721 set |= 0x0060; 2721 set |= 0x0060;
2722 } else if (dev->dev->chip_id == 0x5354) {
2723 /* Don't allow overtaking buttons GPIOs */
2724 set &= 0x2; /* 0x2 is LED GPIO on BCM5354 */
2722 } 2725 }
2723 if (dev->dev->chip_id == 0x5354) 2726
2724 set &= 0xff02;
2725 if (0 /* FIXME: conditional unknown */ ) { 2727 if (0 /* FIXME: conditional unknown */ ) {
2726 b43_write16(dev, B43_MMIO_GPIO_MASK, 2728 b43_write16(dev, B43_MMIO_GPIO_MASK,
2727 b43_read16(dev, B43_MMIO_GPIO_MASK) 2729 b43_read16(dev, B43_MMIO_GPIO_MASK)
2728 | 0x0100); 2730 | 0x0100);
2729 mask |= 0x0180; 2731 /* BT Coexistance Input */
2730 set |= 0x0180; 2732 mask |= 0x0080;
2733 set |= 0x0080;
2734 /* BT Coexistance Out */
2735 mask |= 0x0100;
2736 set |= 0x0100;
2731 } 2737 }
2732 if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL) { 2738 if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL) {
2739 /* PA is controlled by gpio 9, let ucode handle it */
2733 b43_write16(dev, B43_MMIO_GPIO_MASK, 2740 b43_write16(dev, B43_MMIO_GPIO_MASK,
2734 b43_read16(dev, B43_MMIO_GPIO_MASK) 2741 b43_read16(dev, B43_MMIO_GPIO_MASK)
2735 | 0x0200); 2742 | 0x0200);
2736 mask |= 0x0200; 2743 mask |= 0x0200;
2737 set |= 0x0200; 2744 set |= 0x0200;
2738 } 2745 }
2739 if (dev->dev->core_rev >= 2)
2740 mask |= 0x0010; /* FIXME: This is redundant. */
2741 2746
2742 switch (dev->dev->bus_type) { 2747 switch (dev->dev->bus_type) {
2743#ifdef CONFIG_B43_BCMA 2748#ifdef CONFIG_B43_BCMA
2744 case B43_BUS_BCMA: 2749 case B43_BUS_BCMA:
2745 bcma_cc_write32(&dev->dev->bdev->bus->drv_cc, BCMA_CC_GPIOCTL, 2750 bcma_cc_write32(&dev->dev->bdev->bus->drv_cc, BCMA_CC_GPIOCTL,
2746 (bcma_cc_read32(&dev->dev->bdev->bus->drv_cc, 2751 (bcma_cc_read32(&dev->dev->bdev->bus->drv_cc,
2747 BCMA_CC_GPIOCTL) & mask) | set); 2752 BCMA_CC_GPIOCTL) & ~mask) | set);
2748 break; 2753 break;
2749#endif 2754#endif
2750#ifdef CONFIG_B43_SSB 2755#ifdef CONFIG_B43_SSB
@@ -2753,7 +2758,7 @@ static int b43_gpio_init(struct b43_wldev *dev)
2753 if (gpiodev) 2758 if (gpiodev)
2754 ssb_write32(gpiodev, B43_GPIO_CONTROL, 2759 ssb_write32(gpiodev, B43_GPIO_CONTROL,
2755 (ssb_read32(gpiodev, B43_GPIO_CONTROL) 2760 (ssb_read32(gpiodev, B43_GPIO_CONTROL)
2756 & mask) | set); 2761 & ~mask) | set);
2757 break; 2762 break;
2758#endif 2763#endif
2759 } 2764 }
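The key functional change in the b43 hunks above is writing (ctl & ~mask) | set instead of (ctl & mask) | set: the bits the driver owns are cleared first and only then overlaid with the requested values, so everything outside mask is preserved. A small standalone illustration of that read-modify-write idiom, with the register faked by a plain variable:

    /* Read-modify-write of a control register: clear the bits we own (mask),
     * then set the requested values; bits outside mask are left untouched.
     * The "register" is an ordinary variable here, purely for illustration.
     */
    #include <stdio.h>
    #include <stdint.h>

    static uint32_t gpioctl = 0xffff0000;   /* pretend hardware state */

    static void gpio_apply(uint32_t mask, uint32_t set)
    {
            gpioctl = (gpioctl & ~mask) | set;
    }

    int main(void)
    {
            /* own bits 0x0200 and 0x0060, drive only 0x0200 high */
            gpio_apply(0x0260, 0x0200);
            printf("GPIOCTL = 0x%08x\n", (unsigned)gpioctl);   /* 0xffff0200 */
            return 0;
    }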
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
index 9a4c63f927cb..7ed7d7577024 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -382,9 +382,7 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
382{ 382{
383 struct brcms_c_info *wlc = wlc_cm->wlc; 383 struct brcms_c_info *wlc = wlc_cm->wlc;
384 struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel; 384 struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel;
385 const struct ieee80211_reg_rule *reg_rule;
386 struct txpwr_limits txpwr; 385 struct txpwr_limits txpwr;
387 int ret;
388 386
389 brcms_c_channel_reg_limits(wlc_cm, chanspec, &txpwr); 387 brcms_c_channel_reg_limits(wlc_cm, chanspec, &txpwr);
390 388
@@ -393,8 +391,7 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
393 ); 391 );
394 392
395 /* set or restore gmode as required by regulatory */ 393 /* set or restore gmode as required by regulatory */
396 ret = freq_reg_info(wlc->wiphy, ch->center_freq, 0, &reg_rule); 394 if (ch->flags & IEEE80211_CHAN_NO_OFDM)
397 if (!ret && (reg_rule->flags & NL80211_RRF_NO_OFDM))
398 brcms_c_set_gmode(wlc, GMODE_LEGACY_B, false); 395 brcms_c_set_gmode(wlc, GMODE_LEGACY_B, false);
399 else 396 else
400 brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false); 397 brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 9e79d47e077f..192ad5c1fcc8 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -121,7 +121,8 @@ static struct ieee80211_channel brcms_2ghz_chantable[] = {
121 IEEE80211_CHAN_NO_HT40PLUS), 121 IEEE80211_CHAN_NO_HT40PLUS),
122 CHAN2GHZ(14, 2484, 122 CHAN2GHZ(14, 2484,
123 IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS | 123 IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS |
124 IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS) 124 IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS |
125 IEEE80211_CHAN_NO_OFDM)
125}; 126};
126 127
127static struct ieee80211_channel brcms_5ghz_nphy_chantable[] = { 128static struct ieee80211_channel brcms_5ghz_nphy_chantable[] = {
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index eb5de800ed90..1c10b542ab23 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -1254,6 +1254,7 @@ static int lbs_associate(struct lbs_private *priv,
1254 netif_tx_wake_all_queues(priv->dev); 1254 netif_tx_wake_all_queues(priv->dev);
1255 } 1255 }
1256 1256
1257 kfree(cmd);
1257done: 1258done:
1258 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); 1259 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
1259 return ret; 1260 return ret;
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 76caebaa4397..e970897f6ab5 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -1314,6 +1314,7 @@ static void if_sdio_remove(struct sdio_func *func)
1314 kfree(packet); 1314 kfree(packet);
1315 } 1315 }
1316 1316
1317 kfree(card);
1317 lbs_deb_leave(LBS_DEB_SDIO); 1318 lbs_deb_leave(LBS_DEB_SDIO);
1318} 1319}
1319 1320
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 58048189bd24..fe1ea43c5149 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -571,7 +571,10 @@ static int lbs_thread(void *data)
571 netdev_info(dev, "Timeout submitting command 0x%04x\n", 571 netdev_info(dev, "Timeout submitting command 0x%04x\n",
572 le16_to_cpu(cmdnode->cmdbuf->command)); 572 le16_to_cpu(cmdnode->cmdbuf->command));
573 lbs_complete_command(priv, cmdnode, -ETIMEDOUT); 573 lbs_complete_command(priv, cmdnode, -ETIMEDOUT);
574 if (priv->reset_card) 574
575 /* Reset card, but only when it isn't in the process
576 * of being shutdown anyway. */
577 if (!dev->dismantle && priv->reset_card)
575 priv->reset_card(priv); 578 priv->reset_card(priv);
576 } 579 }
577 priv->cmd_timed_out = 0; 580 priv->cmd_timed_out = 0;
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 88455b1b9fe0..cb8c2aca54e4 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -221,6 +221,67 @@ static void rt2800_rf_write(struct rt2x00_dev *rt2x00dev,
221 mutex_unlock(&rt2x00dev->csr_mutex); 221 mutex_unlock(&rt2x00dev->csr_mutex);
222} 222}
223 223
224static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev)
225{
226 u32 reg;
227 int i, count;
228
229 rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
230 if (rt2x00_get_field32(reg, WLAN_EN))
231 return 0;
232
233 rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff);
234 rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1);
235 rt2x00_set_field32(&reg, WLAN_CLK_EN, 0);
236 rt2x00_set_field32(&reg, WLAN_EN, 1);
237 rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
238
239 udelay(REGISTER_BUSY_DELAY);
240
241 count = 0;
242 do {
243 /*
244 * Check PLL_LD & XTAL_RDY.
245 */
246 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
247 rt2800_register_read(rt2x00dev, CMB_CTRL, &reg);
248 if (rt2x00_get_field32(reg, PLL_LD) &&
249 rt2x00_get_field32(reg, XTAL_RDY))
250 break;
251 udelay(REGISTER_BUSY_DELAY);
252 }
253
254 if (i >= REGISTER_BUSY_COUNT) {
255
256 if (count >= 10)
257 return -EIO;
258
259 rt2800_register_write(rt2x00dev, 0x58, 0x018);
260 udelay(REGISTER_BUSY_DELAY);
261 rt2800_register_write(rt2x00dev, 0x58, 0x418);
262 udelay(REGISTER_BUSY_DELAY);
263 rt2800_register_write(rt2x00dev, 0x58, 0x618);
264 udelay(REGISTER_BUSY_DELAY);
265 count++;
266 } else {
267 count = 0;
268 }
269
270 rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
271 rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 0);
272 rt2x00_set_field32(&reg, WLAN_CLK_EN, 1);
273 rt2x00_set_field32(&reg, WLAN_RESET, 1);
274 rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
275 udelay(10);
276 rt2x00_set_field32(&reg, WLAN_RESET, 0);
277 rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
278 udelay(10);
279 rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, 0x7fffffff);
280 } while (count != 0);
281
282 return 0;
283}
284
224void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev, 285void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
225 const u8 command, const u8 token, 286 const u8 command, const u8 token,
226 const u8 arg0, const u8 arg1) 287 const u8 arg0, const u8 arg1)
@@ -400,6 +461,13 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
400{ 461{
401 unsigned int i; 462 unsigned int i;
402 u32 reg; 463 u32 reg;
464 int retval;
465
466 if (rt2x00_rt(rt2x00dev, RT3290)) {
467 retval = rt2800_enable_wlan_rt3290(rt2x00dev);
468 if (retval)
469 return -EBUSY;
470 }
403 471
404 /* 472 /*
405 * If driver doesn't wake up firmware here, 473 * If driver doesn't wake up firmware here,
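rt2800_enable_wlan_rt3290(), moved into rt2800lib.c above, is a bounded poll-with-retry loop: spin up to REGISTER_BUSY_COUNT times waiting for PLL_LD and XTAL_RDY, and if they never assert, kick the chip (the 0x58 write sequence) and try again, giving up after ten rounds. Stripped of the hardware specifics, the control flow is roughly the following; the function and constant names here are placeholders, not the driver's:

    /* Skeleton of a bounded poll-with-retry loop. poll_ready() and
     * nudge_hardware() are hypothetical stand-ins for the register accesses.
     */
    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define POLL_ATTEMPTS  200   /* assumed stand-in for REGISTER_BUSY_COUNT */
    #define MAX_RETRIES    10

    static int polls_until_ready = 3;       /* pretend hardware readiness */

    static bool poll_ready(void)            /* models "PLL_LD && XTAL_RDY" */
    {
            return --polls_until_ready <= 0;
    }

    static void nudge_hardware(void)        /* models the 0x58 write sequence */
    {
    }

    static int enable_with_retries(void)
    {
            int attempts, retries = 0;

            do {
                    for (attempts = 0; attempts < POLL_ATTEMPTS; attempts++)
                            if (poll_ready())
                                    return 0;       /* ready */
                    if (++retries > MAX_RETRIES)
                            return -EIO;            /* give up */
                    nudge_hardware();               /* kick the chip, poll again */
            } while (1);
    }

    int main(void)
    {
            printf("enable_with_retries() = %d\n", enable_with_retries());
            return 0;
    }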
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 235376e9cb04..98aa426a3564 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -980,66 +980,6 @@ static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
980 return rt2800_validate_eeprom(rt2x00dev); 980 return rt2800_validate_eeprom(rt2x00dev);
981} 981}
982 982
983static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev)
984{
985 u32 reg;
986 int i, count;
987
988 rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
989 if (rt2x00_get_field32(reg, WLAN_EN))
990 return 0;
991
992 rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff);
993 rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1);
994 rt2x00_set_field32(&reg, WLAN_CLK_EN, 0);
995 rt2x00_set_field32(&reg, WLAN_EN, 1);
996 rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
997
998 udelay(REGISTER_BUSY_DELAY);
999
1000 count = 0;
1001 do {
1002 /*
1003 * Check PLL_LD & XTAL_RDY.
1004 */
1005 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
1006 rt2800_register_read(rt2x00dev, CMB_CTRL, &reg);
1007 if (rt2x00_get_field32(reg, PLL_LD) &&
1008 rt2x00_get_field32(reg, XTAL_RDY))
1009 break;
1010 udelay(REGISTER_BUSY_DELAY);
1011 }
1012
1013 if (i >= REGISTER_BUSY_COUNT) {
1014
1015 if (count >= 10)
1016 return -EIO;
1017
1018 rt2800_register_write(rt2x00dev, 0x58, 0x018);
1019 udelay(REGISTER_BUSY_DELAY);
1020 rt2800_register_write(rt2x00dev, 0x58, 0x418);
1021 udelay(REGISTER_BUSY_DELAY);
1022 rt2800_register_write(rt2x00dev, 0x58, 0x618);
1023 udelay(REGISTER_BUSY_DELAY);
1024 count++;
1025 } else {
1026 count = 0;
1027 }
1028
1029 rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
1030 rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 0);
1031 rt2x00_set_field32(&reg, WLAN_CLK_EN, 1);
1032 rt2x00_set_field32(&reg, WLAN_RESET, 1);
1033 rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
1034 udelay(10);
1035 rt2x00_set_field32(&reg, WLAN_RESET, 0);
1036 rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
1037 udelay(10);
1038 rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, 0x7fffffff);
1039 } while (count != 0);
1040
1041 return 0;
1042}
1043static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev) 983static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
1044{ 984{
1045 int retval; 985 int retval;
@@ -1063,17 +1003,6 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
1063 return retval; 1003 return retval;
1064 1004
1065 /* 1005 /*
1066 * In probe phase call rt2800_enable_wlan_rt3290 to enable wlan
1067 * clk for rt3290. That avoid the MCU fail in start phase.
1068 */
1069 if (rt2x00_rt(rt2x00dev, RT3290)) {
1070 retval = rt2800_enable_wlan_rt3290(rt2x00dev);
1071
1072 if (retval)
1073 return retval;
1074 }
1075
1076 /*
1077 * This device has multiple filters for control frames 1006 * This device has multiple filters for control frames
1078 * and has a separate filter for PS Poll frames. 1007 * and has a separate filter for PS Poll frames.
1079 */ 1008 */
diff --git a/drivers/pinctrl/pinctrl-imx23.c b/drivers/pinctrl/pinctrl-imx23.c
index 75d3eff94296..3674d877ed7c 100644
--- a/drivers/pinctrl/pinctrl-imx23.c
+++ b/drivers/pinctrl/pinctrl-imx23.c
@@ -292,7 +292,7 @@ static int __init imx23_pinctrl_init(void)
292{ 292{
293 return platform_driver_register(&imx23_pinctrl_driver); 293 return platform_driver_register(&imx23_pinctrl_driver);
294} 294}
295arch_initcall(imx23_pinctrl_init); 295postcore_initcall(imx23_pinctrl_init);
296 296
297static void __exit imx23_pinctrl_exit(void) 297static void __exit imx23_pinctrl_exit(void)
298{ 298{
diff --git a/drivers/pinctrl/pinctrl-imx28.c b/drivers/pinctrl/pinctrl-imx28.c
index b973026811a2..0f5b2122b1ba 100644
--- a/drivers/pinctrl/pinctrl-imx28.c
+++ b/drivers/pinctrl/pinctrl-imx28.c
@@ -408,7 +408,7 @@ static int __init imx28_pinctrl_init(void)
408{ 408{
409 return platform_driver_register(&imx28_pinctrl_driver); 409 return platform_driver_register(&imx28_pinctrl_driver);
410} 410}
411arch_initcall(imx28_pinctrl_init); 411postcore_initcall(imx28_pinctrl_init);
412 412
413static void __exit imx28_pinctrl_exit(void) 413static void __exit imx28_pinctrl_exit(void)
414{ 414{
diff --git a/drivers/pinctrl/pinctrl-nomadik-db8500.c b/drivers/pinctrl/pinctrl-nomadik-db8500.c
index 6f99769c6733..5f3e9d0221e1 100644
--- a/drivers/pinctrl/pinctrl-nomadik-db8500.c
+++ b/drivers/pinctrl/pinctrl-nomadik-db8500.c
@@ -766,7 +766,7 @@ DB8500_FUNC_GROUPS(ipgpio, "ipgpio0_a_1", "ipgpio1_a_1", "ipgpio7_b_1",
766DB8500_FUNC_GROUPS(msp2, "msp2sck_a_1", "msp2_a_1"); 766DB8500_FUNC_GROUPS(msp2, "msp2sck_a_1", "msp2_a_1");
767DB8500_FUNC_GROUPS(mc4, "mc4_a_1", "mc4rstn_c_1"); 767DB8500_FUNC_GROUPS(mc4, "mc4_a_1", "mc4rstn_c_1");
768DB8500_FUNC_GROUPS(mc1, "mc1_a_1", "mc1dir_a_1"); 768DB8500_FUNC_GROUPS(mc1, "mc1_a_1", "mc1dir_a_1");
769DB8500_FUNC_GROUPS(hsi, "hsir1_a_1", "hsit1_a_1", "hsit_a_2"); 769DB8500_FUNC_GROUPS(hsi, "hsir_a_1", "hsit_a_1", "hsit_a_2");
770DB8500_FUNC_GROUPS(clkout, "clkout_a_1", "clkout_a_2", "clkout_c_1"); 770DB8500_FUNC_GROUPS(clkout, "clkout_a_1", "clkout_a_2", "clkout_c_1");
771DB8500_FUNC_GROUPS(usb, "usb_a_1"); 771DB8500_FUNC_GROUPS(usb, "usb_a_1");
772DB8500_FUNC_GROUPS(trig, "trig_b_1"); 772DB8500_FUNC_GROUPS(trig, "trig_b_1");
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c
index 53b0d49a7a1c..ec6ac501b23a 100644
--- a/drivers/pinctrl/pinctrl-nomadik.c
+++ b/drivers/pinctrl/pinctrl-nomadik.c
@@ -1731,7 +1731,6 @@ static int __devinit nmk_pinctrl_probe(struct platform_device *pdev)
1731 for (i = 0; i < npct->soc->gpio_num_ranges; i++) { 1731 for (i = 0; i < npct->soc->gpio_num_ranges; i++) {
1732 if (!nmk_gpio_chips[i]) { 1732 if (!nmk_gpio_chips[i]) {
1733 dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i); 1733 dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i);
1734 devm_kfree(&pdev->dev, npct);
1735 return -EPROBE_DEFER; 1734 return -EPROBE_DEFER;
1736 } 1735 }
1737 npct->soc->gpio_ranges[i].gc = &nmk_gpio_chips[i]->chip; 1736 npct->soc->gpio_ranges[i].gc = &nmk_gpio_chips[i]->chip;
diff --git a/drivers/pinctrl/pinctrl-sirf.c b/drivers/pinctrl/pinctrl-sirf.c
index 2aae8a8978e9..7fca6ce5952b 100644
--- a/drivers/pinctrl/pinctrl-sirf.c
+++ b/drivers/pinctrl/pinctrl-sirf.c
@@ -1217,7 +1217,6 @@ out_no_rsc_remap:
1217 iounmap(spmx->gpio_virtbase); 1217 iounmap(spmx->gpio_virtbase);
1218out_no_gpio_remap: 1218out_no_gpio_remap:
1219 platform_set_drvdata(pdev, NULL); 1219 platform_set_drvdata(pdev, NULL);
1220 devm_kfree(&pdev->dev, spmx);
1221 return ret; 1220 return ret;
1222} 1221}
1223 1222
diff --git a/drivers/pinctrl/pinctrl-u300.c b/drivers/pinctrl/pinctrl-u300.c
index a7ad8c112d91..309f5b9a70ec 100644
--- a/drivers/pinctrl/pinctrl-u300.c
+++ b/drivers/pinctrl/pinctrl-u300.c
@@ -1121,10 +1121,8 @@ static int __devinit u300_pmx_probe(struct platform_device *pdev)
1121 upmx->dev = &pdev->dev; 1121 upmx->dev = &pdev->dev;
1122 1122
1123 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1123 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1124 if (!res) { 1124 if (!res)
1125 ret = -ENOENT; 1125 return -ENOENT;
1126 goto out_no_resource;
1127 }
1128 upmx->phybase = res->start; 1126 upmx->phybase = res->start;
1129 upmx->physize = resource_size(res); 1127 upmx->physize = resource_size(res);
1130 1128
@@ -1165,8 +1163,6 @@ out_no_remap:
1165 platform_set_drvdata(pdev, NULL); 1163 platform_set_drvdata(pdev, NULL);
1166out_no_memregion: 1164out_no_memregion:
1167 release_mem_region(upmx->phybase, upmx->physize); 1165 release_mem_region(upmx->phybase, upmx->physize);
1168out_no_resource:
1169 devm_kfree(&pdev->dev, upmx);
1170 return ret; 1166 return ret;
1171} 1167}
1172 1168
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index ee0ebacf8227..89dcf155d57e 100644
--- a/drivers/usb/early/ehci-dbgp.c
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -450,7 +450,7 @@ static int dbgp_ehci_startup(void)
450 writel(FLAG_CF, &ehci_regs->configured_flag); 450 writel(FLAG_CF, &ehci_regs->configured_flag);
451 451
452 /* Wait until the controller is no longer halted */ 452 /* Wait until the controller is no longer halted */
453 loop = 10; 453 loop = 1000;
454 do { 454 do {
455 status = readl(&ehci_regs->status); 455 status = readl(&ehci_regs->status);
456 if (!(status & STS_HALT)) 456 if (!(status & STS_HALT))
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
index 181fa8158a8b..858c9714b2f3 100644
--- a/drivers/zorro/zorro.c
+++ b/drivers/zorro/zorro.c
@@ -37,7 +37,6 @@ struct zorro_dev zorro_autocon[ZORRO_NUM_AUTO];
37 */ 37 */
38 38
39struct zorro_bus { 39struct zorro_bus {
40 struct list_head devices; /* list of devices on this bus */
41 struct device dev; 40 struct device dev;
42}; 41};
43 42
@@ -136,7 +135,6 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
136 if (!bus) 135 if (!bus)
137 return -ENOMEM; 136 return -ENOMEM;
138 137
139 INIT_LIST_HEAD(&bus->devices);
140 bus->dev.parent = &pdev->dev; 138 bus->dev.parent = &pdev->dev;
141 dev_set_name(&bus->dev, "zorro"); 139 dev_set_name(&bus->dev, "zorro");
142 error = device_register(&bus->dev); 140 error = device_register(&bus->dev);
diff --git a/fs/bio.c b/fs/bio.c
index 73922abba832..5eaa70c9d96e 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1312,7 +1312,7 @@ EXPORT_SYMBOL(bio_copy_kern);
1312 * Note that this code is very hard to test under normal circumstances because 1312 * Note that this code is very hard to test under normal circumstances because
1313 * direct-io pins the pages with get_user_pages(). This makes 1313 * direct-io pins the pages with get_user_pages(). This makes
1314 * is_page_cache_freeable return false, and the VM will not clean the pages. 1314 * is_page_cache_freeable return false, and the VM will not clean the pages.
1315 * But other code (eg, pdflush) could clean the pages if they are mapped 1315 * But other code (eg, flusher threads) could clean the pages if they are mapped
1316 * pagecache. 1316 * pagecache.
1317 * 1317 *
1318 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the 1318 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 83baec24946d..6e8f416773d4 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -324,7 +324,8 @@ static noinline int add_async_extent(struct async_cow *cow,
324 * If this code finds it can't get good compression, it puts an 324 * If this code finds it can't get good compression, it puts an
325 * entry onto the work queue to write the uncompressed bytes. This 325 * entry onto the work queue to write the uncompressed bytes. This
326 * makes sure that both compressed inodes and uncompressed inodes 326 * makes sure that both compressed inodes and uncompressed inodes
327 * are written in the same order that pdflush sent them down. 327 * are written in the same order that the flusher thread sent them
328 * down.
328 */ 329 */
329static noinline int compress_file_range(struct inode *inode, 330static noinline int compress_file_range(struct inode *inode,
330 struct page *locked_page, 331 struct page *locked_page,
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 643335a4fe3c..051c7fe551dd 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -596,7 +596,7 @@ void btrfs_start_ordered_extent(struct inode *inode,
596 /* 596 /*
597 * pages in the range can be dirty, clean or writeback. We 597 * pages in the range can be dirty, clean or writeback. We
598 * start IO on any dirty ones so the wait doesn't stall waiting 598 * start IO on any dirty ones so the wait doesn't stall waiting
599 * for pdflush to find them 599 * for the flusher thread to find them
600 */ 600 */
601 if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags)) 601 if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
602 filemap_fdatawrite_range(inode->i_mapping, start, end); 602 filemap_fdatawrite_range(inode->i_mapping, start, end);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 8c6e61d6eed5..f2eb24c477a3 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -100,10 +100,6 @@ static void __save_error_info(struct btrfs_fs_info *fs_info)
100 fs_info->fs_state = BTRFS_SUPER_FLAG_ERROR; 100 fs_info->fs_state = BTRFS_SUPER_FLAG_ERROR;
101} 101}
102 102
103/* NOTE:
104 * We move write_super stuff at umount in order to avoid deadlock
105 * for umount hold all lock.
106 */
107static void save_error_info(struct btrfs_fs_info *fs_info) 103static void save_error_info(struct btrfs_fs_info *fs_info)
108{ 104{
109 __save_error_info(fs_info); 105 __save_error_info(fs_info);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index b8708f994e67..e86ae04abe6a 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1744,10 +1744,6 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1744 1744
1745 device->fs_devices = root->fs_info->fs_devices; 1745 device->fs_devices = root->fs_info->fs_devices;
1746 1746
1747 /*
1748 * we don't want write_supers to jump in here with our device
1749 * half setup
1750 */
1751 mutex_lock(&root->fs_info->fs_devices->device_list_mutex); 1747 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1752 list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices); 1748 list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
1753 list_add(&device->dev_alloc_list, 1749 list_add(&device->dev_alloc_list,
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 5badb0c039de..1562c27a2fab 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -37,15 +37,12 @@
37 37
38#define EXOFS_DBGMSG2(M...) do {} while (0) 38#define EXOFS_DBGMSG2(M...) do {} while (0)
39 39
40enum {MAX_PAGES_KMALLOC = PAGE_SIZE / sizeof(struct page *), };
41
42unsigned exofs_max_io_pages(struct ore_layout *layout, 40unsigned exofs_max_io_pages(struct ore_layout *layout,
43 unsigned expected_pages) 41 unsigned expected_pages)
44{ 42{
45 unsigned pages = min_t(unsigned, expected_pages, MAX_PAGES_KMALLOC); 43 unsigned pages = min_t(unsigned, expected_pages,
44 layout->max_io_length / PAGE_SIZE);
46 45
47 /* TODO: easily support bio chaining */
48 pages = min_t(unsigned, pages, layout->max_io_length / PAGE_SIZE);
49 return pages; 46 return pages;
50} 47}
51 48
@@ -101,7 +98,8 @@ static void _pcol_reset(struct page_collect *pcol)
101 * it might not end here. don't be left with nothing 98 * it might not end here. don't be left with nothing
102 */ 99 */
103 if (!pcol->expected_pages) 100 if (!pcol->expected_pages)
104 pcol->expected_pages = MAX_PAGES_KMALLOC; 101 pcol->expected_pages =
102 exofs_max_io_pages(&pcol->sbi->layout, ~0);
105} 103}
106 104
107static int pcol_try_alloc(struct page_collect *pcol) 105static int pcol_try_alloc(struct page_collect *pcol)
@@ -389,6 +387,8 @@ static int readpage_strip(void *data, struct page *page)
389 size_t len; 387 size_t len;
390 int ret; 388 int ret;
391 389
390 BUG_ON(!PageLocked(page));
391
392 /* FIXME: Just for debugging, will be removed */ 392 /* FIXME: Just for debugging, will be removed */
393 if (PageUptodate(page)) 393 if (PageUptodate(page))
394 EXOFS_ERR("PageUptodate(0x%lx, 0x%lx)\n", pcol->inode->i_ino, 394 EXOFS_ERR("PageUptodate(0x%lx, 0x%lx)\n", pcol->inode->i_ino,
@@ -572,8 +572,16 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
572 572
573 if (!pcol->that_locked_page || 573 if (!pcol->that_locked_page ||
574 (pcol->that_locked_page->index != index)) { 574 (pcol->that_locked_page->index != index)) {
575 struct page *page = find_get_page(pcol->inode->i_mapping, index); 575 struct page *page;
576 loff_t i_size = i_size_read(pcol->inode);
577
578 if (offset >= i_size) {
579 *uptodate = true;
580 EXOFS_DBGMSG("offset >= i_size index=0x%lx\n", index);
581 return ZERO_PAGE(0);
582 }
576 583
584 page = find_get_page(pcol->inode->i_mapping, index);
577 if (!page) { 585 if (!page) {
578 page = find_or_create_page(pcol->inode->i_mapping, 586 page = find_or_create_page(pcol->inode->i_mapping,
579 index, GFP_NOFS); 587 index, GFP_NOFS);
@@ -602,12 +610,13 @@ static void __r4w_put_page(void *priv, struct page *page)
602{ 610{
603 struct page_collect *pcol = priv; 611 struct page_collect *pcol = priv;
604 612
605 if (pcol->that_locked_page != page) { 613 if ((pcol->that_locked_page != page) && (ZERO_PAGE(0) != page)) {
606 EXOFS_DBGMSG("index=0x%lx\n", page->index); 614 EXOFS_DBGMSG("index=0x%lx\n", page->index);
607 page_cache_release(page); 615 page_cache_release(page);
608 return; 616 return;
609 } 617 }
610 EXOFS_DBGMSG("that_locked_page index=0x%lx\n", page->index); 618 EXOFS_DBGMSG("that_locked_page index=0x%lx\n",
619 ZERO_PAGE(0) == page ? -1 : page->index);
611} 620}
612 621
613static const struct _ore_r4w_op _r4w_op = { 622static const struct _ore_r4w_op _r4w_op = {
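After the change above, exofs_max_io_pages() simply clamps the caller's expected page count to what one ORE I/O can carry, layout->max_io_length / PAGE_SIZE, instead of the old kmalloc-derived MAX_PAGES_KMALLOC cap. A quick worked example of that clamp; the layout value and page size are assumptions for illustration only:

    /* Worked example of the clamp in exofs_max_io_pages(); the max I/O
     * length and page size below are assumed, not from the filesystem.
     */
    #include <stdio.h>

    #define PAGE_SIZE 4096U                         /* assumed */

    static unsigned max_io_pages(unsigned max_io_length, unsigned expected_pages)
    {
            unsigned cap = max_io_length / PAGE_SIZE;

            return expected_pages < cap ? expected_pages : cap;
    }

    int main(void)
    {
            /* a 2 MiB max I/O length caps a 1024-page request at 512 pages */
            printf("%u\n", max_io_pages(2U * 1024 * 1024, 1024));
            return 0;
    }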
diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
index 24a49d47e935..1585db1aa365 100644
--- a/fs/exofs/ore.c
+++ b/fs/exofs/ore.c
@@ -837,11 +837,11 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
837 bio->bi_rw |= REQ_WRITE; 837 bio->bi_rw |= REQ_WRITE;
838 } 838 }
839 839
840 osd_req_write(or, _ios_obj(ios, dev), per_dev->offset, 840 osd_req_write(or, _ios_obj(ios, cur_comp),
841 bio, per_dev->length); 841 per_dev->offset, bio, per_dev->length);
842 ORE_DBGMSG("write(0x%llx) offset=0x%llx " 842 ORE_DBGMSG("write(0x%llx) offset=0x%llx "
843 "length=0x%llx dev=%d\n", 843 "length=0x%llx dev=%d\n",
844 _LLU(_ios_obj(ios, dev)->id), 844 _LLU(_ios_obj(ios, cur_comp)->id),
845 _LLU(per_dev->offset), 845 _LLU(per_dev->offset),
846 _LLU(per_dev->length), dev); 846 _LLU(per_dev->length), dev);
847 } else if (ios->kern_buff) { 847 } else if (ios->kern_buff) {
@@ -853,20 +853,20 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
853 (ios->si.unit_off + ios->length > 853 (ios->si.unit_off + ios->length >
854 ios->layout->stripe_unit)); 854 ios->layout->stripe_unit));
855 855
856 ret = osd_req_write_kern(or, _ios_obj(ios, per_dev->dev), 856 ret = osd_req_write_kern(or, _ios_obj(ios, cur_comp),
857 per_dev->offset, 857 per_dev->offset,
858 ios->kern_buff, ios->length); 858 ios->kern_buff, ios->length);
859 if (unlikely(ret)) 859 if (unlikely(ret))
860 goto out; 860 goto out;
861 ORE_DBGMSG2("write_kern(0x%llx) offset=0x%llx " 861 ORE_DBGMSG2("write_kern(0x%llx) offset=0x%llx "
862 "length=0x%llx dev=%d\n", 862 "length=0x%llx dev=%d\n",
863 _LLU(_ios_obj(ios, dev)->id), 863 _LLU(_ios_obj(ios, cur_comp)->id),
864 _LLU(per_dev->offset), 864 _LLU(per_dev->offset),
865 _LLU(ios->length), per_dev->dev); 865 _LLU(ios->length), per_dev->dev);
866 } else { 866 } else {
867 osd_req_set_attributes(or, _ios_obj(ios, dev)); 867 osd_req_set_attributes(or, _ios_obj(ios, cur_comp));
868 ORE_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n", 868 ORE_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n",
869 _LLU(_ios_obj(ios, dev)->id), 869 _LLU(_ios_obj(ios, cur_comp)->id),
870 ios->out_attr_len, dev); 870 ios->out_attr_len, dev);
871 } 871 }
872 872
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index 433783624d10..dde41a75c7c8 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -400,8 +400,6 @@ static int exofs_sync_fs(struct super_block *sb, int wait)
400 ret = ore_write(ios); 400 ret = ore_write(ios);
401 if (unlikely(ret)) 401 if (unlikely(ret))
402 EXOFS_ERR("%s: ore_write failed.\n", __func__); 402 EXOFS_ERR("%s: ore_write failed.\n", __func__);
403 else
404 sb->s_dirt = 0;
405 403
406 404
407 unlock_super(sb); 405 unlock_super(sb);
@@ -412,14 +410,6 @@ out:
412 return ret; 410 return ret;
413} 411}
414 412
415static void exofs_write_super(struct super_block *sb)
416{
417 if (!(sb->s_flags & MS_RDONLY))
418 exofs_sync_fs(sb, 1);
419 else
420 sb->s_dirt = 0;
421}
422
423static void _exofs_print_device(const char *msg, const char *dev_path, 413static void _exofs_print_device(const char *msg, const char *dev_path,
424 struct osd_dev *od, u64 pid) 414 struct osd_dev *od, u64 pid)
425{ 415{
@@ -952,7 +942,6 @@ static const struct super_operations exofs_sops = {
952 .write_inode = exofs_write_inode, 942 .write_inode = exofs_write_inode,
953 .evict_inode = exofs_evict_inode, 943 .evict_inode = exofs_evict_inode,
954 .put_super = exofs_put_super, 944 .put_super = exofs_put_super,
955 .write_super = exofs_write_super,
956 .sync_fs = exofs_sync_fs, 945 .sync_fs = exofs_sync_fs,
957 .statfs = exofs_statfs, 946 .statfs = exofs_statfs,
958}; 947};
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 9a4a5c48b1c9..a07597307fd1 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -3459,14 +3459,6 @@ ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
3459 * inode out, but prune_icache isn't a user-visible syncing function. 3459 * inode out, but prune_icache isn't a user-visible syncing function.
3460 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync) 3460 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
3461 * we start and wait on commits. 3461 * we start and wait on commits.
3462 *
3463 * Is this efficient/effective? Well, we're being nice to the system
3464 * by cleaning up our inodes proactively so they can be reaped
3465 * without I/O. But we are potentially leaving up to five seconds'
3466 * worth of inodes floating about which prune_icache wants us to
3467 * write out. One way to fix that would be to get prune_icache()
3468 * to do a write_super() to free up some memory. It has the desired
3469 * effect.
3470 */ 3462 */
3471int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode) 3463int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
3472{ 3464{
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index ff9bcdc5b0d5..8c892e93d8e7 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -64,11 +64,6 @@ static int ext3_freeze(struct super_block *sb);
64 64
65/* 65/*
66 * Wrappers for journal_start/end. 66 * Wrappers for journal_start/end.
67 *
68 * The only special thing we need to do here is to make sure that all
69 * journal_end calls result in the superblock being marked dirty, so
70 * that sync() will call the filesystem's write_super callback if
71 * appropriate.
72 */ 67 */
73handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks) 68handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks)
74{ 69{
@@ -90,12 +85,6 @@ handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks)
90 return journal_start(journal, nblocks); 85 return journal_start(journal, nblocks);
91} 86}
92 87
93/*
94 * The only special thing we need to do here is to make sure that all
95 * journal_stop calls result in the superblock being marked dirty, so
96 * that sync() will call the filesystem's write_super callback if
97 * appropriate.
98 */
99int __ext3_journal_stop(const char *where, handle_t *handle) 88int __ext3_journal_stop(const char *where, handle_t *handle)
100{ 89{
101 struct super_block *sb; 90 struct super_block *sb;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 6324f74e0342..dff171c3a123 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1970,7 +1970,7 @@ static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
1970 * This function can get called via... 1970 * This function can get called via...
1971 * - ext4_da_writepages after taking page lock (have journal handle) 1971 * - ext4_da_writepages after taking page lock (have journal handle)
1972 * - journal_submit_inode_data_buffers (no journal handle) 1972 * - journal_submit_inode_data_buffers (no journal handle)
1973 * - shrink_page_list via pdflush (no journal handle) 1973 * - shrink_page_list via the kswapd/direct reclaim (no journal handle)
1974 * - grab_page_cache when doing write_begin (have journal handle) 1974 * - grab_page_cache when doing write_begin (have journal handle)
1975 * 1975 *
1976 * We don't do any block allocation in this function. If we have page with 1976 * We don't do any block allocation in this function. If we have page with
@@ -4589,14 +4589,6 @@ static int ext4_expand_extra_isize(struct inode *inode,
4589 * inode out, but prune_icache isn't a user-visible syncing function. 4589 * inode out, but prune_icache isn't a user-visible syncing function.
4590 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync) 4590 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
4591 * we start and wait on commits. 4591 * we start and wait on commits.
4592 *
4593 * Is this efficient/effective? Well, we're being nice to the system
4594 * by cleaning up our inodes proactively so they can be reaped
4595 * without I/O. But we are potentially leaving up to five seconds'
4596 * worth of inodes floating about which prune_icache wants us to
4597 * write out. One way to fix that would be to get prune_icache()
4598 * to do a write_super() to free up some memory. It has the desired
4599 * effect.
4600 */ 4592 */
4601int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) 4593int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
4602{ 4594{
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index d76ec8277d3f..3e0851e4f468 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -326,11 +326,6 @@ static void ext4_put_nojournal(handle_t *handle)
326 326
327/* 327/*
328 * Wrappers for jbd2_journal_start/end. 328 * Wrappers for jbd2_journal_start/end.
329 *
330 * The only special thing we need to do here is to make sure that all
331 * journal_end calls result in the superblock being marked dirty, so
332 * that sync() will call the filesystem's write_super callback if
333 * appropriate.
334 */ 329 */
335handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks) 330handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
336{ 331{
@@ -356,12 +351,6 @@ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
356 return jbd2_journal_start(journal, nblocks); 351 return jbd2_journal_start(journal, nblocks);
357} 352}
358 353
359/*
360 * The only special thing we need to do here is to make sure that all
361 * jbd2_journal_stop calls result in the superblock being marked dirty, so
362 * that sync() will call the filesystem's write_super callback if
363 * appropriate.
364 */
365int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle) 354int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
366{ 355{
367 struct super_block *sb; 356 struct super_block *sb;
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 3a56c8d94de0..22255d96b27e 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -52,7 +52,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
52 /* 52 /*
53 * If it's a fully non-blocking write attempt and we cannot 53 * If it's a fully non-blocking write attempt and we cannot
54 * lock the buffer then redirty the page. Note that this can 54 * lock the buffer then redirty the page. Note that this can
55 * potentially cause a busy-wait loop from pdflush and kswapd 55 * potentially cause a busy-wait loop from flusher thread and kswapd
56 * activity, but those code paths have their own higher-level 56 * activity, but those code paths have their own higher-level
57 * throttling. 57 * throttling.
58 */ 58 */
diff --git a/fs/hfs/mdb.c b/fs/hfs/mdb.c
index 5fd51a5833ff..b7ec224910c5 100644
--- a/fs/hfs/mdb.c
+++ b/fs/hfs/mdb.c
@@ -236,10 +236,10 @@ out:
236 * hfs_mdb_commit() 236 * hfs_mdb_commit()
237 * 237 *
238 * Description: 238 * Description:
239 * This updates the MDB on disk (look also at hfs_write_super()). 239 * This updates the MDB on disk.
240 * It does not check, if the superblock has been modified, or 240 * It does not check, if the superblock has been modified, or
241 * if the filesystem has been mounted read-only. It is mainly 241 * if the filesystem has been mounted read-only. It is mainly
242 * called by hfs_write_super() and hfs_btree_extend(). 242 * called by hfs_sync_fs() and flush_mdb().
243 * Input Variable(s): 243 * Input Variable(s):
244 * struct hfs_mdb *mdb: Pointer to the hfs MDB 244 * struct hfs_mdb *mdb: Pointer to the hfs MDB
245 * int backup; 245 * int backup;
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 425c2f2cf170..09357508ec9a 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -534,8 +534,8 @@ int journal_start_commit(journal_t *journal, tid_t *ptid)
534 ret = 1; 534 ret = 1;
535 } else if (journal->j_committing_transaction) { 535 } else if (journal->j_committing_transaction) {
536 /* 536 /*
537 * If ext3_write_super() recently started a commit, then we 537 * If commit has been started, then we have to wait for
538 * have to wait for completion of that transaction 538 * completion of that transaction.
539 */ 539 */
540 if (ptid) 540 if (ptid)
541 *ptid = journal->j_committing_transaction->t_tid; 541 *ptid = journal->j_committing_transaction->t_tid;
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index e9a3c4c85594..8625da27eccf 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -612,8 +612,8 @@ int jbd2_journal_start_commit(journal_t *journal, tid_t *ptid)
612 ret = 1; 612 ret = 1;
613 } else if (journal->j_committing_transaction) { 613 } else if (journal->j_committing_transaction) {
614 /* 614 /*
615 * If ext3_write_super() recently started a commit, then we 615 * If commit has been started, then we have to wait for
616 * have to wait for completion of that transaction 616 * completion of that transaction.
617 */ 617 */
618 if (ptid) 618 if (ptid)
619 *ptid = journal->j_committing_transaction->t_tid; 619 *ptid = journal->j_committing_transaction->t_tid;
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 6522cac6057c..6a10812711c1 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -676,17 +676,13 @@ static const struct super_operations nilfs_sops = {
676 .alloc_inode = nilfs_alloc_inode, 676 .alloc_inode = nilfs_alloc_inode,
677 .destroy_inode = nilfs_destroy_inode, 677 .destroy_inode = nilfs_destroy_inode,
678 .dirty_inode = nilfs_dirty_inode, 678 .dirty_inode = nilfs_dirty_inode,
679 /* .write_inode = nilfs_write_inode, */
680 /* .drop_inode = nilfs_drop_inode, */
681 .evict_inode = nilfs_evict_inode, 679 .evict_inode = nilfs_evict_inode,
682 .put_super = nilfs_put_super, 680 .put_super = nilfs_put_super,
683 /* .write_super = nilfs_write_super, */
684 .sync_fs = nilfs_sync_fs, 681 .sync_fs = nilfs_sync_fs,
685 .freeze_fs = nilfs_freeze, 682 .freeze_fs = nilfs_freeze,
686 .unfreeze_fs = nilfs_unfreeze, 683 .unfreeze_fs = nilfs_unfreeze,
687 .statfs = nilfs_statfs, 684 .statfs = nilfs_statfs,
688 .remount_fs = nilfs_remount, 685 .remount_fs = nilfs_remount,
689 /* .umount_begin */
690 .show_options = nilfs_show_options 686 .show_options = nilfs_show_options
691}; 687};
692 688
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index 6eee4177807b..be1267a34cea 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -107,8 +107,6 @@ struct the_nilfs {
107 * used for 107 * used for
108 * - loading the latest checkpoint exclusively. 108 * - loading the latest checkpoint exclusively.
109 * - allocating a new full segment. 109 * - allocating a new full segment.
110 * - protecting s_dirt in the super_block struct
111 * (see nilfs_write_super) and the following fields.
112 */ 110 */
113 struct buffer_head *ns_sbh[2]; 111 struct buffer_head *ns_sbh[2];
114 struct nilfs_super_block *ns_sbp[2]; 112 struct nilfs_super_block *ns_sbp[2];
diff --git a/fs/open.c b/fs/open.c
index f3d96e7e7b19..bc132e167d2d 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -717,7 +717,7 @@ cleanup_all:
717 * here, so just reset the state. 717 * here, so just reset the state.
718 */ 718 */
719 file_reset_write(f); 719 file_reset_write(f);
720 mnt_drop_write(f->f_path.mnt); 720 __mnt_drop_write(f->f_path.mnt);
721 } 721 }
722 } 722 }
723cleanup_file: 723cleanup_file:
diff --git a/fs/super.c b/fs/super.c
index b05cf47463d0..0902cfa6a12e 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -537,46 +537,6 @@ void drop_super(struct super_block *sb)
537EXPORT_SYMBOL(drop_super); 537EXPORT_SYMBOL(drop_super);
538 538
539/** 539/**
540 * sync_supers - helper for periodic superblock writeback
541 *
542 * Call the write_super method if present on all dirty superblocks in
543 * the system. This is for the periodic writeback used by most older
544 * filesystems. For data integrity superblock writeback use
545 * sync_filesystems() instead.
546 *
547 * Note: check the dirty flag before waiting, so we don't
548 * hold up the sync while mounting a device. (The newly
549 * mounted device won't need syncing.)
550 */
551void sync_supers(void)
552{
553 struct super_block *sb, *p = NULL;
554
555 spin_lock(&sb_lock);
556 list_for_each_entry(sb, &super_blocks, s_list) {
557 if (hlist_unhashed(&sb->s_instances))
558 continue;
559 if (sb->s_op->write_super && sb->s_dirt) {
560 sb->s_count++;
561 spin_unlock(&sb_lock);
562
563 down_read(&sb->s_umount);
564 if (sb->s_root && sb->s_dirt && (sb->s_flags & MS_BORN))
565 sb->s_op->write_super(sb);
566 up_read(&sb->s_umount);
567
568 spin_lock(&sb_lock);
569 if (p)
570 __put_super(p);
571 p = sb;
572 }
573 }
574 if (p)
575 __put_super(p);
576 spin_unlock(&sb_lock);
577}
578
579/**
580 * iterate_supers - call function for all active superblocks 540 * iterate_supers - call function for all active superblocks
581 * @f: function to call 541 * @f: function to call
582 * @arg: argument to pass to it 542 * @arg: argument to pass to it
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 35389ca2d267..7bd6e72afd11 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -37,11 +37,11 @@
37 * 37 *
38 * A thing to keep in mind: inode @i_mutex is locked in most VFS operations we 38 * A thing to keep in mind: inode @i_mutex is locked in most VFS operations we
39 * implement. However, this is not true for 'ubifs_writepage()', which may be 39 * implement. However, this is not true for 'ubifs_writepage()', which may be
40 * called with @i_mutex unlocked. For example, when pdflush is doing background 40 * called with @i_mutex unlocked. For example, when flusher thread is doing
41 * write-back, it calls 'ubifs_writepage()' with unlocked @i_mutex. At "normal" 41 * background write-back, it calls 'ubifs_writepage()' with unlocked @i_mutex.
42 * work-paths the @i_mutex is locked in 'ubifs_writepage()', e.g. in the 42 * At "normal" work-paths the @i_mutex is locked in 'ubifs_writepage()', e.g.
43 * "sys_write -> alloc_pages -> direct reclaim path". So, in 'ubifs_writepage()' 43 * in the "sys_write -> alloc_pages -> direct reclaim path". So, in
44 * we are only guaranteed that the page is locked. 44 * 'ubifs_writepage()' we are only guaranteed that the page is locked.
45 * 45 *
46 * Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the 46 * Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the
47 * read-ahead path does not lock it ("sys_read -> generic_file_aio_read -> 47 * read-ahead path does not lock it ("sys_read -> generic_file_aio_read ->
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 1c766c39c038..c3fa6c5327a3 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -303,7 +303,7 @@ static int ubifs_write_inode(struct inode *inode, struct writeback_control *wbc)
303 mutex_lock(&ui->ui_mutex); 303 mutex_lock(&ui->ui_mutex);
304 /* 304 /*
305 * Due to races between write-back forced by budgeting 305 * Due to races between write-back forced by budgeting
306 * (see 'sync_some_inodes()') and pdflush write-back, the inode may 306 * (see 'sync_some_inodes()') and background write-back, the inode may
307 * have already been synchronized, do not do this again. This might 307 * have already been synchronized, do not do this again. This might
308 * also happen if it was synchronized in an VFS operation, e.g. 308 * also happen if it was synchronized in an VFS operation, e.g.
309 * 'ubifs_link()'. 309 * 'ubifs_link()'.
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 2c744c7a5b3d..26a92fc28a59 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -491,11 +491,11 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 * slp_typ_a, u8 * slp_typ_b);
491 491
492acpi_status acpi_enter_sleep_state_prep(u8 sleep_state); 492acpi_status acpi_enter_sleep_state_prep(u8 sleep_state);
493 493
494acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags); 494acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state);
495 495
496ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)) 496ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void))
497 497
498acpi_status acpi_leave_sleep_state_prep(u8 sleep_state, u8 flags); 498acpi_status acpi_leave_sleep_state_prep(u8 sleep_state);
499 499
500acpi_status acpi_leave_sleep_state(u8 sleep_state); 500acpi_status acpi_leave_sleep_state(u8 sleep_state);
501 501
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 3af87de6a68c..3d00bd5bd7e3 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -803,7 +803,7 @@ typedef u8 acpi_adr_space_type;
803 803
804/* Sleep function dispatch */ 804/* Sleep function dispatch */
805 805
806typedef acpi_status(*ACPI_SLEEP_FUNCTION) (u8 sleep_state, u8 flags); 806typedef acpi_status(*ACPI_SLEEP_FUNCTION) (u8 sleep_state);
807 807
808struct acpi_sleep_functions { 808struct acpi_sleep_functions {
809 ACPI_SLEEP_FUNCTION legacy_function; 809 ACPI_SLEEP_FUNCTION legacy_function;
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 3ad510b25283..4f2a76224509 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -96,7 +96,7 @@ void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
96void acpi_numa_slit_init (struct acpi_table_slit *slit); 96void acpi_numa_slit_init (struct acpi_table_slit *slit);
97void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); 97void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
98void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa); 98void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa);
99void acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma); 99int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
100void acpi_numa_arch_fixup(void); 100void acpi_numa_arch_fixup(void);
101 101
102#ifdef CONFIG_ACPI_HOTPLUG_CPU 102#ifdef CONFIG_ACPI_HOTPLUG_CPU
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index c97c6b9cd38e..2a9a9abc9126 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -124,7 +124,6 @@ void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
124void bdi_start_background_writeback(struct backing_dev_info *bdi); 124void bdi_start_background_writeback(struct backing_dev_info *bdi);
125int bdi_writeback_thread(void *data); 125int bdi_writeback_thread(void *data);
126int bdi_has_dirty_io(struct backing_dev_info *bdi); 126int bdi_has_dirty_io(struct backing_dev_info *bdi);
127void bdi_arm_supers_timer(void);
128void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi); 127void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
129void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2); 128void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2);
130 129
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index 3c80885fa829..d323a4b4143c 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -89,6 +89,12 @@
89#define BCMA_CC_CHIPST_4313_OTP_PRESENT 2 89#define BCMA_CC_CHIPST_4313_OTP_PRESENT 2
90#define BCMA_CC_CHIPST_4331_SPROM_PRESENT 2 90#define BCMA_CC_CHIPST_4331_SPROM_PRESENT 2
91#define BCMA_CC_CHIPST_4331_OTP_PRESENT 4 91#define BCMA_CC_CHIPST_4331_OTP_PRESENT 4
92#define BCMA_CC_CHIPST_43228_ILP_DIV_EN 0x00000001
93#define BCMA_CC_CHIPST_43228_OTP_PRESENT 0x00000002
94#define BCMA_CC_CHIPST_43228_SERDES_REFCLK_PADSEL 0x00000004
95#define BCMA_CC_CHIPST_43228_SDIO_MODE 0x00000008
96#define BCMA_CC_CHIPST_43228_SDIO_OTP_PRESENT 0x00000010
97#define BCMA_CC_CHIPST_43228_SDIO_RESET 0x00000020
92#define BCMA_CC_CHIPST_4706_PKG_OPTION BIT(0) /* 0: full-featured package 1: low-cost package */ 98#define BCMA_CC_CHIPST_4706_PKG_OPTION BIT(0) /* 0: full-featured package 1: low-cost package */
93#define BCMA_CC_CHIPST_4706_SFLASH_PRESENT BIT(1) /* 0: parallel, 1: serial flash is present */ 99#define BCMA_CC_CHIPST_4706_SFLASH_PRESENT BIT(1) /* 0: parallel, 1: serial flash is present */
94#define BCMA_CC_CHIPST_4706_SFLASH_TYPE BIT(2) /* 0: 8b-p/ST-s flash, 1: 16b-p/Atmal-s flash */ 100#define BCMA_CC_CHIPST_4706_SFLASH_TYPE BIT(2) /* 0: 8b-p/ST-s flash, 1: 16b-p/Atmal-s flash */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 38dba16c4176..aa110476a95b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1491,7 +1491,6 @@ struct sb_writers {
1491struct super_block { 1491struct super_block {
1492 struct list_head s_list; /* Keep this first */ 1492 struct list_head s_list; /* Keep this first */
1493 dev_t s_dev; /* search index; _not_ kdev_t */ 1493 dev_t s_dev; /* search index; _not_ kdev_t */
1494 unsigned char s_dirt;
1495 unsigned char s_blocksize_bits; 1494 unsigned char s_blocksize_bits;
1496 unsigned long s_blocksize; 1495 unsigned long s_blocksize;
1497 loff_t s_maxbytes; /* Max file size */ 1496 loff_t s_maxbytes; /* Max file size */
@@ -1861,7 +1860,6 @@ struct super_operations {
1861 int (*drop_inode) (struct inode *); 1860 int (*drop_inode) (struct inode *);
1862 void (*evict_inode) (struct inode *); 1861 void (*evict_inode) (struct inode *);
1863 void (*put_super) (struct super_block *); 1862 void (*put_super) (struct super_block *);
1864 void (*write_super) (struct super_block *);
1865 int (*sync_fs)(struct super_block *sb, int wait); 1863 int (*sync_fs)(struct super_block *sb, int wait);
1866 int (*freeze_fs) (struct super_block *); 1864 int (*freeze_fs) (struct super_block *);
1867 int (*unfreeze_fs) (struct super_block *); 1865 int (*unfreeze_fs) (struct super_block *);
@@ -2397,7 +2395,6 @@ extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
2397 int datasync); 2395 int datasync);
2398extern int vfs_fsync(struct file *file, int datasync); 2396extern int vfs_fsync(struct file *file, int datasync);
2399extern int generic_write_sync(struct file *file, loff_t pos, loff_t count); 2397extern int generic_write_sync(struct file *file, loff_t pos, loff_t count);
2400extern void sync_supers(void);
2401extern void emergency_sync(void); 2398extern void emergency_sync(void);
2402extern void emergency_remount(void); 2399extern void emergency_remount(void);
2403#ifdef CONFIG_BLOCK 2400#ifdef CONFIG_BLOCK
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index af961d6f7ab1..642928cf57b4 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -306,9 +306,10 @@ extern void *perf_trace_buf_prepare(int size, unsigned short type,
306 306
307static inline void 307static inline void
308perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr, 308perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
309 u64 count, struct pt_regs *regs, void *head) 309 u64 count, struct pt_regs *regs, void *head,
310 struct task_struct *task)
310{ 311{
311 perf_tp_event(addr, count, raw_data, size, regs, head, rctx); 312 perf_tp_event(addr, count, raw_data, size, regs, head, rctx, task);
312} 313}
313#endif 314#endif
314 315
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index bb7f30971858..305f23cd7cff 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -22,7 +22,7 @@
22 * 22 *
23 * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024) 23 * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
24 * - bit 26 is the NMI_MASK 24 * - bit 26 is the NMI_MASK
25 * - bit 28 is the PREEMPT_ACTIVE flag 25 * - bit 27 is the PREEMPT_ACTIVE flag
26 * 26 *
27 * PREEMPT_MASK: 0x000000ff 27 * PREEMPT_MASK: 0x000000ff
28 * SOFTIRQ_MASK: 0x0000ff00 28 * SOFTIRQ_MASK: 0x0000ff00
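The corrected comment now agrees with the mask layout spelled out in the rest of this header. As a rough self-contained illustration of that layout (mask values assumed from the bit positions described above, not quoted from this patch), compilable as plain userspace C:

#include <stdio.h>

/* Illustration only: the preempt_count layout the fixed comment describes. */
#define PREEMPT_MASK   0x000000ffUL  /* bits  0-7  : preemption count  */
#define SOFTIRQ_MASK   0x0000ff00UL  /* bits  8-15 : softirq count     */
#define HARDIRQ_MASK   0x03ff0000UL  /* bits 16-25 : hardirq count     */
#define NMI_MASK       0x04000000UL  /* bit  26    : in-NMI flag       */
#define PREEMPT_ACTIVE 0x08000000UL  /* bit  27    : the value this hunk documents */

int main(void)
{
	/* prints 27, confirming PREEMPT_ACTIVE sits one bit above NMI_MASK */
	printf("PREEMPT_ACTIVE is bit %d\n", __builtin_ctzl(PREEMPT_ACTIVE));
	return 0;
}

Helpers such as in_nmi() simply test these bits, which is why the kdb change further down adds an include of <linux/hardirq.h> before calling in_nmi().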
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 54d6d690073c..7e83370e6fd2 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -20,6 +20,7 @@
20#define __LINUX_IOMMU_H 20#define __LINUX_IOMMU_H
21 21
22#include <linux/errno.h> 22#include <linux/errno.h>
23#include <linux/types.h>
23 24
24#define IOMMU_READ (1) 25#define IOMMU_READ (1)
25#define IOMMU_WRITE (2) 26#define IOMMU_WRITE (2)
@@ -30,6 +31,7 @@ struct iommu_group;
30struct bus_type; 31struct bus_type;
31struct device; 32struct device;
32struct iommu_domain; 33struct iommu_domain;
34struct notifier_block;
33 35
34/* iommu fault flags */ 36/* iommu fault flags */
35#define IOMMU_FAULT_READ 0x0 37#define IOMMU_FAULT_READ 0x0
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 379e433e15e0..879db26ec401 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -369,6 +369,7 @@ struct ipv6_pinfo {
369 __u8 rcv_tclass; 369 __u8 rcv_tclass;
370 370
371 __u32 dst_cookie; 371 __u32 dst_cookie;
372 __u32 rx_dst_cookie;
372 373
373 struct ipv6_mc_socklist __rcu *ipv6_mc_list; 374 struct ipv6_mc_socklist __rcu *ipv6_mc_list;
374 struct ipv6_ac_socklist *ipv6_ac_list; 375 struct ipv6_ac_socklist *ipv6_ac_list;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 553fb66da130..216b0ba109d7 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -349,6 +349,7 @@ enum {
349 IRQCHIP_MASK_ON_SUSPEND = (1 << 2), 349 IRQCHIP_MASK_ON_SUSPEND = (1 << 2),
350 IRQCHIP_ONOFFLINE_ENABLED = (1 << 3), 350 IRQCHIP_ONOFFLINE_ENABLED = (1 << 3),
351 IRQCHIP_SKIP_SET_WAKE = (1 << 4), 351 IRQCHIP_SKIP_SET_WAKE = (1 << 4),
352 IRQCHIP_ONESHOT_SAFE = (1 << 5),
352}; 353};
353 354
354/* This include will go away once we isolated irq_desc usage to core code */ 355/* This include will go away once we isolated irq_desc usage to core code */
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 265e2c3cbd1c..82680541576d 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -39,9 +39,6 @@
39# error Invalid value of HZ. 39# error Invalid value of HZ.
40#endif 40#endif
41 41
42/* LATCH is used in the interval timer and ftape setup. */
43#define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */
44
45/* Suppose we want to divide two numbers NOM and DEN: NOM/DEN, then we can 42/* Suppose we want to divide two numbers NOM and DEN: NOM/DEN, then we can
46 * improve accuracy by shifting LSH bits, hence calculating: 43 * improve accuracy by shifting LSH bits, hence calculating:
47 * (NOM << LSH) / DEN 44 * (NOM << LSH) / DEN
@@ -54,18 +51,30 @@
54#define SH_DIV(NOM,DEN,LSH) ( (((NOM) / (DEN)) << (LSH)) \ 51#define SH_DIV(NOM,DEN,LSH) ( (((NOM) / (DEN)) << (LSH)) \
55 + ((((NOM) % (DEN)) << (LSH)) + (DEN) / 2) / (DEN)) 52 + ((((NOM) % (DEN)) << (LSH)) + (DEN) / 2) / (DEN))
56 53
57/* HZ is the requested value. ACTHZ is actual HZ ("<< 8" is for accuracy) */ 54#ifdef CLOCK_TICK_RATE
58#define ACTHZ (SH_DIV (CLOCK_TICK_RATE, LATCH, 8)) 55/* LATCH is used in the interval timer and ftape setup. */
56# define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */
57
58/*
59 * HZ is the requested value. However the CLOCK_TICK_RATE may not allow
60 * for exactly HZ. So SHIFTED_HZ is high res HZ ("<< 8" is for accuracy)
61 */
62# define SHIFTED_HZ (SH_DIV(CLOCK_TICK_RATE, LATCH, 8))
63#else
64# define SHIFTED_HZ (HZ << 8)
65#endif
59 66
60/* TICK_NSEC is the time between ticks in nsec assuming real ACTHZ */ 67/* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */
61#define TICK_NSEC (SH_DIV (1000000UL * 1000, ACTHZ, 8)) 68#define TICK_NSEC (SH_DIV(1000000UL * 1000, SHIFTED_HZ, 8))
62 69
63/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */ 70/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
64#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ) 71#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
65 72
66/* TICK_USEC_TO_NSEC is the time between ticks in nsec assuming real ACTHZ and */ 73/*
67/* a value TUSEC for TICK_USEC (can be set by adjtimex) */ 74 * TICK_USEC_TO_NSEC is the time between ticks in nsec assuming SHIFTED_HZ and
68#define TICK_USEC_TO_NSEC(TUSEC) (SH_DIV (TUSEC * USER_HZ * 1000, ACTHZ, 8)) 75 * a value TUSEC for TICK_USEC (can be set by adjtimex
76 */
77#define TICK_USEC_TO_NSEC(TUSEC) (SH_DIV(TUSEC * USER_HZ * 1000, SHIFTED_HZ, 8))
69 78
70/* some arch's have a small-data section that can be accessed register-relative 79/* some arch's have a small-data section that can be accessed register-relative
71 * but that can only take up to, say, 4-byte variables. jiffies being part of 80 * but that can only take up to, say, 4-byte variables. jiffies being part of
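Making LATCH and SHIFTED_HZ conditional on CLOCK_TICK_RATE leaves the arithmetic itself unchanged. A minimal userspace sketch of that arithmetic, assuming the classic x86 PIT rate of 1193182 Hz and HZ = 1000 (other platforms use different values):

#include <stdio.h>

/* Userspace sketch of the SH_DIV()/SHIFTED_HZ computation shown above,
 * assuming CLOCK_TICK_RATE = 1193182 (i8253 PIT) and HZ = 1000. */
#define CLOCK_TICK_RATE 1193182UL
#define HZ              1000UL
#define SH_DIV(NOM, DEN, LSH) ((((NOM) / (DEN)) << (LSH)) \
		+ ((((NOM) % (DEN)) << (LSH)) + (DEN) / 2) / (DEN))
#define LATCH      ((CLOCK_TICK_RATE + HZ/2) / HZ)
#define SHIFTED_HZ (SH_DIV(CLOCK_TICK_RATE, LATCH, 8))

int main(void)
{
	printf("LATCH=%lu SHIFTED_HZ=%lu TICK_NSEC=%lu\n",
	       LATCH, SHIFTED_HZ, SH_DIV(1000000UL * 1000, SHIFTED_HZ, 8));
	return 0;
}

With these inputs LATCH works out to 1193, SHIFTED_HZ to 256039 (roughly 1000.15 Hz shifted left by 8) and TICK_NSEC to 999848 ns rather than a naive 1000000 ns; platforms without a CLOCK_TICK_RATE now simply get SHIFTED_HZ = HZ << 8.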
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 064725854db8..42d9e863a313 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -75,8 +75,6 @@ extern const char *kdb_diemsg;
75#define KDB_FLAG_CATASTROPHIC (1 << 1) /* A catastrophic event has occurred */ 75#define KDB_FLAG_CATASTROPHIC (1 << 1) /* A catastrophic event has occurred */
76#define KDB_FLAG_CMD_INTERRUPT (1 << 2) /* Previous command was interrupted */ 76#define KDB_FLAG_CMD_INTERRUPT (1 << 2) /* Previous command was interrupted */
77#define KDB_FLAG_NOIPI (1 << 3) /* Do not send IPIs */ 77#define KDB_FLAG_NOIPI (1 << 3) /* Do not send IPIs */
78#define KDB_FLAG_ONLY_DO_DUMP (1 << 4) /* Only do a dump, used when
79 * kdb is off */
80#define KDB_FLAG_NO_CONSOLE (1 << 5) /* No console is available, 78#define KDB_FLAG_NO_CONSOLE (1 << 5) /* No console is available,
81 * kdb is disabled */ 79 * kdb is disabled */
82#define KDB_FLAG_NO_VT_CONSOLE (1 << 6) /* No VT console is available, do 80#define KDB_FLAG_NO_VT_CONSOLE (1 << 6) /* No VT console is available, do
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index eb06e58bed0b..a9db4f33407f 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1300,6 +1300,8 @@ struct net_device {
1300 /* for setting kernel sock attribute on TCP connection setup */ 1300 /* for setting kernel sock attribute on TCP connection setup */
1301#define GSO_MAX_SIZE 65536 1301#define GSO_MAX_SIZE 65536
1302 unsigned int gso_max_size; 1302 unsigned int gso_max_size;
1303#define GSO_MAX_SEGS 65535
1304 u16 gso_max_segs;
1303 1305
1304#ifdef CONFIG_DCB 1306#ifdef CONFIG_DCB
1305 /* Data Center Bridging netlink ops */ 1307 /* Data Center Bridging netlink ops */
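GSO_MAX_SEGS mirrors GSO_MAX_SIZE: it is the default cap, and drivers whose hardware cannot take 65535 descriptors per GSO packet can lower it. A hypothetical driver fragment (the function name and the chosen limit are illustrative only):

#include <linux/netdevice.h>

/* Hypothetical probe-time fragment: advertise a hardware segment limit. */
static void example_setup_gso_limits(struct net_device *dev)
{
	dev->gso_max_size = GSO_MAX_SIZE;  /* keep the default byte cap   */
	dev->gso_max_segs = 100;           /* illustrative hardware limit */
}

The matching sk_gso_max_segs field added to struct sock below lets the stack clamp per-socket segment counts against this value at TCP connection setup.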
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 76c5c8b724a7..7602ccb3f40e 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1272,7 +1272,8 @@ static inline bool perf_paranoid_kernel(void)
1272extern void perf_event_init(void); 1272extern void perf_event_init(void);
1273extern void perf_tp_event(u64 addr, u64 count, void *record, 1273extern void perf_tp_event(u64 addr, u64 count, void *record,
1274 int entry_size, struct pt_regs *regs, 1274 int entry_size, struct pt_regs *regs,
1275 struct hlist_head *head, int rctx); 1275 struct hlist_head *head, int rctx,
1276 struct task_struct *task);
1276extern void perf_bp_event(struct perf_event *event, void *data); 1277extern void perf_bp_event(struct perf_event *event, void *data);
1277 1278
1278#ifndef perf_misc_flags 1279#ifndef perf_misc_flags
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 99bc88b1fc02..7c5ceb20e03a 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -232,7 +232,7 @@ struct timex {
232 * estimated error = NTP dispersion. 232 * estimated error = NTP dispersion.
233 */ 233 */
234extern unsigned long tick_usec; /* USER_HZ period (usec) */ 234extern unsigned long tick_usec; /* USER_HZ period (usec) */
235extern unsigned long tick_nsec; /* ACTHZ period (nsec) */ 235extern unsigned long tick_nsec; /* SHIFTED_HZ period (nsec) */
236 236
237extern void ntp_init(void); 237extern void ntp_init(void);
238extern void ntp_clear(void); 238extern void ntp_clear(void);
diff --git a/include/linux/topology.h b/include/linux/topology.h
index e91cd43394df..fec12d667211 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -164,6 +164,7 @@ int arch_update_cpu_topology(void);
164 | 0*SD_SHARE_CPUPOWER \ 164 | 0*SD_SHARE_CPUPOWER \
165 | 0*SD_SHARE_PKG_RESOURCES \ 165 | 0*SD_SHARE_PKG_RESOURCES \
166 | 0*SD_SERIALIZE \ 166 | 0*SD_SERIALIZE \
167 | 1*SD_PREFER_SIBLING \
167 , \ 168 , \
168 .last_balance = jiffies, \ 169 .last_balance = jiffies, \
169 .balance_interval = 1, \ 170 .balance_interval = 1, \
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index c66fe3332d83..50c3e8fa06a8 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -104,7 +104,6 @@ static inline void wait_on_inode(struct inode *inode)
104 wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE); 104 wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE);
105} 105}
106 106
107
108/* 107/*
109 * mm/page-writeback.c 108 * mm/page-writeback.c
110 */ 109 */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 493fa0c79005..3d254e10ff30 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -96,6 +96,7 @@ enum ieee80211_band {
96 * is not permitted. 96 * is not permitted.
97 * @IEEE80211_CHAN_NO_HT40MINUS: extension channel below this channel 97 * @IEEE80211_CHAN_NO_HT40MINUS: extension channel below this channel
98 * is not permitted. 98 * is not permitted.
99 * @IEEE80211_CHAN_NO_OFDM: OFDM is not allowed on this channel.
99 */ 100 */
100enum ieee80211_channel_flags { 101enum ieee80211_channel_flags {
101 IEEE80211_CHAN_DISABLED = 1<<0, 102 IEEE80211_CHAN_DISABLED = 1<<0,
@@ -104,6 +105,7 @@ enum ieee80211_channel_flags {
104 IEEE80211_CHAN_RADAR = 1<<3, 105 IEEE80211_CHAN_RADAR = 1<<3,
105 IEEE80211_CHAN_NO_HT40PLUS = 1<<4, 106 IEEE80211_CHAN_NO_HT40PLUS = 1<<4,
106 IEEE80211_CHAN_NO_HT40MINUS = 1<<5, 107 IEEE80211_CHAN_NO_HT40MINUS = 1<<5,
108 IEEE80211_CHAN_NO_OFDM = 1<<6,
107}; 109};
108 110
109#define IEEE80211_CHAN_NO_HT40 \ 111#define IEEE80211_CHAN_NO_HT40 \
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 5ee66f517b4f..ba1d3615acbb 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -39,6 +39,7 @@ struct inet_connection_sock_af_ops {
39 int (*queue_xmit)(struct sk_buff *skb, struct flowi *fl); 39 int (*queue_xmit)(struct sk_buff *skb, struct flowi *fl);
40 void (*send_check)(struct sock *sk, struct sk_buff *skb); 40 void (*send_check)(struct sock *sk, struct sk_buff *skb);
41 int (*rebuild_header)(struct sock *sk); 41 int (*rebuild_header)(struct sock *sk);
42 void (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
42 int (*conn_request)(struct sock *sk, struct sk_buff *skb); 43 int (*conn_request)(struct sock *sk, struct sk_buff *skb);
43 struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb, 44 struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb,
44 struct request_sock *req, 45 struct request_sock *req,
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 83b567fe1941..613cfa401672 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -249,13 +249,4 @@ static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
249 return flags; 249 return flags;
250} 250}
251 251
252static inline void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
253{
254 struct dst_entry *dst = skb_dst(skb);
255
256 dst_hold(dst);
257 sk->sk_rx_dst = dst;
258 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
259}
260
261#endif /* _INET_SOCK_H */ 252#endif /* _INET_SOCK_H */
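The inline helper deleted here does not disappear: the new sk_rx_dst_set() member of inet_connection_sock_af_ops above lets each address family supply its own version. A rough sketch of how the IPv4 side could provide it, reusing the removed body (the placement and the ops-table wiring are assumptions of this sketch, not part of the hunks shown):

#include <net/inet_sock.h>
#include <net/dst.h>

/* Sketch: the former inline becomes an ordinary per-family callback. */
static void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	dst_hold(dst);
	sk->sk_rx_dst = dst;
	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
}

/* ...and is then hooked up in the af_ops table, e.g.:
 *	.sk_rx_dst_set = inet_sk_rx_dst_set,
 */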
diff --git a/include/net/sock.h b/include/net/sock.h
index b3730239bf18..72132aef53fc 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -218,6 +218,7 @@ struct cg_proto;
218 * @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK) 218 * @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
219 * @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4) 219 * @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
220 * @sk_gso_max_size: Maximum GSO segment size to build 220 * @sk_gso_max_size: Maximum GSO segment size to build
221 * @sk_gso_max_segs: Maximum number of GSO segments
221 * @sk_lingertime: %SO_LINGER l_linger setting 222 * @sk_lingertime: %SO_LINGER l_linger setting
222 * @sk_backlog: always used with the per-socket spinlock held 223 * @sk_backlog: always used with the per-socket spinlock held
223 * @sk_callback_lock: used with the callbacks in the end of this struct 224 * @sk_callback_lock: used with the callbacks in the end of this struct
@@ -338,6 +339,7 @@ struct sock {
338 netdev_features_t sk_route_nocaps; 339 netdev_features_t sk_route_nocaps;
339 int sk_gso_type; 340 int sk_gso_type;
340 unsigned int sk_gso_max_size; 341 unsigned int sk_gso_max_size;
342 u16 sk_gso_max_segs;
341 int sk_rcvlowat; 343 int sk_rcvlowat;
342 unsigned long sk_lingertime; 344 unsigned long sk_lingertime;
343 struct sk_buff_head sk_error_queue; 345 struct sk_buff_head sk_error_queue;
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index d9509eb29b80..62b619e82a90 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -213,6 +213,9 @@ struct xfrm_state {
213 struct xfrm_lifetime_cur curlft; 213 struct xfrm_lifetime_cur curlft;
214 struct tasklet_hrtimer mtimer; 214 struct tasklet_hrtimer mtimer;
215 215
216 /* used to fix curlft->add_time when changing date */
217 long saved_tmo;
218
216 /* Last used time */ 219 /* Last used time */
217 unsigned long lastused; 220 unsigned long lastused;
218 221
@@ -238,6 +241,7 @@ static inline struct net *xs_net(struct xfrm_state *x)
238 241
239/* xflags - make enum if more show up */ 242/* xflags - make enum if more show up */
240#define XFRM_TIME_DEFER 1 243#define XFRM_TIME_DEFER 1
244#define XFRM_SOFT_EXPIRE 2
241 245
242enum { 246enum {
243 XFRM_STATE_VOID, 247 XFRM_STATE_VOID,
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index ea7a2035456d..5a8671e8a67f 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -73,6 +73,9 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
73 __entry->prio = p->prio; 73 __entry->prio = p->prio;
74 __entry->success = success; 74 __entry->success = success;
75 __entry->target_cpu = task_cpu(p); 75 __entry->target_cpu = task_cpu(p);
76 )
77 TP_perf_assign(
78 __perf_task(p);
76 ), 79 ),
77 80
78 TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d", 81 TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
@@ -325,6 +328,7 @@ DECLARE_EVENT_CLASS(sched_stat_template,
325 ) 328 )
326 TP_perf_assign( 329 TP_perf_assign(
327 __perf_count(delay); 330 __perf_count(delay);
331 __perf_task(tsk);
328 ), 332 ),
329 333
330 TP_printk("comm=%s pid=%d delay=%Lu [ns]", 334 TP_printk("comm=%s pid=%d delay=%Lu [ns]",
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index c6bc2faaf261..a763888a36f9 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -712,6 +712,9 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
712#undef __perf_count 712#undef __perf_count
713#define __perf_count(c) __count = (c) 713#define __perf_count(c) __count = (c)
714 714
715#undef __perf_task
716#define __perf_task(t) __task = (t)
717
715#undef TP_perf_assign 718#undef TP_perf_assign
716#define TP_perf_assign(args...) args 719#define TP_perf_assign(args...) args
717 720
@@ -725,6 +728,7 @@ perf_trace_##call(void *__data, proto) \
725 struct ftrace_raw_##call *entry; \ 728 struct ftrace_raw_##call *entry; \
726 struct pt_regs __regs; \ 729 struct pt_regs __regs; \
727 u64 __addr = 0, __count = 1; \ 730 u64 __addr = 0, __count = 1; \
731 struct task_struct *__task = NULL; \
728 struct hlist_head *head; \ 732 struct hlist_head *head; \
729 int __entry_size; \ 733 int __entry_size; \
730 int __data_size; \ 734 int __data_size; \
@@ -752,7 +756,7 @@ perf_trace_##call(void *__data, proto) \
752 \ 756 \
753 head = this_cpu_ptr(event_call->perf_events); \ 757 head = this_cpu_ptr(event_call->perf_events); \
754 perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \ 758 perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
755 __count, &__regs, head); \ 759 __count, &__regs, head, __task); \
756} 760}
757 761
758/* 762/*
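With __perf_task() defined alongside __perf_count(), a TP_perf_assign() block such as the sched_wakeup one earlier in this series expands roughly as below inside the generated perf_trace_##call() handler (hand-expanded sketch, not literal preprocessor output):

/* Hand-expanded sketch for the sched_wakeup template shown above. */
struct task_struct *__task = NULL;	/* new default introduced by this patch */
u64 __addr = 0, __count = 1;

/* TP_fast_assign() fills in the entry fields, then TP_perf_assign() runs: */
__task = p;				/* from __perf_task(p) */

perf_trace_buf_submit(entry, __entry_size, rctx, __addr,
		      __count, &__regs, head, __task);

Templates that never call __perf_task() leave __task as NULL, so perf_tp_event() behaves exactly as before for them.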
diff --git a/kernel/debug/kdb/kdb_debugger.c b/kernel/debug/kdb/kdb_debugger.c
index 8b68ce78ff17..be7b33b73d30 100644
--- a/kernel/debug/kdb/kdb_debugger.c
+++ b/kernel/debug/kdb/kdb_debugger.c
@@ -12,6 +12,7 @@
12#include <linux/kdb.h> 12#include <linux/kdb.h>
13#include <linux/kdebug.h> 13#include <linux/kdebug.h>
14#include <linux/export.h> 14#include <linux/export.h>
15#include <linux/hardirq.h>
15#include "kdb_private.h" 16#include "kdb_private.h"
16#include "../debug_core.h" 17#include "../debug_core.h"
17 18
@@ -52,6 +53,9 @@ int kdb_stub(struct kgdb_state *ks)
52 if (atomic_read(&kgdb_setting_breakpoint)) 53 if (atomic_read(&kgdb_setting_breakpoint))
53 reason = KDB_REASON_KEYBOARD; 54 reason = KDB_REASON_KEYBOARD;
54 55
56 if (in_nmi())
57 reason = KDB_REASON_NMI;
58
55 for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) { 59 for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) {
56 if ((bp->bp_enabled) && (bp->bp_addr == addr)) { 60 if ((bp->bp_enabled) && (bp->bp_addr == addr)) {
57 reason = KDB_REASON_BREAK; 61 reason = KDB_REASON_BREAK;
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index bb9520f0f6ff..0a69d2adc4f3 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -715,9 +715,6 @@ kdb_printit:
715 /* check for having reached the LINES number of printed lines */ 715 /* check for having reached the LINES number of printed lines */
716 if (kdb_nextline == linecount) { 716 if (kdb_nextline == linecount) {
717 char buf1[16] = ""; 717 char buf1[16] = "";
718#if defined(CONFIG_SMP)
719 char buf2[32];
720#endif
721 718
722 /* Watch out for recursion here. Any routine that calls 719 /* Watch out for recursion here. Any routine that calls
723 * kdb_printf will come back through here. And kdb_read 720 * kdb_printf will come back through here. And kdb_read
@@ -732,14 +729,6 @@ kdb_printit:
732 if (moreprompt == NULL) 729 if (moreprompt == NULL)
733 moreprompt = "more> "; 730 moreprompt = "more> ";
734 731
735#if defined(CONFIG_SMP)
736 if (strchr(moreprompt, '%')) {
737 sprintf(buf2, moreprompt, get_cpu());
738 put_cpu();
739 moreprompt = buf2;
740 }
741#endif
742
743 kdb_input_flush(); 732 kdb_input_flush();
744 c = console_drivers; 733 c = console_drivers;
745 734
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 1f91413edb87..31df1706b9a9 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -139,11 +139,10 @@ static const int __nkdb_err = sizeof(kdbmsgs) / sizeof(kdbmsg_t);
139static char *__env[] = { 139static char *__env[] = {
140#if defined(CONFIG_SMP) 140#if defined(CONFIG_SMP)
141 "PROMPT=[%d]kdb> ", 141 "PROMPT=[%d]kdb> ",
142 "MOREPROMPT=[%d]more> ",
143#else 142#else
144 "PROMPT=kdb> ", 143 "PROMPT=kdb> ",
145 "MOREPROMPT=more> ",
146#endif 144#endif
145 "MOREPROMPT=more> ",
147 "RADIX=16", 146 "RADIX=16",
148 "MDCOUNT=8", /* lines of md output */ 147 "MDCOUNT=8", /* lines of md output */
149 KDB_PLATFORM_ENV, 148 KDB_PLATFORM_ENV,
@@ -1236,18 +1235,6 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
1236 *cmdbuf = '\0'; 1235 *cmdbuf = '\0';
1237 *(cmd_hist[cmd_head]) = '\0'; 1236 *(cmd_hist[cmd_head]) = '\0';
1238 1237
1239 if (KDB_FLAG(ONLY_DO_DUMP)) {
1240 /* kdb is off but a catastrophic error requires a dump.
1241 * Take the dump and reboot.
1242 * Turn on logging so the kdb output appears in the log
1243 * buffer in the dump.
1244 */
1245 const char *setargs[] = { "set", "LOGGING", "1" };
1246 kdb_set(2, setargs);
1247 kdb_reboot(0, NULL);
1248 /*NOTREACHED*/
1249 }
1250
1251do_full_getstr: 1238do_full_getstr:
1252#if defined(CONFIG_SMP) 1239#if defined(CONFIG_SMP)
1253 snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"), 1240 snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"),
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 6581a040f399..98d4597f43d6 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -153,7 +153,8 @@ put_callchain_entry(int rctx)
153 put_recursion_context(__get_cpu_var(callchain_recursion), rctx); 153 put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
154} 154}
155 155
156struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) 156struct perf_callchain_entry *
157perf_callchain(struct perf_event *event, struct pt_regs *regs)
157{ 158{
158 int rctx; 159 int rctx;
159 struct perf_callchain_entry *entry; 160 struct perf_callchain_entry *entry;
@@ -178,6 +179,12 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
178 } 179 }
179 180
180 if (regs) { 181 if (regs) {
182 /*
183 * Disallow cross-task user callchains.
184 */
185 if (event->ctx->task && event->ctx->task != current)
186 goto exit_put;
187
181 perf_callchain_store(entry, PERF_CONTEXT_USER); 188 perf_callchain_store(entry, PERF_CONTEXT_USER);
182 perf_callchain_user(entry, regs); 189 perf_callchain_user(entry, regs);
183 } 190 }
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f1cf0edeb39a..b7935fcec7d9 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4039,7 +4039,7 @@ void perf_prepare_sample(struct perf_event_header *header,
4039 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 4039 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
4040 int size = 1; 4040 int size = 1;
4041 4041
4042 data->callchain = perf_callchain(regs); 4042 data->callchain = perf_callchain(event, regs);
4043 4043
4044 if (data->callchain) 4044 if (data->callchain)
4045 size += data->callchain->nr; 4045 size += data->callchain->nr;
@@ -5209,7 +5209,8 @@ static int perf_tp_event_match(struct perf_event *event,
5209} 5209}
5210 5210
5211void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, 5211void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
5212 struct pt_regs *regs, struct hlist_head *head, int rctx) 5212 struct pt_regs *regs, struct hlist_head *head, int rctx,
5213 struct task_struct *task)
5213{ 5214{
5214 struct perf_sample_data data; 5215 struct perf_sample_data data;
5215 struct perf_event *event; 5216 struct perf_event *event;
@@ -5228,6 +5229,31 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
5228 perf_swevent_event(event, count, &data, regs); 5229 perf_swevent_event(event, count, &data, regs);
5229 } 5230 }
5230 5231
5232 /*
5233 * If we got specified a target task, also iterate its context and
5234 * deliver this event there too.
5235 */
5236 if (task && task != current) {
5237 struct perf_event_context *ctx;
5238 struct trace_entry *entry = record;
5239
5240 rcu_read_lock();
5241 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
5242 if (!ctx)
5243 goto unlock;
5244
5245 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
5246 if (event->attr.type != PERF_TYPE_TRACEPOINT)
5247 continue;
5248 if (event->attr.config != entry->type)
5249 continue;
5250 if (perf_tp_event_match(event, &data, regs))
5251 perf_swevent_event(event, count, &data, regs);
5252 }
5253unlock:
5254 rcu_read_unlock();
5255 }
5256
5231 perf_swevent_put_recursion_context(rctx); 5257 perf_swevent_put_recursion_context(rctx);
5232} 5258}
5233EXPORT_SYMBOL_GPL(perf_tp_event); 5259EXPORT_SYMBOL_GPL(perf_tp_event);
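The effect of the new task argument is that a tracepoint event bound to some other task (for example a per-task sched_stat event) now receives samples even though the tracepoint fires in a different context. A hedged userspace sketch of opening such a per-task tracepoint event; the tracepoint id must be read separately from debugfs (e.g. events/sched/sched_stat_sleep/id) and error handling is omitted:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <string.h>
#include <unistd.h>

/* Open a tracepoint event (id 'tp_id') counting on behalf of task 'pid'. */
static int open_task_tracepoint(pid_t pid, unsigned long long tp_id)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.size = sizeof(attr);
	attr.config = tp_id;		/* tracepoint id from debugfs */
	attr.sample_period = 1;

	return syscall(__NR_perf_event_open, &attr, pid,
		       -1 /* any cpu */, -1 /* no group */, 0);
}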
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index b0b107f90afc..a096c19f2c2a 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -101,7 +101,8 @@ __output_copy(struct perf_output_handle *handle,
101} 101}
102 102
103/* Callchain handling */ 103/* Callchain handling */
104extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); 104extern struct perf_callchain_entry *
105perf_callchain(struct perf_event *event, struct pt_regs *regs);
105extern int get_callchain_buffers(void); 106extern int get_callchain_buffers(void);
106extern void put_callchain_buffers(void); 107extern void put_callchain_buffers(void);
107 108
diff --git a/kernel/futex.c b/kernel/futex.c
index e2b0fb9a0b3b..3717e7b306e0 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2231,11 +2231,11 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2231 * @uaddr2: the pi futex we will take prior to returning to user-space 2231 * @uaddr2: the pi futex we will take prior to returning to user-space
2232 * 2232 *
2233 * The caller will wait on uaddr and will be requeued by futex_requeue() to 2233 * The caller will wait on uaddr and will be requeued by futex_requeue() to
2234 * uaddr2 which must be PI aware. Normal wakeup will wake on uaddr2 and 2234 * uaddr2 which must be PI aware and unique from uaddr. Normal wakeup will wake
2235 * complete the acquisition of the rt_mutex prior to returning to userspace. 2235 * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
2236 * This ensures the rt_mutex maintains an owner when it has waiters; without 2236 * userspace. This ensures the rt_mutex maintains an owner when it has waiters;
2237 * one, the pi logic wouldn't know which task to boost/deboost, if there was a 2237 * without one, the pi logic would not know which task to boost/deboost, if
2238 * need to. 2238 * there was a need to.
2239 * 2239 *
2240 * We call schedule in futex_wait_queue_me() when we enqueue and return there 2240 * We call schedule in futex_wait_queue_me() when we enqueue and return there
2241 * via the following: 2241 * via the following:
@@ -2272,6 +2272,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
2272 struct futex_q q = futex_q_init; 2272 struct futex_q q = futex_q_init;
2273 int res, ret; 2273 int res, ret;
2274 2274
2275 if (uaddr == uaddr2)
2276 return -EINVAL;
2277
2275 if (!bitset) 2278 if (!bitset)
2276 return -EINVAL; 2279 return -EINVAL;
2277 2280
@@ -2343,7 +2346,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
2343 * signal. futex_unlock_pi() will not destroy the lock_ptr nor 2346 * signal. futex_unlock_pi() will not destroy the lock_ptr nor
2344 * the pi_state. 2347 * the pi_state.
2345 */ 2348 */
2346 WARN_ON(!&q.pi_state); 2349 WARN_ON(!q.pi_state);
2347 pi_mutex = &q.pi_state->pi_mutex; 2350 pi_mutex = &q.pi_state->pi_mutex;
2348 ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1); 2351 ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
2349 debug_rt_mutex_free_waiter(&rt_waiter); 2352 debug_rt_mutex_free_waiter(&rt_waiter);
@@ -2370,7 +2373,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
2370 * fault, unlock the rt_mutex and return the fault to userspace. 2373 * fault, unlock the rt_mutex and return the fault to userspace.
2371 */ 2374 */
2372 if (ret == -EFAULT) { 2375 if (ret == -EFAULT) {
2373 if (rt_mutex_owner(pi_mutex) == current) 2376 if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
2374 rt_mutex_unlock(pi_mutex); 2377 rt_mutex_unlock(pi_mutex);
2375 } else if (ret == -EINTR) { 2378 } else if (ret == -EINTR) {
2376 /* 2379 /*
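The new uaddr == uaddr2 check is visible from userspace: a FUTEX_WAIT_REQUEUE_PI call that names the same futex as both the wait target and the requeue target now fails up front with EINVAL instead of corrupting the waiter state later. A minimal sketch using the raw syscall (headers new enough to define FUTEX_WAIT_REQUEUE_PI are assumed):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>
#include <errno.h>

int main(void)
{
	unsigned int futex_word = 0;

	/* uaddr and uaddr2 are the same address: rejected with EINVAL. */
	long ret = syscall(SYS_futex, &futex_word, FUTEX_WAIT_REQUEUE_PI,
			   0, NULL, &futex_word, 0);
	printf("ret=%ld errno=%d (expect -1/EINVAL)\n", ret, errno);
	return 0;
}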
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0a8e8f059627..4c69326aa773 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -944,6 +944,18 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
944 } 944 }
945 945
946 /* 946 /*
947 * Drivers are often written to work w/o knowledge about the
948 * underlying irq chip implementation, so a request for a
949 * threaded irq without a primary hard irq context handler
950 * requires the ONESHOT flag to be set. Some irq chips like
951 * MSI based interrupts are per se one shot safe. Check the
952 * chip flags, so we can avoid the unmask dance at the end of
953 * the threaded handler for those.
954 */
955 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
956 new->flags &= ~IRQF_ONESHOT;
957
958 /*
947 * The following block of code has to be executed atomically 959 * The following block of code has to be executed atomically
948 */ 960 */
949 raw_spin_lock_irqsave(&desc->lock, flags); 961 raw_spin_lock_irqsave(&desc->lock, flags);
@@ -1017,7 +1029,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1017 */ 1029 */
1018 new->thread_mask = 1 << ffz(thread_mask); 1030 new->thread_mask = 1 << ffz(thread_mask);
1019 1031
1020 } else if (new->handler == irq_default_primary_handler) { 1032 } else if (new->handler == irq_default_primary_handler &&
1033 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1021 /* 1034 /*
1022 * The interrupt was requested with handler = NULL, so 1035 * The interrupt was requested with handler = NULL, so
1023 * we use the default primary handler for it. But it 1036 * we use the default primary handler for it. But it
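For drivers the calling convention is unchanged: a request with no primary handler must still pass IRQF_ONESHOT, and on chips flagged IRQCHIP_ONESHOT_SAFE (MSI-style interrupts being the stated example) the core now quietly clears the flag so the mask/unmask dance around the threaded handler is skipped. A driver-style sketch; the mydev names are illustrative:

#include <linux/interrupt.h>

static irqreturn_t mydev_thread_fn(int irq, void *data)
{
	/* sleepable handling of the interrupt goes here */
	return IRQ_HANDLED;
}

static int mydev_setup_irq(int irq, void *data)
{
	/* No primary handler, so IRQF_ONESHOT is still mandatory here;
	 * on an IRQCHIP_ONESHOT_SAFE chip the core drops it internally. */
	return request_threaded_irq(irq, NULL, mydev_thread_fn,
				    IRQF_ONESHOT, "mydev", data);
}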
diff --git a/kernel/printk.c b/kernel/printk.c
index 6a76ab9d4476..66a2ea37b576 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1034,6 +1034,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
1034 struct log *msg = log_from_idx(idx); 1034 struct log *msg = log_from_idx(idx);
1035 1035
1036 len += msg_print_text(msg, prev, true, NULL, 0); 1036 len += msg_print_text(msg, prev, true, NULL, 0);
1037 prev = msg->flags;
1037 idx = log_next(idx); 1038 idx = log_next(idx);
1038 seq++; 1039 seq++;
1039 } 1040 }
@@ -1046,6 +1047,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
1046 struct log *msg = log_from_idx(idx); 1047 struct log *msg = log_from_idx(idx);
1047 1048
1048 len -= msg_print_text(msg, prev, true, NULL, 0); 1049 len -= msg_print_text(msg, prev, true, NULL, 0);
1050 prev = msg->flags;
1049 idx = log_next(idx); 1051 idx = log_next(idx);
1050 seq++; 1052 seq++;
1051 } 1053 }
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d325c4b2dcbb..82ad284f823b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4340,9 +4340,7 @@ recheck:
4340 */ 4340 */
4341 if (unlikely(policy == p->policy && (!rt_policy(policy) || 4341 if (unlikely(policy == p->policy && (!rt_policy(policy) ||
4342 param->sched_priority == p->rt_priority))) { 4342 param->sched_priority == p->rt_priority))) {
4343 4343 task_rq_unlock(rq, p, &flags);
4344 __task_rq_unlock(rq);
4345 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4346 return 0; 4344 return 0;
4347 } 4345 }
4348 4346
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index d72586fdf660..23aa789c53ee 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -65,8 +65,8 @@ static int convert_prio(int prio)
65int cpupri_find(struct cpupri *cp, struct task_struct *p, 65int cpupri_find(struct cpupri *cp, struct task_struct *p,
66 struct cpumask *lowest_mask) 66 struct cpumask *lowest_mask)
67{ 67{
68 int idx = 0; 68 int idx = 0;
69 int task_pri = convert_prio(p->prio); 69 int task_pri = convert_prio(p->prio);
70 70
71 if (task_pri >= MAX_RT_PRIO) 71 if (task_pri >= MAX_RT_PRIO)
72 return 0; 72 return 0;
@@ -137,9 +137,9 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
137 */ 137 */
138void cpupri_set(struct cpupri *cp, int cpu, int newpri) 138void cpupri_set(struct cpupri *cp, int cpu, int newpri)
139{ 139{
140 int *currpri = &cp->cpu_to_pri[cpu]; 140 int *currpri = &cp->cpu_to_pri[cpu];
141 int oldpri = *currpri; 141 int oldpri = *currpri;
142 int do_mb = 0; 142 int do_mb = 0;
143 143
144 newpri = convert_prio(newpri); 144 newpri = convert_prio(newpri);
145 145
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 22321db64952..d0cc03b3e70b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3069,6 +3069,9 @@ struct lb_env {
3069 int new_dst_cpu; 3069 int new_dst_cpu;
3070 enum cpu_idle_type idle; 3070 enum cpu_idle_type idle;
3071 long imbalance; 3071 long imbalance;
3072 /* The set of CPUs under consideration for load-balancing */
3073 struct cpumask *cpus;
3074
3072 unsigned int flags; 3075 unsigned int flags;
3073 3076
3074 unsigned int loop; 3077 unsigned int loop;
@@ -3653,8 +3656,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
3653 */ 3656 */
3654static inline void update_sg_lb_stats(struct lb_env *env, 3657static inline void update_sg_lb_stats(struct lb_env *env,
3655 struct sched_group *group, int load_idx, 3658 struct sched_group *group, int load_idx,
3656 int local_group, const struct cpumask *cpus, 3659 int local_group, int *balance, struct sg_lb_stats *sgs)
3657 int *balance, struct sg_lb_stats *sgs)
3658{ 3660{
3659 unsigned long nr_running, max_nr_running, min_nr_running; 3661 unsigned long nr_running, max_nr_running, min_nr_running;
3660 unsigned long load, max_cpu_load, min_cpu_load; 3662 unsigned long load, max_cpu_load, min_cpu_load;
@@ -3671,7 +3673,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
3671 max_nr_running = 0; 3673 max_nr_running = 0;
3672 min_nr_running = ~0UL; 3674 min_nr_running = ~0UL;
3673 3675
3674 for_each_cpu_and(i, sched_group_cpus(group), cpus) { 3676 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
3675 struct rq *rq = cpu_rq(i); 3677 struct rq *rq = cpu_rq(i);
3676 3678
3677 nr_running = rq->nr_running; 3679 nr_running = rq->nr_running;
@@ -3800,8 +3802,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
3800 * @sds: variable to hold the statistics for this sched_domain. 3802 * @sds: variable to hold the statistics for this sched_domain.
3801 */ 3803 */
3802static inline void update_sd_lb_stats(struct lb_env *env, 3804static inline void update_sd_lb_stats(struct lb_env *env,
3803 const struct cpumask *cpus, 3805 int *balance, struct sd_lb_stats *sds)
3804 int *balance, struct sd_lb_stats *sds)
3805{ 3806{
3806 struct sched_domain *child = env->sd->child; 3807 struct sched_domain *child = env->sd->child;
3807 struct sched_group *sg = env->sd->groups; 3808 struct sched_group *sg = env->sd->groups;
@@ -3818,8 +3819,7 @@ static inline void update_sd_lb_stats(struct lb_env *env,
3818 3819
3819 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg)); 3820 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
3820 memset(&sgs, 0, sizeof(sgs)); 3821 memset(&sgs, 0, sizeof(sgs));
3821 update_sg_lb_stats(env, sg, load_idx, local_group, 3822 update_sg_lb_stats(env, sg, load_idx, local_group, balance, &sgs);
3822 cpus, balance, &sgs);
3823 3823
3824 if (local_group && !(*balance)) 3824 if (local_group && !(*balance))
3825 return; 3825 return;
@@ -4055,7 +4055,6 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
4055 * to restore balance. 4055 * to restore balance.
4056 * 4056 *
4057 * @env: The load balancing environment. 4057 * @env: The load balancing environment.
4058 * @cpus: The set of CPUs under consideration for load-balancing.
4059 * @balance: Pointer to a variable indicating if this_cpu 4058 * @balance: Pointer to a variable indicating if this_cpu
4060 * is the appropriate cpu to perform load balancing at this_level. 4059 * is the appropriate cpu to perform load balancing at this_level.
4061 * 4060 *
@@ -4065,7 +4064,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
4065 * put to idle by rebalancing its tasks onto our group. 4064 * put to idle by rebalancing its tasks onto our group.
4066 */ 4065 */
4067static struct sched_group * 4066static struct sched_group *
4068find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance) 4067find_busiest_group(struct lb_env *env, int *balance)
4069{ 4068{
4070 struct sd_lb_stats sds; 4069 struct sd_lb_stats sds;
4071 4070
@@ -4075,7 +4074,7 @@ find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance)
4075 * Compute the various statistics relevant for load balancing at 4074 * Compute the various statistics relevant for load balancing at
4076 * this level. 4075 * this level.
4077 */ 4076 */
4078 update_sd_lb_stats(env, cpus, balance, &sds); 4077 update_sd_lb_stats(env, balance, &sds);
4079 4078
4080 /* 4079 /*
4081 * this_cpu is not the appropriate cpu to perform load balancing at 4080 * this_cpu is not the appropriate cpu to perform load balancing at
@@ -4155,8 +4154,7 @@ ret:
4155 * find_busiest_queue - find the busiest runqueue among the cpus in group. 4154 * find_busiest_queue - find the busiest runqueue among the cpus in group.
4156 */ 4155 */
4157static struct rq *find_busiest_queue(struct lb_env *env, 4156static struct rq *find_busiest_queue(struct lb_env *env,
4158 struct sched_group *group, 4157 struct sched_group *group)
4159 const struct cpumask *cpus)
4160{ 4158{
4161 struct rq *busiest = NULL, *rq; 4159 struct rq *busiest = NULL, *rq;
4162 unsigned long max_load = 0; 4160 unsigned long max_load = 0;
@@ -4171,7 +4169,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
4171 if (!capacity) 4169 if (!capacity)
4172 capacity = fix_small_capacity(env->sd, group); 4170 capacity = fix_small_capacity(env->sd, group);
4173 4171
4174 if (!cpumask_test_cpu(i, cpus)) 4172 if (!cpumask_test_cpu(i, env->cpus))
4175 continue; 4173 continue;
4176 4174
4177 rq = cpu_rq(i); 4175 rq = cpu_rq(i);
@@ -4252,6 +4250,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
4252 .dst_grpmask = sched_group_cpus(sd->groups), 4250 .dst_grpmask = sched_group_cpus(sd->groups),
4253 .idle = idle, 4251 .idle = idle,
4254 .loop_break = sched_nr_migrate_break, 4252 .loop_break = sched_nr_migrate_break,
4253 .cpus = cpus,
4255 }; 4254 };
4256 4255
4257 cpumask_copy(cpus, cpu_active_mask); 4256 cpumask_copy(cpus, cpu_active_mask);
@@ -4260,7 +4259,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
4260 schedstat_inc(sd, lb_count[idle]); 4259 schedstat_inc(sd, lb_count[idle]);
4261 4260
4262redo: 4261redo:
4263 group = find_busiest_group(&env, cpus, balance); 4262 group = find_busiest_group(&env, balance);
4264 4263
4265 if (*balance == 0) 4264 if (*balance == 0)
4266 goto out_balanced; 4265 goto out_balanced;
@@ -4270,7 +4269,7 @@ redo:
4270 goto out_balanced; 4269 goto out_balanced;
4271 } 4270 }
4272 4271
4273 busiest = find_busiest_queue(&env, group, cpus); 4272 busiest = find_busiest_queue(&env, group);
4274 if (!busiest) { 4273 if (!busiest) {
4275 schedstat_inc(sd, lb_nobusyq[idle]); 4274 schedstat_inc(sd, lb_nobusyq[idle]);
4276 goto out_balanced; 4275 goto out_balanced;
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index a470154e0408..46da0537c10b 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -37,7 +37,7 @@
37 * requested HZ value. It is also not recommended 37 * requested HZ value. It is also not recommended
38 * for "tick-less" systems. 38 * for "tick-less" systems.
39 */ 39 */
40#define NSEC_PER_JIFFY ((u32)((((u64)NSEC_PER_SEC)<<8)/ACTHZ)) 40#define NSEC_PER_JIFFY ((u32)((((u64)NSEC_PER_SEC)<<8)/SHIFTED_HZ))
41 41
42/* Since jiffies uses a simple NSEC_PER_JIFFY multiplier 42/* Since jiffies uses a simple NSEC_PER_JIFFY multiplier
43 * conversion, the .shift value could be zero. However 43 * conversion, the .shift value could be zero. However
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index b7fbadc5c973..24174b4d669b 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -28,7 +28,7 @@ DEFINE_SPINLOCK(ntp_lock);
28/* USER_HZ period (usecs): */ 28/* USER_HZ period (usecs): */
29unsigned long tick_usec = TICK_USEC; 29unsigned long tick_usec = TICK_USEC;
30 30
31/* ACTHZ period (nsecs): */ 31/* SHIFTED_HZ period (nsecs): */
32unsigned long tick_nsec; 32unsigned long tick_nsec;
33 33
34static u64 tick_length; 34static u64 tick_length;
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index f045cc50832d..e16af197a2bc 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -65,14 +65,14 @@ struct timekeeper {
65 * used instead. 65 * used instead.
66 */ 66 */
67 struct timespec wall_to_monotonic; 67 struct timespec wall_to_monotonic;
68 /* time spent in suspend */
69 struct timespec total_sleep_time;
70 /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
71 struct timespec raw_time;
72 /* Offset clock monotonic -> clock realtime */ 68 /* Offset clock monotonic -> clock realtime */
73 ktime_t offs_real; 69 ktime_t offs_real;
70 /* time spent in suspend */
71 struct timespec total_sleep_time;
74 /* Offset clock monotonic -> clock boottime */ 72 /* Offset clock monotonic -> clock boottime */
75 ktime_t offs_boot; 73 ktime_t offs_boot;
74 /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
75 struct timespec raw_time;
76 /* Seqlock for all timekeeper values */ 76 /* Seqlock for all timekeeper values */
77 seqlock_t lock; 77 seqlock_t lock;
78}; 78};
@@ -108,13 +108,38 @@ static struct timespec tk_xtime(struct timekeeper *tk)
108static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts) 108static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts)
109{ 109{
110 tk->xtime_sec = ts->tv_sec; 110 tk->xtime_sec = ts->tv_sec;
111 tk->xtime_nsec = ts->tv_nsec << tk->shift; 111 tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift;
112} 112}
113 113
114static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts) 114static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
115{ 115{
116 tk->xtime_sec += ts->tv_sec; 116 tk->xtime_sec += ts->tv_sec;
117 tk->xtime_nsec += ts->tv_nsec << tk->shift; 117 tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift;
118}
119
120static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
121{
122 struct timespec tmp;
123
124 /*
125 * Verify consistency of: offset_real = -wall_to_monotonic
126 * before modifying anything
127 */
128 set_normalized_timespec(&tmp, -tk->wall_to_monotonic.tv_sec,
129 -tk->wall_to_monotonic.tv_nsec);
130 WARN_ON_ONCE(tk->offs_real.tv64 != timespec_to_ktime(tmp).tv64);
131 tk->wall_to_monotonic = wtm;
132 set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
133 tk->offs_real = timespec_to_ktime(tmp);
134}
135
136static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
137{
138 /* Verify consistency before modifying */
139 WARN_ON_ONCE(tk->offs_boot.tv64 != timespec_to_ktime(tk->total_sleep_time).tv64);
140
141 tk->total_sleep_time = t;
142 tk->offs_boot = timespec_to_ktime(t);
118} 143}
119 144
120/** 145/**
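The two new helpers centralize invariants that were previously maintained by hand in several places, with the WARN_ON_ONCE checks catching any caller that updates one side without the other. Restated as the relations they enforce:

	offs_real == timespec_to_ktime(-wall_to_monotonic)
		so CLOCK_REALTIME  = CLOCK_MONOTONIC + offs_real
	offs_boot == timespec_to_ktime(total_sleep_time)
		so CLOCK_BOOTTIME  = CLOCK_MONOTONIC + offs_boot

Later hunks convert do_settimeofday(), timekeeping_inject_offset() and the sleep-time injection path to go through these helpers, which is what lets update_rt_offset() and update_sleep_time() be removed.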
@@ -217,14 +242,6 @@ static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
217 return nsec + arch_gettimeoffset(); 242 return nsec + arch_gettimeoffset();
218} 243}
219 244
220static void update_rt_offset(struct timekeeper *tk)
221{
222 struct timespec tmp, *wtm = &tk->wall_to_monotonic;
223
224 set_normalized_timespec(&tmp, -wtm->tv_sec, -wtm->tv_nsec);
225 tk->offs_real = timespec_to_ktime(tmp);
226}
227
228/* must hold write on timekeeper.lock */ 245/* must hold write on timekeeper.lock */
229static void timekeeping_update(struct timekeeper *tk, bool clearntp) 246static void timekeeping_update(struct timekeeper *tk, bool clearntp)
230{ 247{
@@ -234,12 +251,10 @@ static void timekeeping_update(struct timekeeper *tk, bool clearntp)
234 tk->ntp_error = 0; 251 tk->ntp_error = 0;
235 ntp_clear(); 252 ntp_clear();
236 } 253 }
237 update_rt_offset(tk);
238 xt = tk_xtime(tk); 254 xt = tk_xtime(tk);
239 update_vsyscall(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult); 255 update_vsyscall(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult);
240} 256}
241 257
242
243/** 258/**
244 * timekeeping_forward_now - update clock to the current time 259 * timekeeping_forward_now - update clock to the current time
245 * 260 *
@@ -277,18 +292,19 @@ static void timekeeping_forward_now(struct timekeeper *tk)
277 */ 292 */
278void getnstimeofday(struct timespec *ts) 293void getnstimeofday(struct timespec *ts)
279{ 294{
295 struct timekeeper *tk = &timekeeper;
280 unsigned long seq; 296 unsigned long seq;
281 s64 nsecs = 0; 297 s64 nsecs = 0;
282 298
283 WARN_ON(timekeeping_suspended); 299 WARN_ON(timekeeping_suspended);
284 300
285 do { 301 do {
286 seq = read_seqbegin(&timekeeper.lock); 302 seq = read_seqbegin(&tk->lock);
287 303
288 ts->tv_sec = timekeeper.xtime_sec; 304 ts->tv_sec = tk->xtime_sec;
289 ts->tv_nsec = timekeeping_get_ns(&timekeeper); 305 ts->tv_nsec = timekeeping_get_ns(tk);
290 306
291 } while (read_seqretry(&timekeeper.lock, seq)); 307 } while (read_seqretry(&tk->lock, seq));
292 308
293 timespec_add_ns(ts, nsecs); 309 timespec_add_ns(ts, nsecs);
294} 310}
@@ -296,19 +312,18 @@ EXPORT_SYMBOL(getnstimeofday);
296 312
297ktime_t ktime_get(void) 313ktime_t ktime_get(void)
298{ 314{
315 struct timekeeper *tk = &timekeeper;
299 unsigned int seq; 316 unsigned int seq;
300 s64 secs, nsecs; 317 s64 secs, nsecs;
301 318
302 WARN_ON(timekeeping_suspended); 319 WARN_ON(timekeeping_suspended);
303 320
304 do { 321 do {
305 seq = read_seqbegin(&timekeeper.lock); 322 seq = read_seqbegin(&tk->lock);
306 secs = timekeeper.xtime_sec + 323 secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
307 timekeeper.wall_to_monotonic.tv_sec; 324 nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec;
308 nsecs = timekeeping_get_ns(&timekeeper) +
309 timekeeper.wall_to_monotonic.tv_nsec;
310 325
311 } while (read_seqretry(&timekeeper.lock, seq)); 326 } while (read_seqretry(&tk->lock, seq));
312 /* 327 /*
313 * Use ktime_set/ktime_add_ns to create a proper ktime on 328 * Use ktime_set/ktime_add_ns to create a proper ktime on
314 * 32-bit architectures without CONFIG_KTIME_SCALAR. 329 * 32-bit architectures without CONFIG_KTIME_SCALAR.
@@ -327,18 +342,19 @@ EXPORT_SYMBOL_GPL(ktime_get);
327 */ 342 */
328void ktime_get_ts(struct timespec *ts) 343void ktime_get_ts(struct timespec *ts)
329{ 344{
345 struct timekeeper *tk = &timekeeper;
330 struct timespec tomono; 346 struct timespec tomono;
331 unsigned int seq; 347 unsigned int seq;
332 348
333 WARN_ON(timekeeping_suspended); 349 WARN_ON(timekeeping_suspended);
334 350
335 do { 351 do {
336 seq = read_seqbegin(&timekeeper.lock); 352 seq = read_seqbegin(&tk->lock);
337 ts->tv_sec = timekeeper.xtime_sec; 353 ts->tv_sec = tk->xtime_sec;
338 ts->tv_nsec = timekeeping_get_ns(&timekeeper); 354 ts->tv_nsec = timekeeping_get_ns(tk);
339 tomono = timekeeper.wall_to_monotonic; 355 tomono = tk->wall_to_monotonic;
340 356
341 } while (read_seqretry(&timekeeper.lock, seq)); 357 } while (read_seqretry(&tk->lock, seq));
342 358
343 set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec, 359 set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
344 ts->tv_nsec + tomono.tv_nsec); 360 ts->tv_nsec + tomono.tv_nsec);
@@ -358,22 +374,23 @@ EXPORT_SYMBOL_GPL(ktime_get_ts);
358 */ 374 */
359void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real) 375void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
360{ 376{
377 struct timekeeper *tk = &timekeeper;
361 unsigned long seq; 378 unsigned long seq;
362 s64 nsecs_raw, nsecs_real; 379 s64 nsecs_raw, nsecs_real;
363 380
364 WARN_ON_ONCE(timekeeping_suspended); 381 WARN_ON_ONCE(timekeeping_suspended);
365 382
366 do { 383 do {
367 seq = read_seqbegin(&timekeeper.lock); 384 seq = read_seqbegin(&tk->lock);
368 385
369 *ts_raw = timekeeper.raw_time; 386 *ts_raw = tk->raw_time;
370 ts_real->tv_sec = timekeeper.xtime_sec; 387 ts_real->tv_sec = tk->xtime_sec;
371 ts_real->tv_nsec = 0; 388 ts_real->tv_nsec = 0;
372 389
373 nsecs_raw = timekeeping_get_ns_raw(&timekeeper); 390 nsecs_raw = timekeeping_get_ns_raw(tk);
374 nsecs_real = timekeeping_get_ns(&timekeeper); 391 nsecs_real = timekeeping_get_ns(tk);
375 392
376 } while (read_seqretry(&timekeeper.lock, seq)); 393 } while (read_seqretry(&tk->lock, seq));
377 394
378 timespec_add_ns(ts_raw, nsecs_raw); 395 timespec_add_ns(ts_raw, nsecs_raw);
379 timespec_add_ns(ts_real, nsecs_real); 396 timespec_add_ns(ts_real, nsecs_real);
@@ -406,28 +423,28 @@ EXPORT_SYMBOL(do_gettimeofday);
406 */ 423 */
407int do_settimeofday(const struct timespec *tv) 424int do_settimeofday(const struct timespec *tv)
408{ 425{
426 struct timekeeper *tk = &timekeeper;
409 struct timespec ts_delta, xt; 427 struct timespec ts_delta, xt;
410 unsigned long flags; 428 unsigned long flags;
411 429
412 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) 430 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
413 return -EINVAL; 431 return -EINVAL;
414 432
415 write_seqlock_irqsave(&timekeeper.lock, flags); 433 write_seqlock_irqsave(&tk->lock, flags);
416 434
417 timekeeping_forward_now(&timekeeper); 435 timekeeping_forward_now(tk);
418 436
419 xt = tk_xtime(&timekeeper); 437 xt = tk_xtime(tk);
420 ts_delta.tv_sec = tv->tv_sec - xt.tv_sec; 438 ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
421 ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec; 439 ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec;
422 440
423 timekeeper.wall_to_monotonic = 441 tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, ts_delta));
424 timespec_sub(timekeeper.wall_to_monotonic, ts_delta);
425 442
426 tk_set_xtime(&timekeeper, tv); 443 tk_set_xtime(tk, tv);
427 444
428 timekeeping_update(&timekeeper, true); 445 timekeeping_update(tk, true);
429 446
430 write_sequnlock_irqrestore(&timekeeper.lock, flags); 447 write_sequnlock_irqrestore(&tk->lock, flags);
431 448
432 /* signal hrtimers about time change */ 449 /* signal hrtimers about time change */
433 clock_was_set(); 450 clock_was_set();
@@ -436,7 +453,6 @@ int do_settimeofday(const struct timespec *tv)
436} 453}
437EXPORT_SYMBOL(do_settimeofday); 454EXPORT_SYMBOL(do_settimeofday);
438 455
439
440/** 456/**
441 * timekeeping_inject_offset - Adds or subtracts from the current time. 457 * timekeeping_inject_offset - Adds or subtracts from the current time.
442 * @tv: pointer to the timespec variable containing the offset 458 * @tv: pointer to the timespec variable containing the offset
@@ -445,23 +461,23 @@ EXPORT_SYMBOL(do_settimeofday);
445 */ 461 */
446int timekeeping_inject_offset(struct timespec *ts) 462int timekeeping_inject_offset(struct timespec *ts)
447{ 463{
464 struct timekeeper *tk = &timekeeper;
448 unsigned long flags; 465 unsigned long flags;
449 466
450 if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) 467 if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
451 return -EINVAL; 468 return -EINVAL;
452 469
453 write_seqlock_irqsave(&timekeeper.lock, flags); 470 write_seqlock_irqsave(&tk->lock, flags);
454 471
455 timekeeping_forward_now(&timekeeper); 472 timekeeping_forward_now(tk);
456 473
457 474
458 tk_xtime_add(&timekeeper, ts); 475 tk_xtime_add(tk, ts);
459 timekeeper.wall_to_monotonic = 476 tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts));
460 timespec_sub(timekeeper.wall_to_monotonic, *ts);
461 477
462 timekeeping_update(&timekeeper, true); 478 timekeeping_update(tk, true);
463 479
464 write_sequnlock_irqrestore(&timekeeper.lock, flags); 480 write_sequnlock_irqrestore(&tk->lock, flags);
465 481
466 /* signal hrtimers about time change */ 482 /* signal hrtimers about time change */
467 clock_was_set(); 483 clock_was_set();
@@ -477,23 +493,24 @@ EXPORT_SYMBOL(timekeeping_inject_offset);
477 */ 493 */
478static int change_clocksource(void *data) 494static int change_clocksource(void *data)
479{ 495{
496 struct timekeeper *tk = &timekeeper;
480 struct clocksource *new, *old; 497 struct clocksource *new, *old;
481 unsigned long flags; 498 unsigned long flags;
482 499
483 new = (struct clocksource *) data; 500 new = (struct clocksource *) data;
484 501
485 write_seqlock_irqsave(&timekeeper.lock, flags); 502 write_seqlock_irqsave(&tk->lock, flags);
486 503
487 timekeeping_forward_now(&timekeeper); 504 timekeeping_forward_now(tk);
488 if (!new->enable || new->enable(new) == 0) { 505 if (!new->enable || new->enable(new) == 0) {
489 old = timekeeper.clock; 506 old = tk->clock;
490 tk_setup_internals(&timekeeper, new); 507 tk_setup_internals(tk, new);
491 if (old->disable) 508 if (old->disable)
492 old->disable(old); 509 old->disable(old);
493 } 510 }
494 timekeeping_update(&timekeeper, true); 511 timekeeping_update(tk, true);
495 512
496 write_sequnlock_irqrestore(&timekeeper.lock, flags); 513 write_sequnlock_irqrestore(&tk->lock, flags);
497 514
498 return 0; 515 return 0;
499} 516}
@@ -507,7 +524,9 @@ static int change_clocksource(void *data)
507 */ 524 */
508void timekeeping_notify(struct clocksource *clock) 525void timekeeping_notify(struct clocksource *clock)
509{ 526{
510 if (timekeeper.clock == clock) 527 struct timekeeper *tk = &timekeeper;
528
529 if (tk->clock == clock)
511 return; 530 return;
512 stop_machine(change_clocksource, clock, NULL); 531 stop_machine(change_clocksource, clock, NULL);
513 tick_clock_notify(); 532 tick_clock_notify();
@@ -536,35 +555,36 @@ EXPORT_SYMBOL_GPL(ktime_get_real);
536 */ 555 */
537void getrawmonotonic(struct timespec *ts) 556void getrawmonotonic(struct timespec *ts)
538{ 557{
558 struct timekeeper *tk = &timekeeper;
539 unsigned long seq; 559 unsigned long seq;
540 s64 nsecs; 560 s64 nsecs;
541 561
542 do { 562 do {
543 seq = read_seqbegin(&timekeeper.lock); 563 seq = read_seqbegin(&tk->lock);
544 nsecs = timekeeping_get_ns_raw(&timekeeper); 564 nsecs = timekeeping_get_ns_raw(tk);
545 *ts = timekeeper.raw_time; 565 *ts = tk->raw_time;
546 566
547 } while (read_seqretry(&timekeeper.lock, seq)); 567 } while (read_seqretry(&tk->lock, seq));
548 568
549 timespec_add_ns(ts, nsecs); 569 timespec_add_ns(ts, nsecs);
550} 570}
551EXPORT_SYMBOL(getrawmonotonic); 571EXPORT_SYMBOL(getrawmonotonic);
552 572
553
554/** 573/**
555 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres 574 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
556 */ 575 */
557int timekeeping_valid_for_hres(void) 576int timekeeping_valid_for_hres(void)
558{ 577{
578 struct timekeeper *tk = &timekeeper;
559 unsigned long seq; 579 unsigned long seq;
560 int ret; 580 int ret;
561 581
562 do { 582 do {
563 seq = read_seqbegin(&timekeeper.lock); 583 seq = read_seqbegin(&tk->lock);
564 584
565 ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES; 585 ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
566 586
567 } while (read_seqretry(&timekeeper.lock, seq)); 587 } while (read_seqretry(&tk->lock, seq));
568 588
569 return ret; 589 return ret;
570} 590}
@@ -574,15 +594,16 @@ int timekeeping_valid_for_hres(void)
574 */ 594 */
575u64 timekeeping_max_deferment(void) 595u64 timekeeping_max_deferment(void)
576{ 596{
597 struct timekeeper *tk = &timekeeper;
577 unsigned long seq; 598 unsigned long seq;
578 u64 ret; 599 u64 ret;
579 600
580 do { 601 do {
581 seq = read_seqbegin(&timekeeper.lock); 602 seq = read_seqbegin(&tk->lock);
582 603
583 ret = timekeeper.clock->max_idle_ns; 604 ret = tk->clock->max_idle_ns;
584 605
585 } while (read_seqretry(&timekeeper.lock, seq)); 606 } while (read_seqretry(&tk->lock, seq));
586 607
587 return ret; 608 return ret;
588} 609}
@@ -622,46 +643,43 @@ void __attribute__((weak)) read_boot_clock(struct timespec *ts)
622 */ 643 */
623void __init timekeeping_init(void) 644void __init timekeeping_init(void)
624{ 645{
646 struct timekeeper *tk = &timekeeper;
625 struct clocksource *clock; 647 struct clocksource *clock;
626 unsigned long flags; 648 unsigned long flags;
627 struct timespec now, boot; 649 struct timespec now, boot, tmp;
628 650
629 read_persistent_clock(&now); 651 read_persistent_clock(&now);
630 read_boot_clock(&boot); 652 read_boot_clock(&boot);
631 653
632 seqlock_init(&timekeeper.lock); 654 seqlock_init(&tk->lock);
633 655
634 ntp_init(); 656 ntp_init();
635 657
636 write_seqlock_irqsave(&timekeeper.lock, flags); 658 write_seqlock_irqsave(&tk->lock, flags);
637 clock = clocksource_default_clock(); 659 clock = clocksource_default_clock();
638 if (clock->enable) 660 if (clock->enable)
639 clock->enable(clock); 661 clock->enable(clock);
640 tk_setup_internals(&timekeeper, clock); 662 tk_setup_internals(tk, clock);
641 663
642 tk_set_xtime(&timekeeper, &now); 664 tk_set_xtime(tk, &now);
643 timekeeper.raw_time.tv_sec = 0; 665 tk->raw_time.tv_sec = 0;
644 timekeeper.raw_time.tv_nsec = 0; 666 tk->raw_time.tv_nsec = 0;
645 if (boot.tv_sec == 0 && boot.tv_nsec == 0) 667 if (boot.tv_sec == 0 && boot.tv_nsec == 0)
646 boot = tk_xtime(&timekeeper); 668 boot = tk_xtime(tk);
647 669
648 set_normalized_timespec(&timekeeper.wall_to_monotonic, 670 set_normalized_timespec(&tmp, -boot.tv_sec, -boot.tv_nsec);
649 -boot.tv_sec, -boot.tv_nsec); 671 tk_set_wall_to_mono(tk, tmp);
650 update_rt_offset(&timekeeper); 672
651 timekeeper.total_sleep_time.tv_sec = 0; 673 tmp.tv_sec = 0;
652 timekeeper.total_sleep_time.tv_nsec = 0; 674 tmp.tv_nsec = 0;
653 write_sequnlock_irqrestore(&timekeeper.lock, flags); 675 tk_set_sleep_time(tk, tmp);
676
677 write_sequnlock_irqrestore(&tk->lock, flags);
654} 678}
655 679
656/* time in seconds when suspend began */ 680/* time in seconds when suspend began */
657static struct timespec timekeeping_suspend_time; 681static struct timespec timekeeping_suspend_time;
658 682
659static void update_sleep_time(struct timespec t)
660{
661 timekeeper.total_sleep_time = t;
662 timekeeper.offs_boot = timespec_to_ktime(t);
663}
664
665/** 683/**
666 * __timekeeping_inject_sleeptime - Internal function to add sleep interval 684 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
667 * @delta: pointer to a timespec delta value 685 * @delta: pointer to a timespec delta value
@@ -677,13 +695,11 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
677 "sleep delta value!\n"); 695 "sleep delta value!\n");
678 return; 696 return;
679 } 697 }
680
681 tk_xtime_add(tk, delta); 698 tk_xtime_add(tk, delta);
682 tk->wall_to_monotonic = timespec_sub(tk->wall_to_monotonic, *delta); 699 tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *delta));
683 update_sleep_time(timespec_add(tk->total_sleep_time, *delta)); 700 tk_set_sleep_time(tk, timespec_add(tk->total_sleep_time, *delta));
684} 701}
685 702
686
687/** 703/**
688 * timekeeping_inject_sleeptime - Adds suspend interval to timeekeeping values 704 * timekeeping_inject_sleeptime - Adds suspend interval to timeekeeping values
689 * @delta: pointer to a timespec delta value 705 * @delta: pointer to a timespec delta value
@@ -696,6 +712,7 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
696 */ 712 */
697void timekeeping_inject_sleeptime(struct timespec *delta) 713void timekeeping_inject_sleeptime(struct timespec *delta)
698{ 714{
715 struct timekeeper *tk = &timekeeper;
699 unsigned long flags; 716 unsigned long flags;
700 struct timespec ts; 717 struct timespec ts;
701 718
@@ -704,21 +721,20 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
704 if (!(ts.tv_sec == 0 && ts.tv_nsec == 0)) 721 if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
705 return; 722 return;
706 723
707 write_seqlock_irqsave(&timekeeper.lock, flags); 724 write_seqlock_irqsave(&tk->lock, flags);
708 725
709 timekeeping_forward_now(&timekeeper); 726 timekeeping_forward_now(tk);
710 727
711 __timekeeping_inject_sleeptime(&timekeeper, delta); 728 __timekeeping_inject_sleeptime(tk, delta);
712 729
713 timekeeping_update(&timekeeper, true); 730 timekeeping_update(tk, true);
714 731
715 write_sequnlock_irqrestore(&timekeeper.lock, flags); 732 write_sequnlock_irqrestore(&tk->lock, flags);
716 733
717 /* signal hrtimers about time change */ 734 /* signal hrtimers about time change */
718 clock_was_set(); 735 clock_was_set();
719} 736}
720 737
721
722/** 738/**
723 * timekeeping_resume - Resumes the generic timekeeping subsystem. 739 * timekeeping_resume - Resumes the generic timekeeping subsystem.
724 * 740 *
@@ -728,6 +744,7 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
728 */ 744 */
729static void timekeeping_resume(void) 745static void timekeeping_resume(void)
730{ 746{
747 struct timekeeper *tk = &timekeeper;
731 unsigned long flags; 748 unsigned long flags;
732 struct timespec ts; 749 struct timespec ts;
733 750
@@ -735,18 +752,18 @@ static void timekeeping_resume(void)
735 752
736 clocksource_resume(); 753 clocksource_resume();
737 754
738 write_seqlock_irqsave(&timekeeper.lock, flags); 755 write_seqlock_irqsave(&tk->lock, flags);
739 756
740 if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) { 757 if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
741 ts = timespec_sub(ts, timekeeping_suspend_time); 758 ts = timespec_sub(ts, timekeeping_suspend_time);
742 __timekeeping_inject_sleeptime(&timekeeper, &ts); 759 __timekeeping_inject_sleeptime(tk, &ts);
743 } 760 }
744 /* re-base the last cycle value */ 761 /* re-base the last cycle value */
745 timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock); 762 tk->clock->cycle_last = tk->clock->read(tk->clock);
746 timekeeper.ntp_error = 0; 763 tk->ntp_error = 0;
747 timekeeping_suspended = 0; 764 timekeeping_suspended = 0;
748 timekeeping_update(&timekeeper, false); 765 timekeeping_update(tk, false);
749 write_sequnlock_irqrestore(&timekeeper.lock, flags); 766 write_sequnlock_irqrestore(&tk->lock, flags);
750 767
751 touch_softlockup_watchdog(); 768 touch_softlockup_watchdog();
752 769
@@ -758,14 +775,15 @@ static void timekeeping_resume(void)
758 775
759static int timekeeping_suspend(void) 776static int timekeeping_suspend(void)
760{ 777{
778 struct timekeeper *tk = &timekeeper;
761 unsigned long flags; 779 unsigned long flags;
762 struct timespec delta, delta_delta; 780 struct timespec delta, delta_delta;
763 static struct timespec old_delta; 781 static struct timespec old_delta;
764 782
765 read_persistent_clock(&timekeeping_suspend_time); 783 read_persistent_clock(&timekeeping_suspend_time);
766 784
767 write_seqlock_irqsave(&timekeeper.lock, flags); 785 write_seqlock_irqsave(&tk->lock, flags);
768 timekeeping_forward_now(&timekeeper); 786 timekeeping_forward_now(tk);
769 timekeeping_suspended = 1; 787 timekeeping_suspended = 1;
770 788
771 /* 789 /*
@@ -774,7 +792,7 @@ static int timekeeping_suspend(void)
774 * try to compensate so the difference in system time 792 * try to compensate so the difference in system time
775 * and persistent_clock time stays close to constant. 793 * and persistent_clock time stays close to constant.
776 */ 794 */
777 delta = timespec_sub(tk_xtime(&timekeeper), timekeeping_suspend_time); 795 delta = timespec_sub(tk_xtime(tk), timekeeping_suspend_time);
778 delta_delta = timespec_sub(delta, old_delta); 796 delta_delta = timespec_sub(delta, old_delta);
779 if (abs(delta_delta.tv_sec) >= 2) { 797 if (abs(delta_delta.tv_sec) >= 2) {
780 /* 798 /*
@@ -787,7 +805,7 @@ static int timekeeping_suspend(void)
787 timekeeping_suspend_time = 805 timekeeping_suspend_time =
788 timespec_add(timekeeping_suspend_time, delta_delta); 806 timespec_add(timekeeping_suspend_time, delta_delta);
789 } 807 }
790 write_sequnlock_irqrestore(&timekeeper.lock, flags); 808 write_sequnlock_irqrestore(&tk->lock, flags);
791 809
792 clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL); 810 clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
793 clocksource_suspend(); 811 clocksource_suspend();
@@ -898,27 +916,29 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
898 * the error. This causes the likely below to be unlikely. 916 * the error. This causes the likely below to be unlikely.
899 * 917 *
900 * The proper fix is to avoid rounding up by using 918 * The proper fix is to avoid rounding up by using
901 * the high precision timekeeper.xtime_nsec instead of 919 * the high precision tk->xtime_nsec instead of
902 * xtime.tv_nsec everywhere. Fixing this will take some 920 * xtime.tv_nsec everywhere. Fixing this will take some
903 * time. 921 * time.
904 */ 922 */
905 if (likely(error <= interval)) 923 if (likely(error <= interval))
906 adj = 1; 924 adj = 1;
907 else 925 else
908 adj = timekeeping_bigadjust(tk, error, &interval, 926 adj = timekeeping_bigadjust(tk, error, &interval, &offset);
909 &offset); 927 } else {
910 } else if (error < -interval) { 928 if (error < -interval) {
911 /* See comment above, this is just switched for the negative */ 929 /* See comment above, this is just switched for the negative */
912 error >>= 2; 930 error >>= 2;
913 if (likely(error >= -interval)) { 931 if (likely(error >= -interval)) {
914 adj = -1; 932 adj = -1;
915 interval = -interval; 933 interval = -interval;
916 offset = -offset; 934 offset = -offset;
917 } else 935 } else {
918 adj = timekeeping_bigadjust(tk, error, &interval, 936 adj = timekeeping_bigadjust(tk, error, &interval, &offset);
919 &offset); 937 }
920 } else 938 } else {
921 return; 939 goto out_adjust;
940 }
941 }
922 942
923 if (unlikely(tk->clock->maxadj && 943 if (unlikely(tk->clock->maxadj &&
924 (tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) { 944 (tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
@@ -981,6 +1001,7 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
981 tk->xtime_nsec -= offset; 1001 tk->xtime_nsec -= offset;
982 tk->ntp_error -= (interval - offset) << tk->ntp_error_shift; 1002 tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
983 1003
1004out_adjust:
984 /* 1005 /*
985 * It may be possible that when we entered this function, xtime_nsec 1006 * It may be possible that when we entered this function, xtime_nsec
986 * was very small. Further, if we're slightly speeding the clocksource 1007 * was very small. Further, if we're slightly speeding the clocksource
@@ -1003,7 +1024,6 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
1003 1024
1004} 1025}
1005 1026
1006
1007/** 1027/**
1008 * accumulate_nsecs_to_secs - Accumulates nsecs into secs 1028 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
1009 * 1029 *
@@ -1024,15 +1044,21 @@ static inline void accumulate_nsecs_to_secs(struct timekeeper *tk)
1024 1044
1025 /* Figure out if its a leap sec and apply if needed */ 1045 /* Figure out if its a leap sec and apply if needed */
1026 leap = second_overflow(tk->xtime_sec); 1046 leap = second_overflow(tk->xtime_sec);
1027 tk->xtime_sec += leap; 1047 if (unlikely(leap)) {
1028 tk->wall_to_monotonic.tv_sec -= leap; 1048 struct timespec ts;
1029 if (leap) 1049
1030 clock_was_set_delayed(); 1050 tk->xtime_sec += leap;
1051
1052 ts.tv_sec = leap;
1053 ts.tv_nsec = 0;
1054 tk_set_wall_to_mono(tk,
1055 timespec_sub(tk->wall_to_monotonic, ts));
1031 1056
1057 clock_was_set_delayed();
1058 }
1032 } 1059 }
1033} 1060}
1034 1061
1035
1036/** 1062/**
1037 * logarithmic_accumulation - shifted accumulation of cycles 1063 * logarithmic_accumulation - shifted accumulation of cycles
1038 * 1064 *
@@ -1076,7 +1102,6 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
1076 return offset; 1102 return offset;
1077} 1103}
1078 1104
1079
1080/** 1105/**
1081 * update_wall_time - Uses the current clocksource to increment the wall time 1106 * update_wall_time - Uses the current clocksource to increment the wall time
1082 * 1107 *
@@ -1084,21 +1109,22 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
1084static void update_wall_time(void) 1109static void update_wall_time(void)
1085{ 1110{
1086 struct clocksource *clock; 1111 struct clocksource *clock;
1112 struct timekeeper *tk = &timekeeper;
1087 cycle_t offset; 1113 cycle_t offset;
1088 int shift = 0, maxshift; 1114 int shift = 0, maxshift;
1089 unsigned long flags; 1115 unsigned long flags;
1090 s64 remainder; 1116 s64 remainder;
1091 1117
1092 write_seqlock_irqsave(&timekeeper.lock, flags); 1118 write_seqlock_irqsave(&tk->lock, flags);
1093 1119
1094 /* Make sure we're fully resumed: */ 1120 /* Make sure we're fully resumed: */
1095 if (unlikely(timekeeping_suspended)) 1121 if (unlikely(timekeeping_suspended))
1096 goto out; 1122 goto out;
1097 1123
1098 clock = timekeeper.clock; 1124 clock = tk->clock;
1099 1125
1100#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET 1126#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
1101 offset = timekeeper.cycle_interval; 1127 offset = tk->cycle_interval;
1102#else 1128#else
1103 offset = (clock->read(clock) - clock->cycle_last) & clock->mask; 1129 offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
1104#endif 1130#endif
@@ -1111,19 +1137,19 @@ static void update_wall_time(void)
1111 * chunk in one go, and then try to consume the next smaller 1137 * chunk in one go, and then try to consume the next smaller
1112 * doubled multiple. 1138 * doubled multiple.
1113 */ 1139 */
1114 shift = ilog2(offset) - ilog2(timekeeper.cycle_interval); 1140 shift = ilog2(offset) - ilog2(tk->cycle_interval);
1115 shift = max(0, shift); 1141 shift = max(0, shift);
1116 /* Bound shift to one less than what overflows tick_length */ 1142 /* Bound shift to one less than what overflows tick_length */
1117 maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1; 1143 maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
1118 shift = min(shift, maxshift); 1144 shift = min(shift, maxshift);
1119 while (offset >= timekeeper.cycle_interval) { 1145 while (offset >= tk->cycle_interval) {
1120 offset = logarithmic_accumulation(&timekeeper, offset, shift); 1146 offset = logarithmic_accumulation(tk, offset, shift);
1121 if(offset < timekeeper.cycle_interval<<shift) 1147 if (offset < tk->cycle_interval<<shift)
1122 shift--; 1148 shift--;
1123 } 1149 }
1124 1150
1125 /* correct the clock when NTP error is too big */ 1151 /* correct the clock when NTP error is too big */
1126 timekeeping_adjust(&timekeeper, offset); 1152 timekeeping_adjust(tk, offset);
1127 1153
1128 1154
1129 /* 1155 /*
@@ -1135,21 +1161,21 @@ static void update_wall_time(void)
1135 * the vsyscall implementations are converted to use xtime_nsec 1161 * the vsyscall implementations are converted to use xtime_nsec
1136 * (shifted nanoseconds), this can be killed. 1162 * (shifted nanoseconds), this can be killed.
1137 */ 1163 */
1138 remainder = timekeeper.xtime_nsec & ((1 << timekeeper.shift) - 1); 1164 remainder = tk->xtime_nsec & ((1 << tk->shift) - 1);
1139 timekeeper.xtime_nsec -= remainder; 1165 tk->xtime_nsec -= remainder;
1140 timekeeper.xtime_nsec += 1 << timekeeper.shift; 1166 tk->xtime_nsec += 1 << tk->shift;
1141 timekeeper.ntp_error += remainder << timekeeper.ntp_error_shift; 1167 tk->ntp_error += remainder << tk->ntp_error_shift;
1142 1168
1143 /* 1169 /*
1144 * Finally, make sure that after the rounding 1170 * Finally, make sure that after the rounding
1145 * xtime_nsec isn't larger than NSEC_PER_SEC 1171 * xtime_nsec isn't larger than NSEC_PER_SEC
1146 */ 1172 */
1147 accumulate_nsecs_to_secs(&timekeeper); 1173 accumulate_nsecs_to_secs(tk);
1148 1174
1149 timekeeping_update(&timekeeper, false); 1175 timekeeping_update(tk, false);
1150 1176
1151out: 1177out:
1152 write_sequnlock_irqrestore(&timekeeper.lock, flags); 1178 write_sequnlock_irqrestore(&tk->lock, flags);
1153 1179
1154} 1180}
1155 1181
@@ -1166,18 +1192,18 @@ out:
1166 */ 1192 */
1167void getboottime(struct timespec *ts) 1193void getboottime(struct timespec *ts)
1168{ 1194{
1195 struct timekeeper *tk = &timekeeper;
1169 struct timespec boottime = { 1196 struct timespec boottime = {
1170 .tv_sec = timekeeper.wall_to_monotonic.tv_sec + 1197 .tv_sec = tk->wall_to_monotonic.tv_sec +
1171 timekeeper.total_sleep_time.tv_sec, 1198 tk->total_sleep_time.tv_sec,
1172 .tv_nsec = timekeeper.wall_to_monotonic.tv_nsec + 1199 .tv_nsec = tk->wall_to_monotonic.tv_nsec +
1173 timekeeper.total_sleep_time.tv_nsec 1200 tk->total_sleep_time.tv_nsec
1174 }; 1201 };
1175 1202
1176 set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec); 1203 set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
1177} 1204}
1178EXPORT_SYMBOL_GPL(getboottime); 1205EXPORT_SYMBOL_GPL(getboottime);
1179 1206
1180
1181/** 1207/**
1182 * get_monotonic_boottime - Returns monotonic time since boot 1208 * get_monotonic_boottime - Returns monotonic time since boot
1183 * @ts: pointer to the timespec to be set 1209 * @ts: pointer to the timespec to be set
@@ -1189,19 +1215,20 @@ EXPORT_SYMBOL_GPL(getboottime);
1189 */ 1215 */
1190void get_monotonic_boottime(struct timespec *ts) 1216void get_monotonic_boottime(struct timespec *ts)
1191{ 1217{
1218 struct timekeeper *tk = &timekeeper;
1192 struct timespec tomono, sleep; 1219 struct timespec tomono, sleep;
1193 unsigned int seq; 1220 unsigned int seq;
1194 1221
1195 WARN_ON(timekeeping_suspended); 1222 WARN_ON(timekeeping_suspended);
1196 1223
1197 do { 1224 do {
1198 seq = read_seqbegin(&timekeeper.lock); 1225 seq = read_seqbegin(&tk->lock);
1199 ts->tv_sec = timekeeper.xtime_sec; 1226 ts->tv_sec = tk->xtime_sec;
1200 ts->tv_nsec = timekeeping_get_ns(&timekeeper); 1227 ts->tv_nsec = timekeeping_get_ns(tk);
1201 tomono = timekeeper.wall_to_monotonic; 1228 tomono = tk->wall_to_monotonic;
1202 sleep = timekeeper.total_sleep_time; 1229 sleep = tk->total_sleep_time;
1203 1230
1204 } while (read_seqretry(&timekeeper.lock, seq)); 1231 } while (read_seqretry(&tk->lock, seq));
1205 1232
1206 set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec, 1233 set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
1207 ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec); 1234 ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec);
@@ -1231,31 +1258,38 @@ EXPORT_SYMBOL_GPL(ktime_get_boottime);
1231 */ 1258 */
1232void monotonic_to_bootbased(struct timespec *ts) 1259void monotonic_to_bootbased(struct timespec *ts)
1233{ 1260{
1234 *ts = timespec_add(*ts, timekeeper.total_sleep_time); 1261 struct timekeeper *tk = &timekeeper;
1262
1263 *ts = timespec_add(*ts, tk->total_sleep_time);
1235} 1264}
1236EXPORT_SYMBOL_GPL(monotonic_to_bootbased); 1265EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
1237 1266
1238unsigned long get_seconds(void) 1267unsigned long get_seconds(void)
1239{ 1268{
1240 return timekeeper.xtime_sec; 1269 struct timekeeper *tk = &timekeeper;
1270
1271 return tk->xtime_sec;
1241} 1272}
1242EXPORT_SYMBOL(get_seconds); 1273EXPORT_SYMBOL(get_seconds);
1243 1274
1244struct timespec __current_kernel_time(void) 1275struct timespec __current_kernel_time(void)
1245{ 1276{
1246 return tk_xtime(&timekeeper); 1277 struct timekeeper *tk = &timekeeper;
1278
1279 return tk_xtime(tk);
1247} 1280}
1248 1281
1249struct timespec current_kernel_time(void) 1282struct timespec current_kernel_time(void)
1250{ 1283{
1284 struct timekeeper *tk = &timekeeper;
1251 struct timespec now; 1285 struct timespec now;
1252 unsigned long seq; 1286 unsigned long seq;
1253 1287
1254 do { 1288 do {
1255 seq = read_seqbegin(&timekeeper.lock); 1289 seq = read_seqbegin(&tk->lock);
1256 1290
1257 now = tk_xtime(&timekeeper); 1291 now = tk_xtime(tk);
1258 } while (read_seqretry(&timekeeper.lock, seq)); 1292 } while (read_seqretry(&tk->lock, seq));
1259 1293
1260 return now; 1294 return now;
1261} 1295}
@@ -1263,15 +1297,16 @@ EXPORT_SYMBOL(current_kernel_time);
1263 1297
1264struct timespec get_monotonic_coarse(void) 1298struct timespec get_monotonic_coarse(void)
1265{ 1299{
1300 struct timekeeper *tk = &timekeeper;
1266 struct timespec now, mono; 1301 struct timespec now, mono;
1267 unsigned long seq; 1302 unsigned long seq;
1268 1303
1269 do { 1304 do {
1270 seq = read_seqbegin(&timekeeper.lock); 1305 seq = read_seqbegin(&tk->lock);
1271 1306
1272 now = tk_xtime(&timekeeper); 1307 now = tk_xtime(tk);
1273 mono = timekeeper.wall_to_monotonic; 1308 mono = tk->wall_to_monotonic;
1274 } while (read_seqretry(&timekeeper.lock, seq)); 1309 } while (read_seqretry(&tk->lock, seq));
1275 1310
1276 set_normalized_timespec(&now, now.tv_sec + mono.tv_sec, 1311 set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
1277 now.tv_nsec + mono.tv_nsec); 1312 now.tv_nsec + mono.tv_nsec);
@@ -1300,14 +1335,15 @@ void do_timer(unsigned long ticks)
1300void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, 1335void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
1301 struct timespec *wtom, struct timespec *sleep) 1336 struct timespec *wtom, struct timespec *sleep)
1302{ 1337{
1338 struct timekeeper *tk = &timekeeper;
1303 unsigned long seq; 1339 unsigned long seq;
1304 1340
1305 do { 1341 do {
1306 seq = read_seqbegin(&timekeeper.lock); 1342 seq = read_seqbegin(&tk->lock);
1307 *xtim = tk_xtime(&timekeeper); 1343 *xtim = tk_xtime(tk);
1308 *wtom = timekeeper.wall_to_monotonic; 1344 *wtom = tk->wall_to_monotonic;
1309 *sleep = timekeeper.total_sleep_time; 1345 *sleep = tk->total_sleep_time;
1310 } while (read_seqretry(&timekeeper.lock, seq)); 1346 } while (read_seqretry(&tk->lock, seq));
1311} 1347}
1312 1348
1313#ifdef CONFIG_HIGH_RES_TIMERS 1349#ifdef CONFIG_HIGH_RES_TIMERS
@@ -1321,19 +1357,20 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
1321 */ 1357 */
1322ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot) 1358ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot)
1323{ 1359{
1360 struct timekeeper *tk = &timekeeper;
1324 ktime_t now; 1361 ktime_t now;
1325 unsigned int seq; 1362 unsigned int seq;
1326 u64 secs, nsecs; 1363 u64 secs, nsecs;
1327 1364
1328 do { 1365 do {
1329 seq = read_seqbegin(&timekeeper.lock); 1366 seq = read_seqbegin(&tk->lock);
1330 1367
1331 secs = timekeeper.xtime_sec; 1368 secs = tk->xtime_sec;
1332 nsecs = timekeeping_get_ns(&timekeeper); 1369 nsecs = timekeeping_get_ns(tk);
1333 1370
1334 *offs_real = timekeeper.offs_real; 1371 *offs_real = tk->offs_real;
1335 *offs_boot = timekeeper.offs_boot; 1372 *offs_boot = tk->offs_boot;
1336 } while (read_seqretry(&timekeeper.lock, seq)); 1373 } while (read_seqretry(&tk->lock, seq));
1337 1374
1338 now = ktime_add_ns(ktime_set(secs, 0), nsecs); 1375 now = ktime_add_ns(ktime_set(secs, 0), nsecs);
1339 now = ktime_sub(now, *offs_real); 1376 now = ktime_sub(now, *offs_real);
@@ -1346,19 +1383,19 @@ ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot)
1346 */ 1383 */
1347ktime_t ktime_get_monotonic_offset(void) 1384ktime_t ktime_get_monotonic_offset(void)
1348{ 1385{
1386 struct timekeeper *tk = &timekeeper;
1349 unsigned long seq; 1387 unsigned long seq;
1350 struct timespec wtom; 1388 struct timespec wtom;
1351 1389
1352 do { 1390 do {
1353 seq = read_seqbegin(&timekeeper.lock); 1391 seq = read_seqbegin(&tk->lock);
1354 wtom = timekeeper.wall_to_monotonic; 1392 wtom = tk->wall_to_monotonic;
1355 } while (read_seqretry(&timekeeper.lock, seq)); 1393 } while (read_seqretry(&tk->lock, seq));
1356 1394
1357 return timespec_to_ktime(wtom); 1395 return timespec_to_ktime(wtom);
1358} 1396}
1359EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset); 1397EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);
1360 1398
1361
1362/** 1399/**
1363 * xtime_update() - advances the timekeeping infrastructure 1400 * xtime_update() - advances the timekeeping infrastructure
1364 * @ticks: number of ticks, that have elapsed since the last call. 1401 * @ticks: number of ticks, that have elapsed since the last call.
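Note on the timekeeping.c hunks above: they are one mechanical conversion. Every function now takes a local struct timekeeper *tk = &timekeeper alias and works through tk (plus the new tk_set_wall_to_mono()/tk_set_sleep_time() setters) instead of poking the global state directly, which keeps the later split into shadow/multiple timekeeper instances manageable. A minimal sketch of the resulting seqlock reader pattern; the field names follow the diff, the rest is illustrative:

/*
 * Illustrative only — not a function from the patch.  Shows the
 * reader idiom used throughout timekeeping.c after this change.
 */
static void example_read_wall_to_mono(struct timespec *wtom)
{
	struct timekeeper *tk = &timekeeper;	/* local alias, as in the diff */
	unsigned long seq;

	do {
		seq = read_seqbegin(&tk->lock);	/* begin a consistent snapshot */
		*wtom = tk->wall_to_monotonic;	/* access fields via tk, not the global */
	} while (read_seqretry(&tk->lock, seq));	/* retry if a writer slipped in */
}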
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index fee3752ae8f6..8a6d2ee2086c 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -281,7 +281,7 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip)
281 281
282 head = this_cpu_ptr(event_function.perf_events); 282 head = this_cpu_ptr(event_function.perf_events);
283 perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0, 283 perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
284 1, &regs, head); 284 1, &regs, head, NULL);
285 285
286#undef ENTRY_SIZE 286#undef ENTRY_SIZE
287} 287}
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index b31d3d5699fe..1a2117043bb1 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1002,7 +1002,8 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
1002 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); 1002 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
1003 1003
1004 head = this_cpu_ptr(call->perf_events); 1004 head = this_cpu_ptr(call->perf_events);
1005 perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head); 1005 perf_trace_buf_submit(entry, size, rctx,
1006 entry->ip, 1, regs, head, NULL);
1006} 1007}
1007 1008
1008/* Kretprobe profile handler */ 1009/* Kretprobe profile handler */
@@ -1033,7 +1034,8 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
1033 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); 1034 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
1034 1035
1035 head = this_cpu_ptr(call->perf_events); 1036 head = this_cpu_ptr(call->perf_events);
1036 perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head); 1037 perf_trace_buf_submit(entry, size, rctx,
1038 entry->ret_ip, 1, regs, head, NULL);
1037} 1039}
1038#endif /* CONFIG_PERF_EVENTS */ 1040#endif /* CONFIG_PERF_EVENTS */
1039 1041
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 96fc73369099..60e4d7875672 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -532,7 +532,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
532 (unsigned long *)&rec->args); 532 (unsigned long *)&rec->args);
533 533
534 head = this_cpu_ptr(sys_data->enter_event->perf_events); 534 head = this_cpu_ptr(sys_data->enter_event->perf_events);
535 perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head); 535 perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
536} 536}
537 537
538int perf_sysenter_enable(struct ftrace_event_call *call) 538int perf_sysenter_enable(struct ftrace_event_call *call)
@@ -608,7 +608,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
608 rec->ret = syscall_get_return_value(current, regs); 608 rec->ret = syscall_get_return_value(current, regs);
609 609
610 head = this_cpu_ptr(sys_data->exit_event->perf_events); 610 head = this_cpu_ptr(sys_data->exit_event->perf_events);
611 perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head); 611 perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
612} 612}
613 613
614int perf_sysexit_enable(struct ftrace_event_call *call) 614int perf_sysexit_enable(struct ftrace_event_call *call)
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 2b36ac68549e..03003cd7dd96 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -670,7 +670,7 @@ static void uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs)
670 call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset); 670 call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);
671 671
672 head = this_cpu_ptr(call->perf_events); 672 head = this_cpu_ptr(call->perf_events);
673 perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head); 673 perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head, NULL);
674 674
675 out: 675 out:
676 preempt_enable(); 676 preempt_enable();
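The four tracing hunks above are fallout from a perf-core interface change: perf_trace_buf_submit() takes one more trailing argument, and every tracepoint, kprobe, uprobe and syscall call site simply passes NULL for it. Restating the call shape with the assumption spelled out:

	/* Call shape after the change.  The new trailing argument is
	 * (assumption, not shown in this diff) a struct task_struct *
	 * that lets perf attribute the event to a specific task;
	 * passing NULL keeps the previous current-context behaviour. */
	head = this_cpu_ptr(call->perf_events);
	perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head,
			      NULL /* task: none, i.e. old behaviour */);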
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 6b4718e2ee34..b41823cc05e6 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -39,12 +39,6 @@ DEFINE_SPINLOCK(bdi_lock);
39LIST_HEAD(bdi_list); 39LIST_HEAD(bdi_list);
40LIST_HEAD(bdi_pending_list); 40LIST_HEAD(bdi_pending_list);
41 41
42static struct task_struct *sync_supers_tsk;
43static struct timer_list sync_supers_timer;
44
45static int bdi_sync_supers(void *);
46static void sync_supers_timer_fn(unsigned long);
47
48void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2) 42void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
49{ 43{
50 if (wb1 < wb2) { 44 if (wb1 < wb2) {
@@ -250,12 +244,6 @@ static int __init default_bdi_init(void)
250{ 244{
251 int err; 245 int err;
252 246
253 sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
254 BUG_ON(IS_ERR(sync_supers_tsk));
255
256 setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
257 bdi_arm_supers_timer();
258
259 err = bdi_init(&default_backing_dev_info); 247 err = bdi_init(&default_backing_dev_info);
260 if (!err) 248 if (!err)
261 bdi_register(&default_backing_dev_info, NULL, "default"); 249 bdi_register(&default_backing_dev_info, NULL, "default");
@@ -270,46 +258,6 @@ int bdi_has_dirty_io(struct backing_dev_info *bdi)
270 return wb_has_dirty_io(&bdi->wb); 258 return wb_has_dirty_io(&bdi->wb);
271} 259}
272 260
273/*
274 * kupdated() used to do this. We cannot do it from the bdi_forker_thread()
275 * or we risk deadlocking on ->s_umount. The longer term solution would be
276 * to implement sync_supers_bdi() or similar and simply do it from the
277 * bdi writeback thread individually.
278 */
279static int bdi_sync_supers(void *unused)
280{
281 set_user_nice(current, 0);
282
283 while (!kthread_should_stop()) {
284 set_current_state(TASK_INTERRUPTIBLE);
285 schedule();
286
287 /*
288 * Do this periodically, like kupdated() did before.
289 */
290 sync_supers();
291 }
292
293 return 0;
294}
295
296void bdi_arm_supers_timer(void)
297{
298 unsigned long next;
299
300 if (!dirty_writeback_interval)
301 return;
302
303 next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies;
304 mod_timer(&sync_supers_timer, round_jiffies_up(next));
305}
306
307static void sync_supers_timer_fn(unsigned long unused)
308{
309 wake_up_process(sync_supers_tsk);
310 bdi_arm_supers_timer();
311}
312
313static void wakeup_timer_fn(unsigned long data) 261static void wakeup_timer_fn(unsigned long data)
314{ 262{
315 struct backing_dev_info *bdi = (struct backing_dev_info *)data; 263 struct backing_dev_info *bdi = (struct backing_dev_info *)data;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index e5363f34e025..5ad5ce23c1e0 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1532,7 +1532,6 @@ int dirty_writeback_centisecs_handler(ctl_table *table, int write,
1532 void __user *buffer, size_t *length, loff_t *ppos) 1532 void __user *buffer, size_t *length, loff_t *ppos)
1533{ 1533{
1534 proc_dointvec(table, write, buffer, length, ppos); 1534 proc_dointvec(table, write, buffer, length, ppos);
1535 bdi_arm_supers_timer();
1536 return 0; 1535 return 0;
1537} 1536}
1538 1537
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index b421cc49d2cd..fc866f2e4528 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -200,11 +200,11 @@ void batadv_gw_election(struct batadv_priv *bat_priv)
200 if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT) 200 if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT)
201 goto out; 201 goto out;
202 202
203 if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect))
204 goto out;
205
206 curr_gw = batadv_gw_get_selected_gw_node(bat_priv); 203 curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
207 204
205 if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect) && curr_gw)
206 goto out;
207
208 next_gw = batadv_gw_get_best_gw_node(bat_priv); 208 next_gw = batadv_gw_get_best_gw_node(bat_priv);
209 209
210 if (curr_gw == next_gw) 210 if (curr_gw == next_gw)
diff --git a/net/core/dev.c b/net/core/dev.c
index 0cb3fe8d8e72..f91abf800161 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2134,6 +2134,9 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
2134 __be16 protocol = skb->protocol; 2134 __be16 protocol = skb->protocol;
2135 netdev_features_t features = skb->dev->features; 2135 netdev_features_t features = skb->dev->features;
2136 2136
2137 if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
2138 features &= ~NETIF_F_GSO_MASK;
2139
2137 if (protocol == htons(ETH_P_8021Q)) { 2140 if (protocol == htons(ETH_P_8021Q)) {
2138 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; 2141 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2139 protocol = veh->h_vlan_encapsulated_proto; 2142 protocol = veh->h_vlan_encapsulated_proto;
@@ -5986,6 +5989,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
5986 dev_net_set(dev, &init_net); 5989 dev_net_set(dev, &init_net);
5987 5990
5988 dev->gso_max_size = GSO_MAX_SIZE; 5991 dev->gso_max_size = GSO_MAX_SIZE;
5992 dev->gso_max_segs = GSO_MAX_SEGS;
5989 5993
5990 INIT_LIST_HEAD(&dev->napi_list); 5994 INIT_LIST_HEAD(&dev->napi_list);
5991 INIT_LIST_HEAD(&dev->unreg_list); 5995 INIT_LIST_HEAD(&dev->unreg_list);
diff --git a/net/core/sock.c b/net/core/sock.c
index 6b654b3ddfda..8f67ced8d6a8 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1458,6 +1458,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1458 } else { 1458 } else {
1459 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; 1459 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
1460 sk->sk_gso_max_size = dst->dev->gso_max_size; 1460 sk->sk_gso_max_size = dst->dev->gso_max_size;
1461 sk->sk_gso_max_segs = dst->dev->gso_max_segs;
1461 } 1462 }
1462 } 1463 }
1463} 1464}
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index ba39a52d18c1..76dde25fb9a0 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -197,7 +197,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
197 neigh = __ipv4_neigh_lookup_noref(dev, nexthop); 197 neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
198 if (unlikely(!neigh)) 198 if (unlikely(!neigh))
199 neigh = __neigh_create(&arp_tbl, &nexthop, dev, false); 199 neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
200 if (neigh) { 200 if (!IS_ERR(neigh)) {
201 int res = dst_neigh_output(dst, neigh, skb); 201 int res = dst_neigh_output(dst, neigh, skb);
202 202
203 rcu_read_unlock_bh(); 203 rcu_read_unlock_bh();
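The ip_finish_output2() change matters because __neigh_create() reports failure with an ERR_PTR() value rather than NULL, so the old `if (neigh)` test happily dereferenced an error pointer. A condensed, illustrative restatement of the fixed path (RCU locking and drop statistics omitted):

/* Sketch of the corrected pattern: helpers that encode an errno in
 * the returned pointer must be tested with IS_ERR(), never against NULL. */
static int example_output_via_neigh(struct net_device *dev, u32 nexthop,
				    struct dst_entry *dst, struct sk_buff *skb)
{
	struct neighbour *n;

	n = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!n))
		n = __neigh_create(&arp_tbl, &nexthop, dev, false);
	if (IS_ERR(n))
		return PTR_ERR(n);	/* error pointer, not a usable neighbour */
	return dst_neigh_output(dst, n, skb);	/* as in the fixed code */
}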
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index c035251beb07..e4ba974f143c 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -70,7 +70,6 @@
70#include <linux/types.h> 70#include <linux/types.h>
71#include <linux/kernel.h> 71#include <linux/kernel.h>
72#include <linux/mm.h> 72#include <linux/mm.h>
73#include <linux/bootmem.h>
74#include <linux/string.h> 73#include <linux/string.h>
75#include <linux/socket.h> 74#include <linux/socket.h>
76#include <linux/sockios.h> 75#include <linux/sockios.h>
@@ -80,7 +79,6 @@
80#include <linux/netdevice.h> 79#include <linux/netdevice.h>
81#include <linux/proc_fs.h> 80#include <linux/proc_fs.h>
82#include <linux/init.h> 81#include <linux/init.h>
83#include <linux/workqueue.h>
84#include <linux/skbuff.h> 82#include <linux/skbuff.h>
85#include <linux/inetdevice.h> 83#include <linux/inetdevice.h>
86#include <linux/igmp.h> 84#include <linux/igmp.h>
@@ -88,11 +86,9 @@
88#include <linux/mroute.h> 86#include <linux/mroute.h>
89#include <linux/netfilter_ipv4.h> 87#include <linux/netfilter_ipv4.h>
90#include <linux/random.h> 88#include <linux/random.h>
91#include <linux/jhash.h>
92#include <linux/rcupdate.h> 89#include <linux/rcupdate.h>
93#include <linux/times.h> 90#include <linux/times.h>
94#include <linux/slab.h> 91#include <linux/slab.h>
95#include <linux/prefetch.h>
96#include <net/dst.h> 92#include <net/dst.h>
97#include <net/net_namespace.h> 93#include <net/net_namespace.h>
98#include <net/protocol.h> 94#include <net/protocol.h>
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index e7e6eeae49c0..2109ff4a1daf 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -811,7 +811,9 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
811 old_size_goal + mss_now > xmit_size_goal)) { 811 old_size_goal + mss_now > xmit_size_goal)) {
812 xmit_size_goal = old_size_goal; 812 xmit_size_goal = old_size_goal;
813 } else { 813 } else {
814 tp->xmit_size_goal_segs = xmit_size_goal / mss_now; 814 tp->xmit_size_goal_segs =
815 min_t(u16, xmit_size_goal / mss_now,
816 sk->sk_gso_max_segs);
815 xmit_size_goal = tp->xmit_size_goal_segs * mss_now; 817 xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
816 } 818 }
817 } 819 }
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 4d4db16e336e..1432cdb0644c 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -291,7 +291,8 @@ bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
291 left = tp->snd_cwnd - in_flight; 291 left = tp->snd_cwnd - in_flight;
292 if (sk_can_gso(sk) && 292 if (sk_can_gso(sk) &&
293 left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd && 293 left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
294 left * tp->mss_cache < sk->sk_gso_max_size) 294 left * tp->mss_cache < sk->sk_gso_max_size &&
295 left < sk->sk_gso_max_segs)
295 return true; 296 return true;
296 return left <= tcp_max_tso_deferred_mss(tp); 297 return left <= tcp_max_tso_deferred_mss(tp);
297} 298}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2fd2bc9e3c64..85308b90df80 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5392,6 +5392,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
5392{ 5392{
5393 struct tcp_sock *tp = tcp_sk(sk); 5393 struct tcp_sock *tp = tcp_sk(sk);
5394 5394
5395 if (unlikely(sk->sk_rx_dst == NULL))
5396 inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
5395 /* 5397 /*
5396 * Header prediction. 5398 * Header prediction.
5397 * The code loosely follows the one in the famous 5399 * The code loosely follows the one in the famous
@@ -5605,7 +5607,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
5605 tcp_set_state(sk, TCP_ESTABLISHED); 5607 tcp_set_state(sk, TCP_ESTABLISHED);
5606 5608
5607 if (skb != NULL) { 5609 if (skb != NULL) {
5608 inet_sk_rx_dst_set(sk, skb); 5610 icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
5609 security_inet_conn_established(sk, skb); 5611 security_inet_conn_established(sk, skb);
5610 } 5612 }
5611 5613
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 42b2a6a73092..272241f16fcb 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1627,9 +1627,6 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1627 sk->sk_rx_dst = NULL; 1627 sk->sk_rx_dst = NULL;
1628 } 1628 }
1629 } 1629 }
1630 if (unlikely(sk->sk_rx_dst == NULL))
1631 inet_sk_rx_dst_set(sk, skb);
1632
1633 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) { 1630 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
1634 rsk = sk; 1631 rsk = sk;
1635 goto reset; 1632 goto reset;
@@ -1872,10 +1869,20 @@ static struct timewait_sock_ops tcp_timewait_sock_ops = {
1872 .twsk_destructor= tcp_twsk_destructor, 1869 .twsk_destructor= tcp_twsk_destructor,
1873}; 1870};
1874 1871
1872static void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1873{
1874 struct dst_entry *dst = skb_dst(skb);
1875
1876 dst_hold(dst);
1877 sk->sk_rx_dst = dst;
1878 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1879}
1880
1875const struct inet_connection_sock_af_ops ipv4_specific = { 1881const struct inet_connection_sock_af_ops ipv4_specific = {
1876 .queue_xmit = ip_queue_xmit, 1882 .queue_xmit = ip_queue_xmit,
1877 .send_check = tcp_v4_send_check, 1883 .send_check = tcp_v4_send_check,
1878 .rebuild_header = inet_sk_rebuild_header, 1884 .rebuild_header = inet_sk_rebuild_header,
1885 .sk_rx_dst_set = inet_sk_rx_dst_set,
1879 .conn_request = tcp_v4_conn_request, 1886 .conn_request = tcp_v4_conn_request,
1880 .syn_recv_sock = tcp_v4_syn_recv_sock, 1887 .syn_recv_sock = tcp_v4_syn_recv_sock,
1881 .net_header_len = sizeof(struct iphdr), 1888 .net_header_len = sizeof(struct iphdr),
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 232a90c3ec86..d9c9dcef2de3 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -387,7 +387,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
387 struct tcp_sock *oldtp = tcp_sk(sk); 387 struct tcp_sock *oldtp = tcp_sk(sk);
388 struct tcp_cookie_values *oldcvp = oldtp->cookie_values; 388 struct tcp_cookie_values *oldcvp = oldtp->cookie_values;
389 389
390 inet_sk_rx_dst_set(newsk, skb); 390 newicsk->icsk_af_ops->sk_rx_dst_set(newsk, skb);
391 391
392 /* TCP Cookie Transactions require space for the cookie pair, 392 /* TCP Cookie Transactions require space for the cookie pair,
393 * as it differs for each connection. There is no need to 393 * as it differs for each connection. There is no need to
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 3f1bcff0b10b..20dfd892c86f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -940,7 +940,7 @@ void __init tcp_tasklet_init(void)
940 * We cant xmit new skbs from this context, as we might already 940 * We cant xmit new skbs from this context, as we might already
941 * hold qdisc lock. 941 * hold qdisc lock.
942 */ 942 */
943void tcp_wfree(struct sk_buff *skb) 943static void tcp_wfree(struct sk_buff *skb)
944{ 944{
945 struct sock *sk = skb->sk; 945 struct sock *sk = skb->sk;
946 struct tcp_sock *tp = tcp_sk(sk); 946 struct tcp_sock *tp = tcp_sk(sk);
@@ -1522,21 +1522,21 @@ static void tcp_cwnd_validate(struct sock *sk)
1522 * when we would be allowed to send the split-due-to-Nagle skb fully. 1522 * when we would be allowed to send the split-due-to-Nagle skb fully.
1523 */ 1523 */
1524static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb, 1524static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
1525 unsigned int mss_now, unsigned int cwnd) 1525 unsigned int mss_now, unsigned int max_segs)
1526{ 1526{
1527 const struct tcp_sock *tp = tcp_sk(sk); 1527 const struct tcp_sock *tp = tcp_sk(sk);
1528 u32 needed, window, cwnd_len; 1528 u32 needed, window, max_len;
1529 1529
1530 window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 1530 window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
1531 cwnd_len = mss_now * cwnd; 1531 max_len = mss_now * max_segs;
1532 1532
1533 if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk))) 1533 if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
1534 return cwnd_len; 1534 return max_len;
1535 1535
1536 needed = min(skb->len, window); 1536 needed = min(skb->len, window);
1537 1537
1538 if (cwnd_len <= needed) 1538 if (max_len <= needed)
1539 return cwnd_len; 1539 return max_len;
1540 1540
1541 return needed - needed % mss_now; 1541 return needed - needed % mss_now;
1542} 1542}
@@ -1765,7 +1765,8 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
1765 limit = min(send_win, cong_win); 1765 limit = min(send_win, cong_win);
1766 1766
1767 /* If a full-sized TSO skb can be sent, do it. */ 1767 /* If a full-sized TSO skb can be sent, do it. */
1768 if (limit >= sk->sk_gso_max_size) 1768 if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
1769 sk->sk_gso_max_segs * tp->mss_cache))
1769 goto send_now; 1770 goto send_now;
1770 1771
1771 /* Middle in queue won't get any more data, full sendable already? */ 1772 /* Middle in queue won't get any more data, full sendable already? */
@@ -1999,7 +2000,9 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1999 limit = mss_now; 2000 limit = mss_now;
2000 if (tso_segs > 1 && !tcp_urg_mode(tp)) 2001 if (tso_segs > 1 && !tcp_urg_mode(tp))
2001 limit = tcp_mss_split_point(sk, skb, mss_now, 2002 limit = tcp_mss_split_point(sk, skb, mss_now,
2002 cwnd_quota); 2003 min_t(unsigned int,
2004 cwnd_quota,
2005 sk->sk_gso_max_segs));
2003 2006
2004 if (skb->len > limit && 2007 if (skb->len > limit &&
2005 unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) 2008 unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
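In tcp_output.c the old cwnd-only split point becomes a generic max_segs limit, and tcp_write_xmit()/tcp_tso_should_defer() clamp it with sk->sk_gso_max_segs, so a single TSO skb never carries more segments than the device advertised. A simplified model of the resulting byte limit (illustrative; the real tcp_mss_split_point() also considers the skb length and tail-of-queue case):

static unsigned int example_tso_limit(unsigned int mss_now,
				      unsigned int cwnd_quota,
				      unsigned int gso_max_segs,
				      unsigned int window_bytes)
{
	/* segments bounded by both the congestion quota and the device limit */
	unsigned int segs = min(cwnd_quota, gso_max_segs);
	unsigned int max_len = mss_now * segs;

	if (max_len <= window_bytes)
		return max_len;
	/* otherwise trim to whole MSS units that fit the send window */
	return window_bytes - window_bytes % mss_now;
}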
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index c66b90f71c9b..5a439e9a4c01 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1447,7 +1447,17 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1447 opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC)); 1447 opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));
1448 1448
1449 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ 1449 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1450 struct dst_entry *dst = sk->sk_rx_dst;
1451
1450 sock_rps_save_rxhash(sk, skb); 1452 sock_rps_save_rxhash(sk, skb);
1453 if (dst) {
1454 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1455 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1456 dst_release(dst);
1457 sk->sk_rx_dst = NULL;
1458 }
1459 }
1460
1451 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) 1461 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1452 goto reset; 1462 goto reset;
1453 if (opt_skb) 1463 if (opt_skb)
@@ -1705,9 +1715,9 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
1705 struct dst_entry *dst = sk->sk_rx_dst; 1715 struct dst_entry *dst = sk->sk_rx_dst;
1706 struct inet_sock *icsk = inet_sk(sk); 1716 struct inet_sock *icsk = inet_sk(sk);
1707 if (dst) 1717 if (dst)
1708 dst = dst_check(dst, 0); 1718 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1709 if (dst && 1719 if (dst &&
1710 icsk->rx_dst_ifindex == inet6_iif(skb)) 1720 icsk->rx_dst_ifindex == skb->skb_iif)
1711 skb_dst_set_noref(skb, dst); 1721 skb_dst_set_noref(skb, dst);
1712 } 1722 }
1713 } 1723 }
@@ -1719,10 +1729,23 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1719 .twsk_destructor= tcp_twsk_destructor, 1729 .twsk_destructor= tcp_twsk_destructor,
1720}; 1730};
1721 1731
1732static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1733{
1734 struct dst_entry *dst = skb_dst(skb);
1735 const struct rt6_info *rt = (const struct rt6_info *)dst;
1736
1737 dst_hold(dst);
1738 sk->sk_rx_dst = dst;
1739 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1740 if (rt->rt6i_node)
1741 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
1742}
1743
1722static const struct inet_connection_sock_af_ops ipv6_specific = { 1744static const struct inet_connection_sock_af_ops ipv6_specific = {
1723 .queue_xmit = inet6_csk_xmit, 1745 .queue_xmit = inet6_csk_xmit,
1724 .send_check = tcp_v6_send_check, 1746 .send_check = tcp_v6_send_check,
1725 .rebuild_header = inet6_sk_rebuild_header, 1747 .rebuild_header = inet6_sk_rebuild_header,
1748 .sk_rx_dst_set = inet6_sk_rx_dst_set,
1726 .conn_request = tcp_v6_conn_request, 1749 .conn_request = tcp_v6_conn_request,
1727 .syn_recv_sock = tcp_v6_syn_recv_sock, 1750 .syn_recv_sock = tcp_v6_syn_recv_sock,
1728 .net_header_len = sizeof(struct ipv6hdr), 1751 .net_header_len = sizeof(struct ipv6hdr),
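The tcp_ipv4.c, tcp_minisocks.c, tcp_input.c and tcp_ipv6.c hunks convert the cached-rx-dst handling into an address-family hook: inet_connection_sock_af_ops gains sk_rx_dst_set(), the IPv4 and IPv6 implementations stash skb_dst() plus the incoming ifindex (and, for IPv6, the fib serial number as a validity cookie), and generic TCP code only ever calls the hook. The struct itself is not part of this diff; its assumed new member, with the prototype taken from the two implementations above, looks like:

/* Assumed declaration site (the af_ops struct in the inet headers);
 * only the hook's signature is grounded in this diff. */
struct inet_connection_sock_af_ops {
	/* ...existing members... */
	void (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
	/* ...existing members... */
};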
diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
index 39a8d8924b9c..6828e39ec2ec 100644
--- a/net/llc/llc_station.c
+++ b/net/llc/llc_station.c
@@ -268,7 +268,7 @@ static int llc_station_ac_send_null_dsap_xid_c(struct sk_buff *skb)
268out: 268out:
269 return rc; 269 return rc;
270free: 270free:
271 kfree_skb(skb); 271 kfree_skb(nskb);
272 goto out; 272 goto out;
273} 273}
274 274
@@ -293,7 +293,7 @@ static int llc_station_ac_send_xid_r(struct sk_buff *skb)
293out: 293out:
294 return rc; 294 return rc;
295free: 295free:
296 kfree_skb(skb); 296 kfree_skb(nskb);
297 goto out; 297 goto out;
298} 298}
299 299
@@ -322,7 +322,7 @@ static int llc_station_ac_send_test_r(struct sk_buff *skb)
322out: 322out:
323 return rc; 323 return rc;
324free: 324free:
325 kfree_skb(skb); 325 kfree_skb(nskb);
326 goto out; 326 goto out;
327} 327}
328 328
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 6fac18c0423f..85572353a7e3 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -622,6 +622,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
622 622
623 del_timer_sync(&sdata->u.mesh.housekeeping_timer); 623 del_timer_sync(&sdata->u.mesh.housekeeping_timer);
624 del_timer_sync(&sdata->u.mesh.mesh_path_root_timer); 624 del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
625 del_timer_sync(&sdata->u.mesh.mesh_path_timer);
625 /* 626 /*
626 * If the timer fired while we waited for it, it will have 627 * If the timer fired while we waited for it, it will have
627 * requeued the work. Now the work will be running again 628 * requeued the work. Now the work will be running again
@@ -634,6 +635,8 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
634 local->fif_other_bss--; 635 local->fif_other_bss--;
635 atomic_dec(&local->iff_allmultis); 636 atomic_dec(&local->iff_allmultis);
636 ieee80211_configure_filter(local); 637 ieee80211_configure_filter(local);
638
639 sdata->u.mesh.timers_running = 0;
637} 640}
638 641
639static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, 642static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index cef0c9e79aba..a4a5acdbaa4d 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1430,6 +1430,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1430 del_timer_sync(&sdata->u.mgd.bcn_mon_timer); 1430 del_timer_sync(&sdata->u.mgd.bcn_mon_timer);
1431 del_timer_sync(&sdata->u.mgd.timer); 1431 del_timer_sync(&sdata->u.mgd.timer);
1432 del_timer_sync(&sdata->u.mgd.chswitch_timer); 1432 del_timer_sync(&sdata->u.mgd.chswitch_timer);
1433
1434 sdata->u.mgd.timers_running = 0;
1433} 1435}
1434 1436
1435void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, 1437void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index bcaee5d12839..839dd9737989 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -299,7 +299,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
299 if (local->scan_req != local->int_scan_req) 299 if (local->scan_req != local->int_scan_req)
300 cfg80211_scan_done(local->scan_req, aborted); 300 cfg80211_scan_done(local->scan_req, aborted);
301 local->scan_req = NULL; 301 local->scan_req = NULL;
302 local->scan_sdata = NULL; 302 rcu_assign_pointer(local->scan_sdata, NULL);
303 303
304 local->scanning = 0; 304 local->scanning = 0;
305 local->scan_channel = NULL; 305 local->scan_channel = NULL;
@@ -984,7 +984,6 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata)
984 kfree(local->sched_scan_ies.ie[i]); 984 kfree(local->sched_scan_ies.ie[i]);
985 985
986 drv_sched_scan_stop(local, sdata); 986 drv_sched_scan_stop(local, sdata);
987 rcu_assign_pointer(local->sched_scan_sdata, NULL);
988 } 987 }
989out: 988out:
990 mutex_unlock(&local->mtx); 989 mutex_unlock(&local->mtx);
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index f10fb8256442..05d60859d8e3 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -67,6 +67,9 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
67 struct tcf_common *pc; 67 struct tcf_common *pc;
68 int ret = 0; 68 int ret = 0;
69 int err; 69 int err;
70#ifdef CONFIG_GACT_PROB
71 struct tc_gact_p *p_parm = NULL;
72#endif
70 73
71 if (nla == NULL) 74 if (nla == NULL)
72 return -EINVAL; 75 return -EINVAL;
@@ -82,6 +85,12 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
82#ifndef CONFIG_GACT_PROB 85#ifndef CONFIG_GACT_PROB
83 if (tb[TCA_GACT_PROB] != NULL) 86 if (tb[TCA_GACT_PROB] != NULL)
84 return -EOPNOTSUPP; 87 return -EOPNOTSUPP;
88#else
89 if (tb[TCA_GACT_PROB]) {
90 p_parm = nla_data(tb[TCA_GACT_PROB]);
91 if (p_parm->ptype >= MAX_RAND)
92 return -EINVAL;
93 }
85#endif 94#endif
86 95
87 pc = tcf_hash_check(parm->index, a, bind, &gact_hash_info); 96 pc = tcf_hash_check(parm->index, a, bind, &gact_hash_info);
@@ -103,8 +112,7 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
103 spin_lock_bh(&gact->tcf_lock); 112 spin_lock_bh(&gact->tcf_lock);
104 gact->tcf_action = parm->action; 113 gact->tcf_action = parm->action;
105#ifdef CONFIG_GACT_PROB 114#ifdef CONFIG_GACT_PROB
106 if (tb[TCA_GACT_PROB] != NULL) { 115 if (p_parm) {
107 struct tc_gact_p *p_parm = nla_data(tb[TCA_GACT_PROB]);
108 gact->tcfg_paction = p_parm->paction; 116 gact->tcfg_paction = p_parm->paction;
109 gact->tcfg_pval = p_parm->pval; 117 gact->tcfg_pval = p_parm->pval;
110 gact->tcfg_ptype = p_parm->ptype; 118 gact->tcfg_ptype = p_parm->ptype;
@@ -133,7 +141,7 @@ static int tcf_gact(struct sk_buff *skb, const struct tc_action *a,
133 141
134 spin_lock(&gact->tcf_lock); 142 spin_lock(&gact->tcf_lock);
135#ifdef CONFIG_GACT_PROB 143#ifdef CONFIG_GACT_PROB
136 if (gact->tcfg_ptype && gact_rand[gact->tcfg_ptype] != NULL) 144 if (gact->tcfg_ptype)
137 action = gact_rand[gact->tcfg_ptype](gact); 145 action = gact_rand[gact->tcfg_ptype](gact);
138 else 146 else
139 action = gact->tcf_action; 147 action = gact->tcf_action;
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 60e281ad0f07..58fb3c7aab9e 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -185,7 +185,12 @@ err3:
185err2: 185err2:
186 kfree(tname); 186 kfree(tname);
187err1: 187err1:
188 kfree(pc); 188 if (ret == ACT_P_CREATED) {
189 if (est)
190 gen_kill_estimator(&pc->tcfc_bstats,
191 &pc->tcfc_rate_est);
192 kfree_rcu(pc, tcfc_rcu);
193 }
189 return err; 194 return err;
190} 195}
191 196
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 26aa2f6ce257..45c53ab067a6 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -74,7 +74,10 @@ static int tcf_pedit_init(struct nlattr *nla, struct nlattr *est,
74 p = to_pedit(pc); 74 p = to_pedit(pc);
75 keys = kmalloc(ksize, GFP_KERNEL); 75 keys = kmalloc(ksize, GFP_KERNEL);
76 if (keys == NULL) { 76 if (keys == NULL) {
77 kfree(pc); 77 if (est)
78 gen_kill_estimator(&pc->tcfc_bstats,
79 &pc->tcfc_rate_est);
80 kfree_rcu(pc, tcfc_rcu);
78 return -ENOMEM; 81 return -ENOMEM;
79 } 82 }
80 ret = ACT_P_CREATED; 83 ret = ACT_P_CREATED;
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 3922f2a2821b..3714f60f0b3c 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -131,7 +131,10 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est,
131 d = to_defact(pc); 131 d = to_defact(pc);
132 ret = alloc_defdata(d, defdata); 132 ret = alloc_defdata(d, defdata);
133 if (ret < 0) { 133 if (ret < 0) {
134 kfree(pc); 134 if (est)
135 gen_kill_estimator(&pc->tcfc_bstats,
136 &pc->tcfc_rate_est);
137 kfree_rcu(pc, tcfc_rcu);
135 return ret; 138 return ret;
136 } 139 }
137 d->tcf_action = parm->action; 140 d->tcf_action = parm->action;
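The act_ipt.c, act_pedit.c and act_simple.c error paths fix the same pattern: a just-created tcf_common was released with a bare kfree() even though a rate estimator may already be attached and the object is normally freed via RCU. The repeated cleanup could be captured in a helper along these lines (the helper itself is hypothetical; the two calls and field names come from the hunks above):

static void example_tcf_release_created(struct tcf_common *pc,
					struct nlattr *est, int ret)
{
	if (ret != ACT_P_CREATED)
		return;			/* nothing was allocated by us */
	if (est)
		gen_kill_estimator(&pc->tcfc_bstats, &pc->tcfc_rate_est);
	kfree_rcu(pc, tcfc_rcu);	/* defer the free past RCU readers */
}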
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 2303ee73b50a..2ded3c7fad06 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -680,6 +680,8 @@ static u32 map_regdom_flags(u32 rd_flags)
680 channel_flags |= IEEE80211_CHAN_NO_IBSS; 680 channel_flags |= IEEE80211_CHAN_NO_IBSS;
681 if (rd_flags & NL80211_RRF_DFS) 681 if (rd_flags & NL80211_RRF_DFS)
682 channel_flags |= IEEE80211_CHAN_RADAR; 682 channel_flags |= IEEE80211_CHAN_RADAR;
683 if (rd_flags & NL80211_RRF_NO_OFDM)
684 channel_flags |= IEEE80211_CHAN_NO_OFDM;
683 return channel_flags; 685 return channel_flags;
684} 686}
685 687
@@ -901,7 +903,21 @@ static void handle_channel(struct wiphy *wiphy,
901 chan->max_antenna_gain = min(chan->orig_mag, 903 chan->max_antenna_gain = min(chan->orig_mag,
902 (int) MBI_TO_DBI(power_rule->max_antenna_gain)); 904 (int) MBI_TO_DBI(power_rule->max_antenna_gain));
903 chan->max_reg_power = (int) MBM_TO_DBM(power_rule->max_eirp); 905 chan->max_reg_power = (int) MBM_TO_DBM(power_rule->max_eirp);
904 chan->max_power = min(chan->max_power, chan->max_reg_power); 906 if (chan->orig_mpwr) {
907 /*
908 * Devices that have their own custom regulatory domain
909 * but also use WIPHY_FLAG_STRICT_REGULATORY will follow the
910 * passed country IE power settings.
911 */
912 if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
913 wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY &&
914 wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY)
915 chan->max_power = chan->max_reg_power;
916 else
917 chan->max_power = min(chan->orig_mpwr,
918 chan->max_reg_power);
919 } else
920 chan->max_power = chan->max_reg_power;
905} 921}
906 922
907static void handle_band(struct wiphy *wiphy, 923static void handle_band(struct wiphy *wiphy,
@@ -1885,6 +1901,7 @@ static void restore_custom_reg_settings(struct wiphy *wiphy)
1885 chan->flags = chan->orig_flags; 1901 chan->flags = chan->orig_flags;
1886 chan->max_antenna_gain = chan->orig_mag; 1902 chan->max_antenna_gain = chan->orig_mag;
1887 chan->max_power = chan->orig_mpwr; 1903 chan->max_power = chan->orig_mpwr;
1904 chan->beacon_found = false;
1888 } 1905 }
1889 } 1906 }
1890} 1907}
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 5b228f97d4b3..87cd0e4d4282 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -415,8 +415,17 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer * me)
415 if (x->lft.hard_add_expires_seconds) { 415 if (x->lft.hard_add_expires_seconds) {
416 long tmo = x->lft.hard_add_expires_seconds + 416 long tmo = x->lft.hard_add_expires_seconds +
417 x->curlft.add_time - now; 417 x->curlft.add_time - now;
418 if (tmo <= 0) 418 if (tmo <= 0) {
419 goto expired; 419 if (x->xflags & XFRM_SOFT_EXPIRE) {
420 /* enter hard expire without soft expire first?!
421 * setting a new date could trigger this.
 422 * workaround: fix x->curlft.add_time as below:
423 */
424 x->curlft.add_time = now - x->saved_tmo - 1;
425 tmo = x->lft.hard_add_expires_seconds - x->saved_tmo;
426 } else
427 goto expired;
428 }
420 if (tmo < next) 429 if (tmo < next)
421 next = tmo; 430 next = tmo;
422 } 431 }
@@ -433,10 +442,14 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer * me)
433 if (x->lft.soft_add_expires_seconds) { 442 if (x->lft.soft_add_expires_seconds) {
434 long tmo = x->lft.soft_add_expires_seconds + 443 long tmo = x->lft.soft_add_expires_seconds +
435 x->curlft.add_time - now; 444 x->curlft.add_time - now;
436 if (tmo <= 0) 445 if (tmo <= 0) {
437 warn = 1; 446 warn = 1;
438 else if (tmo < next) 447 x->xflags &= ~XFRM_SOFT_EXPIRE;
448 } else if (tmo < next) {
439 next = tmo; 449 next = tmo;
450 x->xflags |= XFRM_SOFT_EXPIRE;
451 x->saved_tmo = tmo;
452 }
440 } 453 }
441 if (x->lft.soft_use_expires_seconds) { 454 if (x->lft.soft_use_expires_seconds) {
442 long tmo = x->lft.soft_use_expires_seconds + 455 long tmo = x->lft.soft_use_expires_seconds +
diff --git a/sound/core/sgbuf.c b/sound/core/sgbuf.c
index 4e7ec2b49873..d0f00356fc11 100644
--- a/sound/core/sgbuf.c
+++ b/sound/core/sgbuf.c
@@ -101,7 +101,7 @@ void *snd_malloc_sgbuf_pages(struct device *device,
101 if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, device, 101 if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, device,
102 chunk, &tmpb) < 0) { 102 chunk, &tmpb) < 0) {
103 if (!sgbuf->pages) 103 if (!sgbuf->pages)
104 return NULL; 104 goto _failed;
105 if (!res_size) 105 if (!res_size)
106 goto _failed; 106 goto _failed;
107 size = sgbuf->pages * PAGE_SIZE; 107 size = sgbuf->pages * PAGE_SIZE;
diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c
index 4f502a2bdc3c..0a436626182b 100644
--- a/sound/pci/emu10k1/memory.c
+++ b/sound/pci/emu10k1/memory.c
@@ -326,7 +326,10 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst
326 for (page = blk->first_page; page <= blk->last_page; page++, idx++) { 326 for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
327 unsigned long ofs = idx << PAGE_SHIFT; 327 unsigned long ofs = idx << PAGE_SHIFT;
328 dma_addr_t addr; 328 dma_addr_t addr;
329 addr = snd_pcm_sgbuf_get_addr(substream, ofs); 329 if (ofs >= runtime->dma_bytes)
330 addr = emu->silent_page.addr;
331 else
332 addr = snd_pcm_sgbuf_get_addr(substream, ofs);
330 if (! is_valid_page(emu, addr)) { 333 if (! is_valid_page(emu, addr)) {
331 printk(KERN_ERR "emu: failure page = %d\n", idx); 334 printk(KERN_ERR "emu: failure page = %d\n", idx);
332 mutex_unlock(&hdr->block_mutex); 335 mutex_unlock(&hdr->block_mutex);
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index 647218d69f68..4f7d2dfcef7b 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -332,13 +332,12 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec,
332 if (cfg->dig_outs) 332 if (cfg->dig_outs)
333 snd_printd(" dig-out=0x%x/0x%x\n", 333 snd_printd(" dig-out=0x%x/0x%x\n",
334 cfg->dig_out_pins[0], cfg->dig_out_pins[1]); 334 cfg->dig_out_pins[0], cfg->dig_out_pins[1]);
335 snd_printd(" inputs:"); 335 snd_printd(" inputs:\n");
336 for (i = 0; i < cfg->num_inputs; i++) { 336 for (i = 0; i < cfg->num_inputs; i++) {
337 snd_printd(" %s=0x%x", 337 snd_printd(" %s=0x%x\n",
338 hda_get_autocfg_input_label(codec, cfg, i), 338 hda_get_autocfg_input_label(codec, cfg, i),
339 cfg->inputs[i].pin); 339 cfg->inputs[i].pin);
340 } 340 }
341 snd_printd("\n");
342 if (cfg->dig_in_pin) 341 if (cfg->dig_in_pin)
343 snd_printd(" dig-in=0x%x\n", cfg->dig_in_pin); 342 snd_printd(" dig-in=0x%x\n", cfg->dig_in_pin);
344 343
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 14361184ae1e..5e22a8f43d2e 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -2967,12 +2967,10 @@ static const char * const cxt5066_models[CXT5066_MODELS] = {
2967}; 2967};
2968 2968
2969static const struct snd_pci_quirk cxt5066_cfg_tbl[] = { 2969static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
2970 SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT5066_AUTO),
2971 SND_PCI_QUIRK_MASK(0x1025, 0xff00, 0x0400, "Acer", CXT5066_IDEAPAD), 2970 SND_PCI_QUIRK_MASK(0x1025, 0xff00, 0x0400, "Acer", CXT5066_IDEAPAD),
2972 SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTRO), 2971 SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTRO),
2973 SND_PCI_QUIRK(0x1028, 0x02f5, "Dell Vostro 320", CXT5066_IDEAPAD), 2972 SND_PCI_QUIRK(0x1028, 0x02f5, "Dell Vostro 320", CXT5066_IDEAPAD),
2974 SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO), 2973 SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO),
2975 SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO),
2976 SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), 2974 SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
2977 SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD), 2975 SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD),
2978 SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD), 2976 SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD),
@@ -2988,14 +2986,10 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
2988 SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD), 2986 SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD),
2989 SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD), 2987 SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD),
2990 SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS), 2988 SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS),
2991 SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T510", CXT5066_AUTO),
2992 SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520 & W520", CXT5066_AUTO),
2993 SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD), 2989 SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD),
2994 SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD), 2990 SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD),
2995 SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo U350", CXT5066_ASUS), 2991 SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo U350", CXT5066_ASUS),
2996 SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS), 2992 SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS),
2997 SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo G565", CXT5066_AUTO),
2998 SND_PCI_QUIRK(0x1b0a, 0x2092, "CyberpowerPC Gamer Xplorer N57001", CXT5066_AUTO),
2999 {} 2993 {}
3000}; 2994};
3001 2995
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 69b928449789..8f23374fa642 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -877,8 +877,6 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
877 struct hdmi_eld *eld; 877 struct hdmi_eld *eld;
878 struct hdmi_spec_per_cvt *per_cvt = NULL; 878 struct hdmi_spec_per_cvt *per_cvt = NULL;
879 879
880 hinfo->nid = 0; /* clear the leftover value */
881
882 /* Validate hinfo */ 880 /* Validate hinfo */
883 pin_idx = hinfo_to_pin_index(spec, hinfo); 881 pin_idx = hinfo_to_pin_index(spec, hinfo);
884 if (snd_BUG_ON(pin_idx < 0)) 882 if (snd_BUG_ON(pin_idx < 0))
@@ -1163,6 +1161,14 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
1163 return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format); 1161 return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
1164} 1162}
1165 1163
1164static int generic_hdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
1165 struct hda_codec *codec,
1166 struct snd_pcm_substream *substream)
1167{
1168 snd_hda_codec_cleanup_stream(codec, hinfo->nid);
1169 return 0;
1170}
1171
1166static int hdmi_pcm_close(struct hda_pcm_stream *hinfo, 1172static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
1167 struct hda_codec *codec, 1173 struct hda_codec *codec,
1168 struct snd_pcm_substream *substream) 1174 struct snd_pcm_substream *substream)
@@ -1202,6 +1208,7 @@ static const struct hda_pcm_ops generic_ops = {
1202 .open = hdmi_pcm_open, 1208 .open = hdmi_pcm_open,
1203 .close = hdmi_pcm_close, 1209 .close = hdmi_pcm_close,
1204 .prepare = generic_hdmi_playback_pcm_prepare, 1210 .prepare = generic_hdmi_playback_pcm_prepare,
1211 .cleanup = generic_hdmi_playback_pcm_cleanup,
1205}; 1212};
1206 1213
1207static int generic_hdmi_build_pcms(struct hda_codec *codec) 1214static int generic_hdmi_build_pcms(struct hda_codec *codec)
@@ -1220,7 +1227,6 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec)
1220 pstr = &info->stream[SNDRV_PCM_STREAM_PLAYBACK]; 1227 pstr = &info->stream[SNDRV_PCM_STREAM_PLAYBACK];
1221 pstr->substreams = 1; 1228 pstr->substreams = 1;
1222 pstr->ops = generic_ops; 1229 pstr->ops = generic_ops;
1223 pstr->nid = 1; /* FIXME: just for avoiding a debug WARNING */
1224 /* other pstr fields are set in open */ 1230 /* other pstr fields are set in open */
1225 } 1231 }
1226 1232
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 344b221d2102..4f81dd44c837 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6099,6 +6099,8 @@ static const struct alc_fixup alc269_fixups[] = {
6099 [ALC269_FIXUP_PCM_44K] = { 6099 [ALC269_FIXUP_PCM_44K] = {
6100 .type = ALC_FIXUP_FUNC, 6100 .type = ALC_FIXUP_FUNC,
6101 .v.func = alc269_fixup_pcm_44k, 6101 .v.func = alc269_fixup_pcm_44k,
6102 .chained = true,
6103 .chain_id = ALC269_FIXUP_QUANTA_MUTE
6102 }, 6104 },
6103 [ALC269_FIXUP_STEREO_DMIC] = { 6105 [ALC269_FIXUP_STEREO_DMIC] = {
6104 .type = ALC_FIXUP_FUNC, 6106 .type = ALC_FIXUP_FUNC,
@@ -6206,9 +6208,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6206 SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE), 6208 SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
6207 SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE), 6209 SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
6208 SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE), 6210 SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
6211 SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK),
6212 SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
6213 SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
6209 SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK), 6214 SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
6210 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_QUANTA_MUTE), 6215 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
6211 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Lenovo Ideapd", ALC269_FIXUP_PCM_44K),
6212 SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), 6216 SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
6213 6217
6214#if 0 6218#if 0
diff --git a/sound/soc/codecs/ab8500-codec.c b/sound/soc/codecs/ab8500-codec.c
index 3c795921c5f6..23b40186f9b8 100644
--- a/sound/soc/codecs/ab8500-codec.c
+++ b/sound/soc/codecs/ab8500-codec.c
@@ -2406,6 +2406,10 @@ static int ab8500_codec_probe(struct snd_soc_codec *codec)
2406 2406
2407 /* Setup AB8500 according to board-settings */ 2407 /* Setup AB8500 according to board-settings */
2408 pdata = (struct ab8500_platform_data *)dev_get_platdata(dev->parent); 2408 pdata = (struct ab8500_platform_data *)dev_get_platdata(dev->parent);
2409
2410 /* Inform SoC Core that we have our own I/O arrangements. */
2411 codec->control_data = (void *)true;
2412
2409 status = ab8500_audio_setup_mics(codec, &pdata->codec->amics); 2413 status = ab8500_audio_setup_mics(codec, &pdata->codec->amics);
2410 if (status < 0) { 2414 if (status < 0) {
2411 pr_err("%s: Failed to setup mics (%d)!\n", __func__, status); 2415 pr_err("%s: Failed to setup mics (%d)!\n", __func__, status);
diff --git a/sound/soc/codecs/ad1980.c b/sound/soc/codecs/ad1980.c
index 8c39dddd7d00..11b1b714b8b5 100644
--- a/sound/soc/codecs/ad1980.c
+++ b/sound/soc/codecs/ad1980.c
@@ -186,6 +186,7 @@ static int ad1980_soc_probe(struct snd_soc_codec *codec)
186 186
187 printk(KERN_INFO "AD1980 SoC Audio Codec\n"); 187 printk(KERN_INFO "AD1980 SoC Audio Codec\n");
188 188
189 codec->control_data = codec; /* we don't use regmap! */
189 ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0); 190 ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0);
190 if (ret < 0) { 191 if (ret < 0) {
191 printk(KERN_ERR "ad1980: failed to register AC97 codec\n"); 192 printk(KERN_ERR "ad1980: failed to register AC97 codec\n");
diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
index 6276e352125f..8f726c063f42 100644
--- a/sound/soc/codecs/mc13783.c
+++ b/sound/soc/codecs/mc13783.c
@@ -581,6 +581,8 @@ static int mc13783_probe(struct snd_soc_codec *codec)
581{ 581{
582 struct mc13783_priv *priv = snd_soc_codec_get_drvdata(codec); 582 struct mc13783_priv *priv = snd_soc_codec_get_drvdata(codec);
583 583
584 codec->control_data = priv->mc13xxx;
585
584 mc13xxx_lock(priv->mc13xxx); 586 mc13xxx_lock(priv->mc13xxx);
585 587
586 /* these are the reset values */ 588 /* these are the reset values */
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 8af6a5245b18..df2f99d1d428 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -239,6 +239,7 @@ static const struct snd_soc_dapm_route sgtl5000_dapm_routes[] = {
239 {"Headphone Mux", "DAC", "DAC"}, /* dac --> hp_mux */ 239 {"Headphone Mux", "DAC", "DAC"}, /* dac --> hp_mux */
240 {"LO", NULL, "DAC"}, /* dac --> line_out */ 240 {"LO", NULL, "DAC"}, /* dac --> line_out */
241 241
242 {"LINE_IN", NULL, "VAG_POWER"},
242 {"Headphone Mux", "LINE_IN", "LINE_IN"},/* line_in --> hp_mux */ 243 {"Headphone Mux", "LINE_IN", "LINE_IN"},/* line_in --> hp_mux */
243 {"HP", NULL, "Headphone Mux"}, /* hp_mux --> hp */ 244 {"HP", NULL, "Headphone Mux"}, /* hp_mux --> hp */
244 245
@@ -1357,8 +1358,6 @@ static int sgtl5000_probe(struct snd_soc_codec *codec)
1357 if (ret) 1358 if (ret)
1358 goto err; 1359 goto err;
1359 1360
1360 snd_soc_dapm_new_widgets(&codec->dapm);
1361
1362 return 0; 1361 return 0;
1363 1362
1364err: 1363err:
diff --git a/sound/soc/codecs/stac9766.c b/sound/soc/codecs/stac9766.c
index 982e437799a8..33c0f3d39c87 100644
--- a/sound/soc/codecs/stac9766.c
+++ b/sound/soc/codecs/stac9766.c
@@ -340,6 +340,7 @@ static int stac9766_codec_probe(struct snd_soc_codec *codec)
340 340
341 printk(KERN_INFO "STAC9766 SoC Audio Codec %s\n", STAC9766_VERSION); 341 printk(KERN_INFO "STAC9766 SoC Audio Codec %s\n", STAC9766_VERSION);
342 342
343 codec->control_data = codec; /* we don't use regmap! */
343 ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0); 344 ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0);
344 if (ret < 0) 345 if (ret < 0)
345 goto codec_err; 346 goto codec_err;
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index eaf65863ec21..aa9ce9dd7d8a 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -2501,6 +2501,9 @@ static int wm8962_set_bias_level(struct snd_soc_codec *codec,
2501 /* VMID 2*250k */ 2501 /* VMID 2*250k */
2502 snd_soc_update_bits(codec, WM8962_PWR_MGMT_1, 2502 snd_soc_update_bits(codec, WM8962_PWR_MGMT_1,
2503 WM8962_VMID_SEL_MASK, 0x100); 2503 WM8962_VMID_SEL_MASK, 0x100);
2504
2505 if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
2506 msleep(100);
2504 break; 2507 break;
2505 2508
2506 case SND_SOC_BIAS_OFF: 2509 case SND_SOC_BIAS_OFF:
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index bb62f4b3d563..04ef03175c51 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -2649,7 +2649,7 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream,
2649 return -EINVAL; 2649 return -EINVAL;
2650 } 2650 }
2651 2651
2652 bclk_rate = params_rate(params) * 2; 2652 bclk_rate = params_rate(params) * 4;
2653 switch (params_format(params)) { 2653 switch (params_format(params)) {
2654 case SNDRV_PCM_FORMAT_S16_LE: 2654 case SNDRV_PCM_FORMAT_S16_LE:
2655 bclk_rate *= 16; 2655 bclk_rate *= 16;
@@ -3253,10 +3253,13 @@ static void wm8994_mic_work(struct work_struct *work)
3253 int ret; 3253 int ret;
3254 int report; 3254 int report;
3255 3255
3256 pm_runtime_get_sync(dev);
3257
3256 ret = regmap_read(regmap, WM8994_INTERRUPT_RAW_STATUS_2, &reg); 3258 ret = regmap_read(regmap, WM8994_INTERRUPT_RAW_STATUS_2, &reg);
3257 if (ret < 0) { 3259 if (ret < 0) {
3258 dev_err(dev, "Failed to read microphone status: %d\n", 3260 dev_err(dev, "Failed to read microphone status: %d\n",
3259 ret); 3261 ret);
3262 pm_runtime_put(dev);
3260 return; 3263 return;
3261 } 3264 }
3262 3265
@@ -3299,6 +3302,8 @@ static void wm8994_mic_work(struct work_struct *work)
3299 3302
3300 snd_soc_jack_report(priv->micdet[1].jack, report, 3303 snd_soc_jack_report(priv->micdet[1].jack, report,
3301 SND_JACK_HEADSET | SND_JACK_BTN_0); 3304 SND_JACK_HEADSET | SND_JACK_BTN_0);
3305
3306 pm_runtime_put(dev);
3302} 3307}
3303 3308
3304static irqreturn_t wm8994_mic_irq(int irq, void *data) 3309static irqreturn_t wm8994_mic_irq(int irq, void *data)
@@ -3421,12 +3426,15 @@ static irqreturn_t wm1811_jackdet_irq(int irq, void *data)
3421 int reg; 3426 int reg;
3422 bool present; 3427 bool present;
3423 3428
3429 pm_runtime_get_sync(codec->dev);
3430
3424 mutex_lock(&wm8994->accdet_lock); 3431 mutex_lock(&wm8994->accdet_lock);
3425 3432
3426 reg = snd_soc_read(codec, WM1811_JACKDET_CTRL); 3433 reg = snd_soc_read(codec, WM1811_JACKDET_CTRL);
3427 if (reg < 0) { 3434 if (reg < 0) {
3428 dev_err(codec->dev, "Failed to read jack status: %d\n", reg); 3435 dev_err(codec->dev, "Failed to read jack status: %d\n", reg);
3429 mutex_unlock(&wm8994->accdet_lock); 3436 mutex_unlock(&wm8994->accdet_lock);
3437 pm_runtime_put(codec->dev);
3430 return IRQ_NONE; 3438 return IRQ_NONE;
3431 } 3439 }
3432 3440
@@ -3491,6 +3499,7 @@ static irqreturn_t wm1811_jackdet_irq(int irq, void *data)
3491 SND_JACK_MECHANICAL | SND_JACK_HEADSET | 3499 SND_JACK_MECHANICAL | SND_JACK_HEADSET |
3492 wm8994->btn_mask); 3500 wm8994->btn_mask);
3493 3501
3502 pm_runtime_put(codec->dev);
3494 return IRQ_HANDLED; 3503 return IRQ_HANDLED;
3495} 3504}
3496 3505
@@ -3602,6 +3611,8 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data)
3602 if (!(snd_soc_read(codec, WM8958_MIC_DETECT_1) & WM8958_MICD_ENA)) 3611 if (!(snd_soc_read(codec, WM8958_MIC_DETECT_1) & WM8958_MICD_ENA))
3603 return IRQ_HANDLED; 3612 return IRQ_HANDLED;
3604 3613
3614 pm_runtime_get_sync(codec->dev);
3615
3605 /* We may occasionally read a detection without an impedence 3616 /* We may occasionally read a detection without an impedence
3606 * range being provided - if that happens loop again. 3617 * range being provided - if that happens loop again.
3607 */ 3618 */
@@ -3612,6 +3623,7 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data)
3612 dev_err(codec->dev, 3623 dev_err(codec->dev,
3613 "Failed to read mic detect status: %d\n", 3624 "Failed to read mic detect status: %d\n",
3614 reg); 3625 reg);
3626 pm_runtime_put(codec->dev);
3615 return IRQ_NONE; 3627 return IRQ_NONE;
3616 } 3628 }
3617 3629
@@ -3639,6 +3651,7 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data)
3639 dev_warn(codec->dev, "Accessory detection with no callback\n"); 3651 dev_warn(codec->dev, "Accessory detection with no callback\n");
3640 3652
3641out: 3653out:
3654 pm_runtime_put(codec->dev);
3642 return IRQ_HANDLED; 3655 return IRQ_HANDLED;
3643} 3656}
3644 3657
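
The wm8994 hunks above all apply the same runtime-PM pattern: take a reference with pm_runtime_get_sync() before the interrupt or work handler touches the device, and drop it with pm_runtime_put() on every exit path, including the early error returns. A generic, hedged sketch of that pattern (example_read_status() is a placeholder, not a wm8994 function):

    #include <linux/pm_runtime.h>
    #include <linux/interrupt.h>
    #include <linux/device.h>

    extern int example_read_status(struct device *dev);  /* placeholder register read */

    static irqreturn_t example_irq(int irq, void *data)
    {
            struct device *dev = data;
            int status;

            pm_runtime_get_sync(dev);       /* device must be powered up for I/O */

            status = example_read_status(dev);
            if (status < 0) {
                    pm_runtime_put(dev);    /* balance the get on the error path */
                    return IRQ_NONE;
            }

            /* ... act on status ... */
            pm_runtime_put(dev);
            return IRQ_HANDLED;
    }
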
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index 099e6ec32125..f16fb361a4eb 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -619,6 +619,7 @@ static int wm9712_soc_probe(struct snd_soc_codec *codec)
619{ 619{
620 int ret = 0; 620 int ret = 0;
621 621
622 codec->control_data = codec; /* we don't use regmap! */
622 ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0); 623 ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0);
623 if (ret < 0) { 624 if (ret < 0) {
624 printk(KERN_ERR "wm9712: failed to register AC97 codec\n"); 625 printk(KERN_ERR "wm9712: failed to register AC97 codec\n");
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index 3eb19fb71d17..d0b8a3287a85 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -1196,6 +1196,7 @@ static int wm9713_soc_probe(struct snd_soc_codec *codec)
1196 if (wm9713 == NULL) 1196 if (wm9713 == NULL)
1197 return -ENOMEM; 1197 return -ENOMEM;
1198 snd_soc_codec_set_drvdata(codec, wm9713); 1198 snd_soc_codec_set_drvdata(codec, wm9713);
1199 codec->control_data = wm9713; /* we don't use regmap! */
1199 1200
1200 ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0); 1201 ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0);
1201 if (ret < 0) 1202 if (ret < 0)
diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c
index aba71bfa33b1..b3030718c228 100644
--- a/sound/soc/mxs/mxs-saif.c
+++ b/sound/soc/mxs/mxs-saif.c
@@ -394,9 +394,14 @@ static int mxs_saif_hw_params(struct snd_pcm_substream *substream,
394 struct snd_soc_dai *cpu_dai) 394 struct snd_soc_dai *cpu_dai)
395{ 395{
396 struct mxs_saif *saif = snd_soc_dai_get_drvdata(cpu_dai); 396 struct mxs_saif *saif = snd_soc_dai_get_drvdata(cpu_dai);
397 struct mxs_saif *master_saif;
397 u32 scr, stat; 398 u32 scr, stat;
398 int ret; 399 int ret;
399 400
401 master_saif = mxs_saif_get_master(saif);
402 if (!master_saif)
403 return -EINVAL;
404
400 /* mclk should already be set */ 405 /* mclk should already be set */
401 if (!saif->mclk && saif->mclk_in_use) { 406 if (!saif->mclk && saif->mclk_in_use) {
402 dev_err(cpu_dai->dev, "set mclk first\n"); 407 dev_err(cpu_dai->dev, "set mclk first\n");
@@ -420,6 +425,25 @@ static int mxs_saif_hw_params(struct snd_pcm_substream *substream,
420 return ret; 425 return ret;
421 } 426 }
422 427
428 /* prepare clk in hw_param, enable in trigger */
429 clk_prepare(saif->clk);
430 if (saif != master_saif) {
431 /*
432 * Set an initial clock rate for the saif internal logic to work
433 * properly. This is important when working in EXTMASTER mode
434 * that uses the other saif's BITCLK&LRCLK but it still needs a
435 * basic clock which should be fast enough for the internal
436 * logic.
437 */
438 clk_enable(saif->clk);
439 ret = clk_set_rate(saif->clk, 24000000);
440 clk_disable(saif->clk);
441 if (ret)
442 return ret;
443
444 clk_prepare(master_saif->clk);
445 }
446
423 scr = __raw_readl(saif->base + SAIF_CTRL); 447 scr = __raw_readl(saif->base + SAIF_CTRL);
424 448
425 scr &= ~BM_SAIF_CTRL_WORD_LENGTH; 449 scr &= ~BM_SAIF_CTRL_WORD_LENGTH;
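
The mxs-saif hunk splits the clock handling exactly as its new comment says: clk_prepare(), which may sleep, is done here in hw_params(), while the non-sleeping clk_enable()/clk_disable() pair remains for the trigger path, which can run in atomic context. A brief sketch of that split with placeholder function names:

    #include <linux/clk.h>
    #include <linux/types.h>

    /* hw_params(): process context, sleeping is allowed */
    static int example_hw_params(struct clk *clk)
    {
            return clk_prepare(clk);
    }

    /* trigger(): may be atomic, so only the non-sleeping enable/disable */
    static int example_trigger(struct clk *clk, bool start)
    {
            if (start)
                    return clk_enable(clk);
            clk_disable(clk);
            return 0;
    }
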
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index 1046083e90a0..acdd3ef14e08 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -820,3 +820,4 @@ module_platform_driver(asoc_mcbsp_driver);
820MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@bitmer.com>"); 820MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@bitmer.com>");
821MODULE_DESCRIPTION("OMAP I2S SoC Interface"); 821MODULE_DESCRIPTION("OMAP I2S SoC Interface");
822MODULE_LICENSE("GPL"); 822MODULE_LICENSE("GPL");
823MODULE_ALIAS("platform:omap-mcbsp");
diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
index 5a649da9122a..f0feb06615f8 100644
--- a/sound/soc/omap/omap-pcm.c
+++ b/sound/soc/omap/omap-pcm.c
@@ -441,3 +441,4 @@ module_platform_driver(omap_pcm_driver);
441MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@bitmer.com>"); 441MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@bitmer.com>");
442MODULE_DESCRIPTION("OMAP PCM DMA module"); 442MODULE_DESCRIPTION("OMAP PCM DMA module");
443MODULE_LICENSE("GPL"); 443MODULE_LICENSE("GPL");
444MODULE_ALIAS("platform:omap-pcm-audio");
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index f219b2f7ee68..f81c5976b961 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1096,7 +1096,7 @@ static int soc_probe_codec(struct snd_soc_card *card,
1096 } 1096 }
1097 1097
1098 /* If the driver didn't set I/O up try regmap */ 1098 /* If the driver didn't set I/O up try regmap */
1099 if (!codec->control_data) 1099 if (!codec->write && dev_get_regmap(codec->dev, NULL))
1100 snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP); 1100 snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP);
1101 1101
1102 if (driver->controls) 1102 if (driver->controls)
diff --git a/sound/soc/tegra/tegra_alc5632.c b/sound/soc/tegra/tegra_alc5632.c
index d684df294c0c..e463529b38bb 100644
--- a/sound/soc/tegra/tegra_alc5632.c
+++ b/sound/soc/tegra/tegra_alc5632.c
@@ -177,7 +177,7 @@ static __devinit int tegra_alc5632_probe(struct platform_device *pdev)
177 } 177 }
178 178
179 alc5632->gpio_hp_det = of_get_named_gpio(np, "nvidia,hp-det-gpios", 0); 179 alc5632->gpio_hp_det = of_get_named_gpio(np, "nvidia,hp-det-gpios", 0);
180 if (alc5632->gpio_hp_det == -ENODEV) 180 if (alc5632->gpio_hp_det == -EPROBE_DEFER)
181 return -EPROBE_DEFER; 181 return -EPROBE_DEFER;
182 182
183 ret = snd_soc_of_parse_card_name(card, "nvidia,model"); 183 ret = snd_soc_of_parse_card_name(card, "nvidia,model");
diff --git a/sound/soc/tegra/tegra_wm8903.c b/sound/soc/tegra/tegra_wm8903.c
index 0c5bb33d258e..d4f14e492341 100644
--- a/sound/soc/tegra/tegra_wm8903.c
+++ b/sound/soc/tegra/tegra_wm8903.c
@@ -284,27 +284,27 @@ static __devinit int tegra_wm8903_driver_probe(struct platform_device *pdev)
284 } else if (np) { 284 } else if (np) {
285 pdata->gpio_spkr_en = of_get_named_gpio(np, 285 pdata->gpio_spkr_en = of_get_named_gpio(np,
286 "nvidia,spkr-en-gpios", 0); 286 "nvidia,spkr-en-gpios", 0);
287 if (pdata->gpio_spkr_en == -ENODEV) 287 if (pdata->gpio_spkr_en == -EPROBE_DEFER)
288 return -EPROBE_DEFER; 288 return -EPROBE_DEFER;
289 289
290 pdata->gpio_hp_mute = of_get_named_gpio(np, 290 pdata->gpio_hp_mute = of_get_named_gpio(np,
291 "nvidia,hp-mute-gpios", 0); 291 "nvidia,hp-mute-gpios", 0);
292 if (pdata->gpio_hp_mute == -ENODEV) 292 if (pdata->gpio_hp_mute == -EPROBE_DEFER)
293 return -EPROBE_DEFER; 293 return -EPROBE_DEFER;
294 294
295 pdata->gpio_hp_det = of_get_named_gpio(np, 295 pdata->gpio_hp_det = of_get_named_gpio(np,
296 "nvidia,hp-det-gpios", 0); 296 "nvidia,hp-det-gpios", 0);
297 if (pdata->gpio_hp_det == -ENODEV) 297 if (pdata->gpio_hp_det == -EPROBE_DEFER)
298 return -EPROBE_DEFER; 298 return -EPROBE_DEFER;
299 299
300 pdata->gpio_int_mic_en = of_get_named_gpio(np, 300 pdata->gpio_int_mic_en = of_get_named_gpio(np,
301 "nvidia,int-mic-en-gpios", 0); 301 "nvidia,int-mic-en-gpios", 0);
302 if (pdata->gpio_int_mic_en == -ENODEV) 302 if (pdata->gpio_int_mic_en == -EPROBE_DEFER)
303 return -EPROBE_DEFER; 303 return -EPROBE_DEFER;
304 304
305 pdata->gpio_ext_mic_en = of_get_named_gpio(np, 305 pdata->gpio_ext_mic_en = of_get_named_gpio(np,
306 "nvidia,ext-mic-en-gpios", 0); 306 "nvidia,ext-mic-en-gpios", 0);
307 if (pdata->gpio_ext_mic_en == -ENODEV) 307 if (pdata->gpio_ext_mic_en == -EPROBE_DEFER)
308 return -EPROBE_DEFER; 308 return -EPROBE_DEFER;
309 } 309 }
310 310
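
Both Tegra hunks fix the probe-deferral test: of_get_named_gpio() reports a GPIO whose controller has not probed yet as -EPROBE_DEFER rather than -ENODEV, so that is the value to check before returning -EPROBE_DEFER from the machine driver's probe. A minimal sketch of the corrected lookup, using a hypothetical property name:

    #include <linux/of_gpio.h>
    #include <linux/errno.h>

    /* Returns a GPIO number, -EPROBE_DEFER when the GPIO provider is not
     * ready yet (caller should defer its own probe), or another errno. */
    static int example_get_det_gpio(struct device_node *np)
    {
            int gpio = of_get_named_gpio(np, "example,det-gpios", 0);

            if (gpio == -EPROBE_DEFER)
                    return -EPROBE_DEFER;
            return gpio;
    }
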
diff --git a/sound/soc/ux500/ux500_msp_dai.c b/sound/soc/ux500/ux500_msp_dai.c
index 62ac0285bfaf..057e28ef770e 100644
--- a/sound/soc/ux500/ux500_msp_dai.c
+++ b/sound/soc/ux500/ux500_msp_dai.c
@@ -21,7 +21,7 @@
21#include <linux/mfd/dbx500-prcmu.h> 21#include <linux/mfd/dbx500-prcmu.h>
22 22
23#include <mach/hardware.h> 23#include <mach/hardware.h>
24#include <mach/board-mop500-msp.h> 24#include <mach/msp.h>
25 25
26#include <sound/soc.h> 26#include <sound/soc.h>
27#include <sound/soc-dai.h> 27#include <sound/soc-dai.h>
diff --git a/sound/soc/ux500/ux500_msp_i2s.c b/sound/soc/ux500/ux500_msp_i2s.c
index ee14d2dac2f5..5c472f335a64 100644
--- a/sound/soc/ux500/ux500_msp_i2s.c
+++ b/sound/soc/ux500/ux500_msp_i2s.c
@@ -19,7 +19,7 @@
19#include <linux/slab.h> 19#include <linux/slab.h>
20 20
21#include <mach/hardware.h> 21#include <mach/hardware.h>
22#include <mach/board-mop500-msp.h> 22#include <mach/msp.h>
23 23
24#include <sound/soc.h> 24#include <sound/soc.h>
25 25
diff --git a/sound/soc/ux500/ux500_msp_i2s.h b/sound/soc/ux500/ux500_msp_i2s.h
index 7f71b4a0d4bc..2d9136da9865 100644
--- a/sound/soc/ux500/ux500_msp_i2s.h
+++ b/sound/soc/ux500/ux500_msp_i2s.h
@@ -17,7 +17,7 @@
17 17
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19 19
20#include <mach/board-mop500-msp.h> 20#include <mach/msp.h>
21 21
22#define MSP_INPUT_FREQ_APB 48000000 22#define MSP_INPUT_FREQ_APB 48000000
23 23
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 77f124fe57ad..35655c3a7b7a 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -319,6 +319,8 @@ LIB_H += $(ARCH_INCLUDE)
319LIB_H += util/cgroup.h 319LIB_H += util/cgroup.h
320LIB_H += $(TRACE_EVENT_DIR)event-parse.h 320LIB_H += $(TRACE_EVENT_DIR)event-parse.h
321LIB_H += util/target.h 321LIB_H += util/target.h
322LIB_H += util/rblist.h
323LIB_H += util/intlist.h
322 324
323LIB_OBJS += $(OUTPUT)util/abspath.o 325LIB_OBJS += $(OUTPUT)util/abspath.o
324LIB_OBJS += $(OUTPUT)util/alias.o 326LIB_OBJS += $(OUTPUT)util/alias.o
@@ -383,6 +385,8 @@ LIB_OBJS += $(OUTPUT)util/xyarray.o
383LIB_OBJS += $(OUTPUT)util/cpumap.o 385LIB_OBJS += $(OUTPUT)util/cpumap.o
384LIB_OBJS += $(OUTPUT)util/cgroup.o 386LIB_OBJS += $(OUTPUT)util/cgroup.o
385LIB_OBJS += $(OUTPUT)util/target.o 387LIB_OBJS += $(OUTPUT)util/target.o
388LIB_OBJS += $(OUTPUT)util/rblist.o
389LIB_OBJS += $(OUTPUT)util/intlist.o
386 390
387BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o 391BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
388 392
@@ -983,7 +987,8 @@ clean:
983 $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* 987 $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope*
984 $(MAKE) -C Documentation/ clean 988 $(MAKE) -C Documentation/ clean
985 $(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS 989 $(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS
986 $(RM) $(OUTPUT)util/*-{bison,flex}* 990 $(RM) $(OUTPUT)util/*-bison*
991 $(RM) $(OUTPUT)util/*-flex*
987 $(python-clean) 992 $(python-clean)
988 993
989.PHONY: all install clean strip $(LIBTRACEEVENT) 994.PHONY: all install clean strip $(LIBTRACEEVENT)
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index f5a6452931e6..4db6e1ba54e3 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -313,7 +313,7 @@ try_again:
313 } 313 }
314 } 314 }
315 315
316 perf_session__update_sample_type(session); 316 perf_session__set_id_hdr_size(session);
317} 317}
318 318
319static int process_buildids(struct perf_record *rec) 319static int process_buildids(struct perf_record *rec)
@@ -844,8 +844,6 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
844 struct perf_record *rec = &record; 844 struct perf_record *rec = &record;
845 char errbuf[BUFSIZ]; 845 char errbuf[BUFSIZ];
846 846
847 perf_header__set_cmdline(argc, argv);
848
849 evsel_list = perf_evlist__new(NULL, NULL); 847 evsel_list = perf_evlist__new(NULL, NULL);
850 if (evsel_list == NULL) 848 if (evsel_list == NULL)
851 return -ENOMEM; 849 return -ENOMEM;
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 69b1c1185159..7c88a243b5db 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -249,8 +249,9 @@ static int process_read_event(struct perf_tool *tool,
249static int perf_report__setup_sample_type(struct perf_report *rep) 249static int perf_report__setup_sample_type(struct perf_report *rep)
250{ 250{
251 struct perf_session *self = rep->session; 251 struct perf_session *self = rep->session;
252 u64 sample_type = perf_evlist__sample_type(self->evlist);
252 253
253 if (!self->fd_pipe && !(self->sample_type & PERF_SAMPLE_CALLCHAIN)) { 254 if (!self->fd_pipe && !(sample_type & PERF_SAMPLE_CALLCHAIN)) {
254 if (sort__has_parent) { 255 if (sort__has_parent) {
255 ui__error("Selected --sort parent, but no " 256 ui__error("Selected --sort parent, but no "
256 "callchain data. Did you call " 257 "callchain data. Did you call "
@@ -274,7 +275,7 @@ static int perf_report__setup_sample_type(struct perf_report *rep)
274 275
275 if (sort__branch_mode == 1) { 276 if (sort__branch_mode == 1) {
276 if (!self->fd_pipe && 277 if (!self->fd_pipe &&
277 !(self->sample_type & PERF_SAMPLE_BRANCH_STACK)) { 278 !(sample_type & PERF_SAMPLE_BRANCH_STACK)) {
278 ui__error("Selected -b but no branch data. " 279 ui__error("Selected -b but no branch data. "
279 "Did you call perf record without -b?\n"); 280 "Did you call perf record without -b?\n");
280 return -1; 281 return -1;
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index d909eb74a0eb..1d592f5cbea9 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -478,7 +478,6 @@ static int test__basic_mmap(void)
478 unsigned int nr_events[nsyscalls], 478 unsigned int nr_events[nsyscalls],
479 expected_nr_events[nsyscalls], i, j; 479 expected_nr_events[nsyscalls], i, j;
480 struct perf_evsel *evsels[nsyscalls], *evsel; 480 struct perf_evsel *evsels[nsyscalls], *evsel;
481 int sample_size = __perf_evsel__sample_size(attr.sample_type);
482 481
483 for (i = 0; i < nsyscalls; ++i) { 482 for (i = 0; i < nsyscalls; ++i) {
484 char name[64]; 483 char name[64];
@@ -563,8 +562,7 @@ static int test__basic_mmap(void)
563 goto out_munmap; 562 goto out_munmap;
564 } 563 }
565 564
566 err = perf_event__parse_sample(event, attr.sample_type, sample_size, 565 err = perf_evlist__parse_sample(evlist, event, &sample, false);
567 false, &sample, false);
568 if (err) { 566 if (err) {
569 pr_err("Can't parse sample, err = %d\n", err); 567 pr_err("Can't parse sample, err = %d\n", err);
570 goto out_munmap; 568 goto out_munmap;
@@ -661,12 +659,12 @@ static int test__PERF_RECORD(void)
661 const char *cmd = "sleep"; 659 const char *cmd = "sleep";
662 const char *argv[] = { cmd, "1", NULL, }; 660 const char *argv[] = { cmd, "1", NULL, };
663 char *bname; 661 char *bname;
664 u64 sample_type, prev_time = 0; 662 u64 prev_time = 0;
665 bool found_cmd_mmap = false, 663 bool found_cmd_mmap = false,
666 found_libc_mmap = false, 664 found_libc_mmap = false,
667 found_vdso_mmap = false, 665 found_vdso_mmap = false,
668 found_ld_mmap = false; 666 found_ld_mmap = false;
669 int err = -1, errs = 0, i, wakeups = 0, sample_size; 667 int err = -1, errs = 0, i, wakeups = 0;
670 u32 cpu; 668 u32 cpu;
671 int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, }; 669 int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
672 670
@@ -757,13 +755,6 @@ static int test__PERF_RECORD(void)
757 } 755 }
758 756
759 /* 757 /*
760 * We'll need these two to parse the PERF_SAMPLE_* fields in each
761 * event.
762 */
763 sample_type = perf_evlist__sample_type(evlist);
764 sample_size = __perf_evsel__sample_size(sample_type);
765
766 /*
767 * Now that all is properly set up, enable the events, they will 758 * Now that all is properly set up, enable the events, they will
768 * count just on workload.pid, which will start... 759 * count just on workload.pid, which will start...
769 */ 760 */
@@ -788,9 +779,7 @@ static int test__PERF_RECORD(void)
788 if (type < PERF_RECORD_MAX) 779 if (type < PERF_RECORD_MAX)
789 nr_events[type]++; 780 nr_events[type]++;
790 781
791 err = perf_event__parse_sample(event, sample_type, 782 err = perf_evlist__parse_sample(evlist, event, &sample, false);
792 sample_size, true,
793 &sample, false);
794 if (err < 0) { 783 if (err < 0) {
795 if (verbose) 784 if (verbose)
796 perf_event__fprintf(event, stderr); 785 perf_event__fprintf(event, stderr);
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 35e86c6df713..68cd61ef6ac5 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -38,6 +38,7 @@
38#include "util/cpumap.h" 38#include "util/cpumap.h"
39#include "util/xyarray.h" 39#include "util/xyarray.h"
40#include "util/sort.h" 40#include "util/sort.h"
41#include "util/intlist.h"
41 42
42#include "util/debug.h" 43#include "util/debug.h"
43 44
@@ -706,8 +707,16 @@ static void perf_event__process_sample(struct perf_tool *tool,
706 int err; 707 int err;
707 708
708 if (!machine && perf_guest) { 709 if (!machine && perf_guest) {
709 pr_err("Can't find guest [%d]'s kernel information\n", 710 static struct intlist *seen;
710 event->ip.pid); 711
712 if (!seen)
713 seen = intlist__new();
714
715 if (!intlist__has_entry(seen, event->ip.pid)) {
716 pr_err("Can't find guest [%d]'s kernel information\n",
717 event->ip.pid);
718 intlist__add(seen, event->ip.pid);
719 }
711 return; 720 return;
712 } 721 }
713 722
@@ -811,7 +820,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
811 int ret; 820 int ret;
812 821
813 while ((event = perf_evlist__mmap_read(top->evlist, idx)) != NULL) { 822 while ((event = perf_evlist__mmap_read(top->evlist, idx)) != NULL) {
814 ret = perf_session__parse_sample(session, event, &sample); 823 ret = perf_evlist__parse_sample(top->evlist, event, &sample, false);
815 if (ret) { 824 if (ret) {
816 pr_err("Can't parse sample, err = %d\n", ret); 825 pr_err("Can't parse sample, err = %d\n", ret);
817 continue; 826 continue;
@@ -943,8 +952,10 @@ try_again:
943 * based cpu-clock-tick sw counter, which 952 * based cpu-clock-tick sw counter, which
944 * is always available even if no PMU support: 953 * is always available even if no PMU support:
945 */ 954 */
946 if (attr->type == PERF_TYPE_HARDWARE && 955 if ((err == ENOENT || err == ENXIO) &&
947 attr->config == PERF_COUNT_HW_CPU_CYCLES) { 956 (attr->type == PERF_TYPE_HARDWARE) &&
957 (attr->config == PERF_COUNT_HW_CPU_CYCLES)) {
958
948 if (verbose) 959 if (verbose)
949 ui__warning("Cycles event not supported,\n" 960 ui__warning("Cycles event not supported,\n"
950 "trying to fall back to cpu-clock-ticks\n"); 961 "trying to fall back to cpu-clock-ticks\n");
@@ -1032,7 +1043,7 @@ static int __cmd_top(struct perf_top *top)
1032 &top->session->host_machine); 1043 &top->session->host_machine);
1033 perf_top__start_counters(top); 1044 perf_top__start_counters(top);
1034 top->session->evlist = top->evlist; 1045 top->session->evlist = top->evlist;
1035 perf_session__update_sample_type(top->session); 1046 perf_session__set_id_hdr_size(top->session);
1036 1047
1037 /* Wait for a minimal set of events before starting the snapshot */ 1048 /* Wait for a minimal set of events before starting the snapshot */
1038 poll(top->evlist->pollfd, top->evlist->nr_fds, 100); 1049 poll(top->evlist->pollfd, top->evlist->nr_fds, 100);
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 1b197280c621..d84870b06426 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -197,9 +197,6 @@ int perf_event__preprocess_sample(const union perf_event *self,
197 197
198const char *perf_event__name(unsigned int id); 198const char *perf_event__name(unsigned int id);
199 199
200int perf_event__parse_sample(const union perf_event *event, u64 type,
201 int sample_size, bool sample_id_all,
202 struct perf_sample *sample, bool swapped);
203int perf_event__synthesize_sample(union perf_event *event, u64 type, 200int perf_event__synthesize_sample(union perf_event *event, u64 type,
204 const struct perf_sample *sample, 201 const struct perf_sample *sample,
205 bool swapped); 202 bool swapped);
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 3edfd3483816..9b38681add9e 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -881,3 +881,10 @@ int perf_evlist__start_workload(struct perf_evlist *evlist)
881 881
882 return 0; 882 return 0;
883} 883}
884
885int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
886 struct perf_sample *sample, bool swapped)
887{
888 struct perf_evsel *e = list_entry(evlist->entries.next, struct perf_evsel, node);
889 return perf_evsel__parse_sample(e, event, sample, swapped);
890}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 40d4d3cdced0..528c1acd9298 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -122,6 +122,9 @@ u64 perf_evlist__sample_type(const struct perf_evlist *evlist);
122bool perf_evlist__sample_id_all(const const struct perf_evlist *evlist); 122bool perf_evlist__sample_id_all(const const struct perf_evlist *evlist);
123u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist); 123u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist);
124 124
125int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
126 struct perf_sample *sample, bool swapped);
127
125bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist); 128bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist);
126bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist); 129bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist);
127 130
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index e81771364867..2eaae140def2 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -20,7 +20,7 @@
20#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) 20#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
21#define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0)) 21#define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))
22 22
23int __perf_evsel__sample_size(u64 sample_type) 23static int __perf_evsel__sample_size(u64 sample_type)
24{ 24{
25 u64 mask = sample_type & PERF_SAMPLE_MASK; 25 u64 mask = sample_type & PERF_SAMPLE_MASK;
26 int size = 0; 26 int size = 0;
@@ -53,6 +53,7 @@ void perf_evsel__init(struct perf_evsel *evsel,
53 evsel->attr = *attr; 53 evsel->attr = *attr;
54 INIT_LIST_HEAD(&evsel->node); 54 INIT_LIST_HEAD(&evsel->node);
55 hists__init(&evsel->hists); 55 hists__init(&evsel->hists);
56 evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
56} 57}
57 58
58struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx) 59struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
@@ -728,10 +729,10 @@ static bool sample_overlap(const union perf_event *event,
728 return false; 729 return false;
729} 730}
730 731
731int perf_event__parse_sample(const union perf_event *event, u64 type, 732int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
732 int sample_size, bool sample_id_all,
733 struct perf_sample *data, bool swapped) 733 struct perf_sample *data, bool swapped)
734{ 734{
735 u64 type = evsel->attr.sample_type;
735 const u64 *array; 736 const u64 *array;
736 737
737 /* 738 /*
@@ -746,14 +747,14 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
746 data->period = 1; 747 data->period = 1;
747 748
748 if (event->header.type != PERF_RECORD_SAMPLE) { 749 if (event->header.type != PERF_RECORD_SAMPLE) {
749 if (!sample_id_all) 750 if (!evsel->attr.sample_id_all)
750 return 0; 751 return 0;
751 return perf_event__parse_id_sample(event, type, data, swapped); 752 return perf_event__parse_id_sample(event, type, data, swapped);
752 } 753 }
753 754
754 array = event->sample.array; 755 array = event->sample.array;
755 756
756 if (sample_size + sizeof(event->header) > event->header.size) 757 if (evsel->sample_size + sizeof(event->header) > event->header.size)
757 return -EFAULT; 758 return -EFAULT;
758 759
759 if (type & PERF_SAMPLE_IP) { 760 if (type & PERF_SAMPLE_IP) {
@@ -895,7 +896,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
895 u.val32[1] = sample->tid; 896 u.val32[1] = sample->tid;
896 if (swapped) { 897 if (swapped) {
897 /* 898 /*
898 * Inverse of what is done in perf_event__parse_sample 899 * Inverse of what is done in perf_evsel__parse_sample
899 */ 900 */
900 u.val32[0] = bswap_32(u.val32[0]); 901 u.val32[0] = bswap_32(u.val32[0]);
901 u.val32[1] = bswap_32(u.val32[1]); 902 u.val32[1] = bswap_32(u.val32[1]);
@@ -930,7 +931,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
930 u.val32[0] = sample->cpu; 931 u.val32[0] = sample->cpu;
931 if (swapped) { 932 if (swapped) {
932 /* 933 /*
933 * Inverse of what is done in perf_event__parse_sample 934 * Inverse of what is done in perf_evsel__parse_sample
934 */ 935 */
935 u.val32[0] = bswap_32(u.val32[0]); 936 u.val32[0] = bswap_32(u.val32[0]);
936 u.val64 = bswap_64(u.val64); 937 u.val64 = bswap_64(u.val64);
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 67cc5033d192..b559929983bb 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -65,6 +65,7 @@ struct perf_evsel {
65 void *func; 65 void *func;
66 void *data; 66 void *data;
67 } handler; 67 } handler;
68 unsigned int sample_size;
68 bool supported; 69 bool supported;
69}; 70};
70 71
@@ -177,13 +178,8 @@ static inline int perf_evsel__read_scaled(struct perf_evsel *evsel,
177 return __perf_evsel__read(evsel, ncpus, nthreads, true); 178 return __perf_evsel__read(evsel, ncpus, nthreads, true);
178} 179}
179 180
180int __perf_evsel__sample_size(u64 sample_type);
181
182static inline int perf_evsel__sample_size(struct perf_evsel *evsel)
183{
184 return __perf_evsel__sample_size(evsel->attr.sample_type);
185}
186
187void hists__init(struct hists *hists); 181void hists__init(struct hists *hists);
188 182
183int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
184 struct perf_sample *sample, bool swapped);
189#endif /* __PERF_EVSEL_H */ 185#endif /* __PERF_EVSEL_H */
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 3a6d20443330..74ea3c2f8138 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -174,6 +174,15 @@ perf_header__set_cmdline(int argc, const char **argv)
174{ 174{
175 int i; 175 int i;
176 176
177 /*
178 * If header_argv has already been set, do not override it.
179 * This allows a command to set the cmdline, parse args and
180 * then call another builtin function that implements a
 181 * command -- e.g., cmd_kvm calling cmd_record.
182 */
183 if (header_argv)
184 return 0;
185
177 header_argc = (u32)argc; 186 header_argc = (u32)argc;
178 187
179 /* do not include NULL termination */ 188 /* do not include NULL termination */
diff --git a/tools/perf/util/intlist.c b/tools/perf/util/intlist.c
new file mode 100644
index 000000000000..fd530dced9cb
--- /dev/null
+++ b/tools/perf/util/intlist.c
@@ -0,0 +1,101 @@
1/*
2 * Based on intlist.c by:
3 * (c) 2009 Arnaldo Carvalho de Melo <acme@redhat.com>
4 *
5 * Licensed under the GPLv2.
6 */
7
8#include <errno.h>
9#include <stdlib.h>
10#include <linux/compiler.h>
11
12#include "intlist.h"
13
14static struct rb_node *intlist__node_new(struct rblist *rblist __used,
15 const void *entry)
16{
17 int i = (int)((long)entry);
18 struct rb_node *rc = NULL;
19 struct int_node *node = malloc(sizeof(*node));
20
21 if (node != NULL) {
22 node->i = i;
23 rc = &node->rb_node;
24 }
25
26 return rc;
27}
28
29static void int_node__delete(struct int_node *ilist)
30{
31 free(ilist);
32}
33
34static void intlist__node_delete(struct rblist *rblist __used,
35 struct rb_node *rb_node)
36{
37 struct int_node *node = container_of(rb_node, struct int_node, rb_node);
38
39 int_node__delete(node);
40}
41
42static int intlist__node_cmp(struct rb_node *rb_node, const void *entry)
43{
44 int i = (int)((long)entry);
45 struct int_node *node = container_of(rb_node, struct int_node, rb_node);
46
47 return node->i - i;
48}
49
50int intlist__add(struct intlist *ilist, int i)
51{
52 return rblist__add_node(&ilist->rblist, (void *)((long)i));
53}
54
55void intlist__remove(struct intlist *ilist __used, struct int_node *node)
56{
57 int_node__delete(node);
58}
59
60struct int_node *intlist__find(struct intlist *ilist, int i)
61{
62 struct int_node *node = NULL;
63 struct rb_node *rb_node = rblist__find(&ilist->rblist, (void *)((long)i));
64
65 if (rb_node)
66 node = container_of(rb_node, struct int_node, rb_node);
67
68 return node;
69}
70
71struct intlist *intlist__new(void)
72{
73 struct intlist *ilist = malloc(sizeof(*ilist));
74
75 if (ilist != NULL) {
76 rblist__init(&ilist->rblist);
77 ilist->rblist.node_cmp = intlist__node_cmp;
78 ilist->rblist.node_new = intlist__node_new;
79 ilist->rblist.node_delete = intlist__node_delete;
80 }
81
82 return ilist;
83}
84
85void intlist__delete(struct intlist *ilist)
86{
87 if (ilist != NULL)
88 rblist__delete(&ilist->rblist);
89}
90
91struct int_node *intlist__entry(const struct intlist *ilist, unsigned int idx)
92{
93 struct int_node *node = NULL;
94 struct rb_node *rb_node;
95
96 rb_node = rblist__entry(&ilist->rblist, idx);
97 if (rb_node)
98 node = container_of(rb_node, struct int_node, rb_node);
99
100 return node;
101}
diff --git a/tools/perf/util/intlist.h b/tools/perf/util/intlist.h
new file mode 100644
index 000000000000..6d63ab90db50
--- /dev/null
+++ b/tools/perf/util/intlist.h
@@ -0,0 +1,75 @@
1#ifndef __PERF_INTLIST_H
2#define __PERF_INTLIST_H
3
4#include <linux/rbtree.h>
5#include <stdbool.h>
6
7#include "rblist.h"
8
9struct int_node {
10 struct rb_node rb_node;
11 int i;
12};
13
14struct intlist {
15 struct rblist rblist;
16};
17
18struct intlist *intlist__new(void);
19void intlist__delete(struct intlist *ilist);
20
21void intlist__remove(struct intlist *ilist, struct int_node *in);
22int intlist__add(struct intlist *ilist, int i);
23
24struct int_node *intlist__entry(const struct intlist *ilist, unsigned int idx);
25struct int_node *intlist__find(struct intlist *ilist, int i);
26
27static inline bool intlist__has_entry(struct intlist *ilist, int i)
28{
29 return intlist__find(ilist, i) != NULL;
30}
31
32static inline bool intlist__empty(const struct intlist *ilist)
33{
34 return rblist__empty(&ilist->rblist);
35}
36
37static inline unsigned int intlist__nr_entries(const struct intlist *ilist)
38{
39 return rblist__nr_entries(&ilist->rblist);
40}
41
42/* For intlist iteration */
43static inline struct int_node *intlist__first(struct intlist *ilist)
44{
45 struct rb_node *rn = rb_first(&ilist->rblist.entries);
46 return rn ? rb_entry(rn, struct int_node, rb_node) : NULL;
47}
48static inline struct int_node *intlist__next(struct int_node *in)
49{
50 struct rb_node *rn;
51 if (!in)
52 return NULL;
53 rn = rb_next(&in->rb_node);
54 return rn ? rb_entry(rn, struct int_node, rb_node) : NULL;
55}
56
57/**
 58 * intlist__for_each - iterate over an intlist
59 * @pos: the &struct int_node to use as a loop cursor.
60 * @ilist: the &struct intlist for loop.
61 */
62#define intlist__for_each(pos, ilist) \
63 for (pos = intlist__first(ilist); pos; pos = intlist__next(pos))
64
65/**
 66 * intlist__for_each_safe - iterate over an intlist safe against removal of
67 * int_node
68 * @pos: the &struct int_node to use as a loop cursor.
69 * @n: another &struct int_node to use as temporary storage.
70 * @ilist: the &struct intlist for loop.
71 */
72#define intlist__for_each_safe(pos, n, ilist) \
73 for (pos = intlist__first(ilist), n = intlist__next(pos); pos;\
74 pos = n, n = intlist__next(n))
75#endif /* __PERF_INTLIST_H */
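
A short usage sketch for the new intlist API, mirroring how builtin-top.c above uses it to warn about each unknown guest PID only once; report_once() is an illustrative wrapper, not part of the patch.

    #include <stdio.h>
    #include "util/intlist.h"

    static void report_once(int pid)
    {
            static struct intlist *seen;

            if (!seen)
                    seen = intlist__new();

            if (seen && !intlist__has_entry(seen, pid)) {
                    fprintf(stderr, "no kernel info for guest pid %d\n", pid);
                    intlist__add(seen, pid);
            }
    }
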
diff --git a/tools/perf/util/parse-events-test.c b/tools/perf/util/parse-events-test.c
index 1b997d2b89ce..127d648cc548 100644
--- a/tools/perf/util/parse-events-test.c
+++ b/tools/perf/util/parse-events-test.c
@@ -13,6 +13,9 @@ do { \
13 } \ 13 } \
14} while (0) 14} while (0)
15 15
16#define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \
17 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD)
18
16static int test__checkevent_tracepoint(struct perf_evlist *evlist) 19static int test__checkevent_tracepoint(struct perf_evlist *evlist)
17{ 20{
18 struct perf_evsel *evsel = list_entry(evlist->entries.next, 21 struct perf_evsel *evsel = list_entry(evlist->entries.next,
@@ -21,8 +24,7 @@ static int test__checkevent_tracepoint(struct perf_evlist *evlist)
21 TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); 24 TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
22 TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type); 25 TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);
23 TEST_ASSERT_VAL("wrong sample_type", 26 TEST_ASSERT_VAL("wrong sample_type",
24 (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU) == 27 PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type);
25 evsel->attr.sample_type);
26 TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period); 28 TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period);
27 return 0; 29 return 0;
28} 30}
@@ -37,8 +39,7 @@ static int test__checkevent_tracepoint_multi(struct perf_evlist *evlist)
37 TEST_ASSERT_VAL("wrong type", 39 TEST_ASSERT_VAL("wrong type",
38 PERF_TYPE_TRACEPOINT == evsel->attr.type); 40 PERF_TYPE_TRACEPOINT == evsel->attr.type);
39 TEST_ASSERT_VAL("wrong sample_type", 41 TEST_ASSERT_VAL("wrong sample_type",
40 (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU) 42 PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type);
41 == evsel->attr.sample_type);
42 TEST_ASSERT_VAL("wrong sample_period", 43 TEST_ASSERT_VAL("wrong sample_period",
43 1 == evsel->attr.sample_period); 44 1 == evsel->attr.sample_period);
44 } 45 }
@@ -428,8 +429,7 @@ static int test__checkevent_list(struct perf_evlist *evlist)
428 evsel = list_entry(evsel->node.next, struct perf_evsel, node); 429 evsel = list_entry(evsel->node.next, struct perf_evsel, node);
429 TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type); 430 TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);
430 TEST_ASSERT_VAL("wrong sample_type", 431 TEST_ASSERT_VAL("wrong sample_type",
431 (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU) == 432 PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type);
432 evsel->attr.sample_type);
433 TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period); 433 TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period);
434 TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); 434 TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
435 TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); 435 TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c
index 99d02aa57dbf..594f8fad5ecd 100644
--- a/tools/perf/util/parse-options.c
+++ b/tools/perf/util/parse-options.c
@@ -1,6 +1,7 @@
1#include "util.h" 1#include "util.h"
2#include "parse-options.h" 2#include "parse-options.h"
3#include "cache.h" 3#include "cache.h"
4#include "header.h"
4 5
5#define OPT_SHORT 1 6#define OPT_SHORT 1
6#define OPT_UNSET 2 7#define OPT_UNSET 2
@@ -413,6 +414,8 @@ int parse_options(int argc, const char **argv, const struct option *options,
413{ 414{
414 struct parse_opt_ctx_t ctx; 415 struct parse_opt_ctx_t ctx;
415 416
417 perf_header__set_cmdline(argc, argv);
418
416 parse_options_start(&ctx, argc, argv, flags); 419 parse_options_start(&ctx, argc, argv, flags);
417 switch (parse_options_step(&ctx, options, usagestr)) { 420 switch (parse_options_step(&ctx, options, usagestr)) {
418 case PARSE_OPT_HELP: 421 case PARSE_OPT_HELP:
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index e03b58a48424..0688bfb6d280 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -797,17 +797,13 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
797 797
798 event = perf_evlist__mmap_read(evlist, cpu); 798 event = perf_evlist__mmap_read(evlist, cpu);
799 if (event != NULL) { 799 if (event != NULL) {
800 struct perf_evsel *first;
801 PyObject *pyevent = pyrf_event__new(event); 800 PyObject *pyevent = pyrf_event__new(event);
802 struct pyrf_event *pevent = (struct pyrf_event *)pyevent; 801 struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
803 802
804 if (pyevent == NULL) 803 if (pyevent == NULL)
805 return PyErr_NoMemory(); 804 return PyErr_NoMemory();
806 805
807 first = list_entry(evlist->entries.next, struct perf_evsel, node); 806 err = perf_evlist__parse_sample(evlist, event, &pevent->sample, false);
808 err = perf_event__parse_sample(event, first->attr.sample_type,
809 perf_evsel__sample_size(first),
810 sample_id_all, &pevent->sample, false);
811 if (err) 807 if (err)
812 return PyErr_Format(PyExc_OSError, 808 return PyErr_Format(PyExc_OSError,
813 "perf: can't parse sample, err=%d", err); 809 "perf: can't parse sample, err=%d", err);
diff --git a/tools/perf/util/rblist.c b/tools/perf/util/rblist.c
new file mode 100644
index 000000000000..0171fb611004
--- /dev/null
+++ b/tools/perf/util/rblist.c
@@ -0,0 +1,107 @@
+/*
+ * Based on strlist.c by:
+ * (c) 2009 Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Licensed under the GPLv2.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "rblist.h"
+
+int rblist__add_node(struct rblist *rblist, const void *new_entry)
+{
+	struct rb_node **p = &rblist->entries.rb_node;
+	struct rb_node *parent = NULL, *new_node;
+
+	while (*p != NULL) {
+		int rc;
+
+		parent = *p;
+
+		rc = rblist->node_cmp(parent, new_entry);
+		if (rc > 0)
+			p = &(*p)->rb_left;
+		else if (rc < 0)
+			p = &(*p)->rb_right;
+		else
+			return -EEXIST;
+	}
+
+	new_node = rblist->node_new(rblist, new_entry);
+	if (new_node == NULL)
+		return -ENOMEM;
+
+	rb_link_node(new_node, parent, p);
+	rb_insert_color(new_node, &rblist->entries);
+	++rblist->nr_entries;
+
+	return 0;
+}
+
+void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node)
+{
+	rb_erase(rb_node, &rblist->entries);
+	rblist->node_delete(rblist, rb_node);
+}
+
+struct rb_node *rblist__find(struct rblist *rblist, const void *entry)
+{
+	struct rb_node **p = &rblist->entries.rb_node;
+	struct rb_node *parent = NULL;
+
+	while (*p != NULL) {
+		int rc;
+
+		parent = *p;
+
+		rc = rblist->node_cmp(parent, entry);
+		if (rc > 0)
+			p = &(*p)->rb_left;
+		else if (rc < 0)
+			p = &(*p)->rb_right;
+		else
+			return parent;
+	}
+
+	return NULL;
+}
+
+void rblist__init(struct rblist *rblist)
+{
+	if (rblist != NULL) {
+		rblist->entries = RB_ROOT;
+		rblist->nr_entries = 0;
+	}
+
+	return;
+}
+
+void rblist__delete(struct rblist *rblist)
+{
+	if (rblist != NULL) {
+		struct rb_node *pos, *next = rb_first(&rblist->entries);
+
+		while (next) {
+			pos = next;
+			next = rb_next(pos);
+			rb_erase(pos, &rblist->entries);
+			rblist->node_delete(rblist, pos);
+		}
+		free(rblist);
+	}
+}
+
+struct rb_node *rblist__entry(const struct rblist *rblist, unsigned int idx)
+{
+	struct rb_node *node;
+
+	for (node = rb_first(&rblist->entries); node; node = rb_next(node)) {
+		if (!idx--)
+			return node;
+	}
+
+	return NULL;
+}
diff --git a/tools/perf/util/rblist.h b/tools/perf/util/rblist.h
new file mode 100644
index 000000000000..6d0cae5ae83d
--- /dev/null
+++ b/tools/perf/util/rblist.h
@@ -0,0 +1,47 @@
+#ifndef __PERF_RBLIST_H
+#define __PERF_RBLIST_H
+
+#include <linux/rbtree.h>
+#include <stdbool.h>
+
+/*
+ * create node structs of the form:
+ * struct my_node {
+ *	struct rb_node rb_node;
+ *	... my data ...
+ * };
+ *
+ * create list structs of the form:
+ * struct mylist {
+ *	struct rblist rblist;
+ *	... my data ...
+ * };
+ */
+
+struct rblist {
+	struct rb_root entries;
+	unsigned int nr_entries;
+
+	int (*node_cmp)(struct rb_node *rbn, const void *entry);
+	struct rb_node *(*node_new)(struct rblist *rlist, const void *new_entry);
+	void (*node_delete)(struct rblist *rblist, struct rb_node *rb_node);
+};
+
+void rblist__init(struct rblist *rblist);
+void rblist__delete(struct rblist *rblist);
+int rblist__add_node(struct rblist *rblist, const void *new_entry);
+void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node);
+struct rb_node *rblist__find(struct rblist *rblist, const void *entry);
+struct rb_node *rblist__entry(const struct rblist *rblist, unsigned int idx);
+
+static inline bool rblist__empty(const struct rblist *rblist)
+{
+	return rblist->nr_entries == 0;
+}
+
+static inline unsigned int rblist__nr_entries(const struct rblist *rblist)
+{
+	return rblist->nr_entries;
+}
+
+#endif /* __PERF_RBLIST_H */
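
For context on the embedding pattern the rblist.h comment above describes, here is a minimal hypothetical user (an integer set; the names int_node, intlist and their helpers are illustrative only, not part of this patch). It assumes container_of() is available from the tree's <linux/kernel.h>:

/* Hypothetical example, not part of the patch: a set of ints on top of rblist. */
#include <stdlib.h>
#include <linux/kernel.h>	/* container_of(), as carried in the tools headers */
#include <linux/rbtree.h>
#include "rblist.h"

struct int_node {
	struct rb_node rb_node;	/* embedded so container_of() can recover the node */
	int i;
};

struct intlist {
	struct rblist rblist;	/* embedded generic rblist */
};

static int int_node__cmp(struct rb_node *rb_node, const void *entry)
{
	int i = *(const int *)entry;
	struct int_node *node = container_of(rb_node, struct int_node, rb_node);

	/* same contract as strcmp(): negative, zero or positive */
	return (node->i > i) - (node->i < i);
}

static struct rb_node *int_node__new(struct rblist *rblist, const void *entry)
{
	struct int_node *node = malloc(sizeof(*node));

	(void)rblist;	/* the list itself is not needed to build a node here */
	if (node == NULL)
		return NULL;
	node->i = *(const int *)entry;
	return &node->rb_node;
}

static void int_node__delete(struct rblist *rblist, struct rb_node *rb_node)
{
	(void)rblist;
	free(container_of(rb_node, struct int_node, rb_node));
}

static void intlist__init(struct intlist *ilist)
{
	rblist__init(&ilist->rblist);
	ilist->rblist.node_cmp    = int_node__cmp;
	ilist->rblist.node_new    = int_node__new;
	ilist->rblist.node_delete = int_node__delete;
}

With that wiring, int i = 42; rblist__add_node(&ilist->rblist, &i) inserts a copy, rblist__find() looks it up, and rblist__delete() is only appropriate for a heap-allocated rblist, since the implementation above calls free() on the struct it is handed. The strlist conversion further down follows the same pattern with string keys.
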
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 8e4f0755d2aa..2437fb0b463a 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -80,14 +80,12 @@ out_close:
 	return -1;
 }
 
-void perf_session__update_sample_type(struct perf_session *self)
+void perf_session__set_id_hdr_size(struct perf_session *session)
 {
-	self->sample_type = perf_evlist__sample_type(self->evlist);
-	self->sample_size = __perf_evsel__sample_size(self->sample_type);
-	self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
-	self->id_hdr_size = perf_evlist__id_hdr_size(self->evlist);
-	self->host_machine.id_hdr_size = self->id_hdr_size;
-	machines__set_id_hdr_size(&self->machines, self->id_hdr_size);
+	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);
+
+	session->host_machine.id_hdr_size = id_hdr_size;
+	machines__set_id_hdr_size(&session->machines, id_hdr_size);
 }
 
 int perf_session__create_kernel_maps(struct perf_session *self)
@@ -147,7 +145,7 @@ struct perf_session *perf_session__new(const char *filename, int mode,
 	if (mode == O_RDONLY) {
 		if (perf_session__open(self, force) < 0)
 			goto out_delete;
-		perf_session__update_sample_type(self);
+		perf_session__set_id_hdr_size(self);
 	} else if (mode == O_WRONLY) {
 		/*
 		 * In O_RDONLY mode this will be performed when reading the
@@ -158,7 +156,7 @@ struct perf_session *perf_session__new(const char *filename, int mode,
 	}
 
 	if (tool && tool->ordering_requires_timestamps &&
-	    tool->ordered_samples && !self->sample_id_all) {
+	    tool->ordered_samples && !perf_evlist__sample_id_all(self->evlist)) {
 		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
 		tool->ordered_samples = false;
 	}
@@ -673,7 +671,8 @@ static void flush_sample_queue(struct perf_session *s,
 		if (iter->timestamp > limit)
 			break;
 
-		ret = perf_session__parse_sample(s, iter->event, &sample);
+		ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample,
+						s->header.needs_swap);
 		if (ret)
 			pr_err("Can't parse sample, err = %d\n", ret);
 		else
@@ -865,16 +864,18 @@ static void perf_session__print_tstamp(struct perf_session *session,
 					   union perf_event *event,
 					   struct perf_sample *sample)
 {
+	u64 sample_type = perf_evlist__sample_type(session->evlist);
+
 	if (event->header.type != PERF_RECORD_SAMPLE &&
-	    !session->sample_id_all) {
+	    !perf_evlist__sample_id_all(session->evlist)) {
 		fputs("-1 -1 ", stdout);
 		return;
 	}
 
-	if ((session->sample_type & PERF_SAMPLE_CPU))
+	if ((sample_type & PERF_SAMPLE_CPU))
 		printf("%u ", sample->cpu);
 
-	if (session->sample_type & PERF_SAMPLE_TIME)
+	if (sample_type & PERF_SAMPLE_TIME)
 		printf("%" PRIu64 " ", sample->time);
 }
 
@@ -899,6 +900,8 @@ static void dump_event(struct perf_session *session, union perf_event *event,
 static void dump_sample(struct perf_session *session, union perf_event *event,
 			struct perf_sample *sample)
 {
+	u64 sample_type;
+
 	if (!dump_trace)
 		return;
 
@@ -906,10 +909,12 @@ static void dump_sample(struct perf_session *session, union perf_event *event,
 	       event->header.misc, sample->pid, sample->tid, sample->ip,
 	       sample->period, sample->addr);
 
-	if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
+	sample_type = perf_evlist__sample_type(session->evlist);
+
+	if (sample_type & PERF_SAMPLE_CALLCHAIN)
 		callchain__printf(sample);
 
-	if (session->sample_type & PERF_SAMPLE_BRANCH_STACK)
+	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
 		branch_stack__printf(sample);
 }
 
@@ -1006,7 +1011,7 @@ static int perf_session__preprocess_sample(struct perf_session *session,
 					      union perf_event *event, struct perf_sample *sample)
 {
 	if (event->header.type != PERF_RECORD_SAMPLE ||
-	    !(session->sample_type & PERF_SAMPLE_CALLCHAIN))
+	    !(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_CALLCHAIN))
 		return 0;
 
 	if (!ip_callchain__valid(sample->callchain, event)) {
@@ -1030,7 +1035,7 @@ static int perf_session__process_user_event(struct perf_session *session, union
 	case PERF_RECORD_HEADER_ATTR:
 		err = tool->attr(event, &session->evlist);
 		if (err == 0)
-			perf_session__update_sample_type(session);
+			perf_session__set_id_hdr_size(session);
 		return err;
 	case PERF_RECORD_HEADER_EVENT_TYPE:
 		return tool->event_type(tool, event);
@@ -1065,7 +1070,7 @@ static int perf_session__process_event(struct perf_session *session,
 	int ret;
 
 	if (session->header.needs_swap)
-		event_swap(event, session->sample_id_all);
+		event_swap(event, perf_evlist__sample_id_all(session->evlist));
 
 	if (event->header.type >= PERF_RECORD_HEADER_MAX)
 		return -EINVAL;
@@ -1078,7 +1083,8 @@ static int perf_session__process_event(struct perf_session *session,
 	/*
 	 * For all kernel events we get the sample data
 	 */
-	ret = perf_session__parse_sample(session, event, &sample);
+	ret = perf_evlist__parse_sample(session->evlist, event, &sample,
+					session->header.needs_swap);
 	if (ret)
 		return ret;
 
@@ -1389,9 +1395,9 @@ int perf_session__process_events(struct perf_session *self,
 	return err;
 }
 
-bool perf_session__has_traces(struct perf_session *self, const char *msg)
+bool perf_session__has_traces(struct perf_session *session, const char *msg)
 {
-	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
+	if (!(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_RAW)) {
 		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
 		return false;
 	}
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 7c435bde6eb0..1f7ec87db7d7 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -41,13 +41,9 @@ struct perf_session {
 	 *	perf.data file.
 	 */
 	struct hists		hists;
-	u64			sample_type;
-	int			sample_size;
 	int			fd;
 	bool			fd_pipe;
 	bool			repipe;
-	bool			sample_id_all;
-	u16			id_hdr_size;
 	int			cwdlen;
 	char			*cwd;
 	struct ordered_samples	ordered_samples;
@@ -86,7 +82,7 @@ void perf_event__attr_swap(struct perf_event_attr *attr);
 
 int perf_session__create_kernel_maps(struct perf_session *self);
 
-void perf_session__update_sample_type(struct perf_session *self);
+void perf_session__set_id_hdr_size(struct perf_session *session);
 void perf_session__remove_thread(struct perf_session *self, struct thread *th);
 
 static inline
@@ -130,24 +126,6 @@ size_t perf_session__fprintf_dsos_buildid(struct perf_session *self,
 
 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp);
 
-static inline int perf_session__parse_sample(struct perf_session *session,
-					     const union perf_event *event,
-					     struct perf_sample *sample)
-{
-	return perf_event__parse_sample(event, session->sample_type,
-					session->sample_size,
-					session->sample_id_all, sample,
-					session->header.needs_swap);
-}
-
-static inline int perf_session__synthesize_sample(struct perf_session *session,
-						  union perf_event *event,
-						  const struct perf_sample *sample)
-{
-	return perf_event__synthesize_sample(event, session->sample_type,
-					     sample, session->header.needs_swap);
-}
-
 struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
 						   unsigned int type);
 
diff --git a/tools/perf/util/strlist.c b/tools/perf/util/strlist.c
index 6783a2043555..95856ff3dda4 100644
--- a/tools/perf/util/strlist.c
+++ b/tools/perf/util/strlist.c
@@ -10,23 +10,28 @@
 #include <stdlib.h>
 #include <string.h>
 
-static struct str_node *str_node__new(const char *s, bool dupstr)
+static
+struct rb_node *strlist__node_new(struct rblist *rblist, const void *entry)
 {
-	struct str_node *self = malloc(sizeof(*self));
+	const char *s = entry;
+	struct rb_node *rc = NULL;
+	struct strlist *strlist = container_of(rblist, struct strlist, rblist);
+	struct str_node *snode = malloc(sizeof(*snode));
 
-	if (self != NULL) {
-		if (dupstr) {
+	if (snode != NULL) {
+		if (strlist->dupstr) {
 			s = strdup(s);
 			if (s == NULL)
 				goto out_delete;
 		}
-		self->s = s;
+		snode->s = s;
+		rc = &snode->rb_node;
 	}
 
-	return self;
+	return rc;
 
 out_delete:
-	free(self);
+	free(snode);
 	return NULL;
 }
 
@@ -37,36 +42,26 @@ static void str_node__delete(struct str_node *self, bool dupstr)
 	free(self);
 }
 
-int strlist__add(struct strlist *self, const char *new_entry)
+static
+void strlist__node_delete(struct rblist *rblist, struct rb_node *rb_node)
 {
-	struct rb_node **p = &self->entries.rb_node;
-	struct rb_node *parent = NULL;
-	struct str_node *sn;
-
-	while (*p != NULL) {
-		int rc;
-
-		parent = *p;
-		sn = rb_entry(parent, struct str_node, rb_node);
-		rc = strcmp(sn->s, new_entry);
-
-		if (rc > 0)
-			p = &(*p)->rb_left;
-		else if (rc < 0)
-			p = &(*p)->rb_right;
-		else
-			return -EEXIST;
-	}
+	struct strlist *slist = container_of(rblist, struct strlist, rblist);
+	struct str_node *snode = container_of(rb_node, struct str_node, rb_node);
 
-	sn = str_node__new(new_entry, self->dupstr);
-	if (sn == NULL)
-		return -ENOMEM;
+	str_node__delete(snode, slist->dupstr);
+}
 
-	rb_link_node(&sn->rb_node, parent, p);
-	rb_insert_color(&sn->rb_node, &self->entries);
-	++self->nr_entries;
+static int strlist__node_cmp(struct rb_node *rb_node, const void *entry)
+{
+	const char *str = entry;
+	struct str_node *snode = container_of(rb_node, struct str_node, rb_node);
+
+	return strcmp(snode->s, str);
+}
 
-	return 0;
+int strlist__add(struct strlist *self, const char *new_entry)
+{
+	return rblist__add_node(&self->rblist, new_entry);
 }
 
 int strlist__load(struct strlist *self, const char *filename)
@@ -96,34 +91,20 @@ out:
 	return err;
 }
 
-void strlist__remove(struct strlist *self, struct str_node *sn)
+void strlist__remove(struct strlist *slist, struct str_node *snode)
 {
-	rb_erase(&sn->rb_node, &self->entries);
-	str_node__delete(sn, self->dupstr);
+	str_node__delete(snode, slist->dupstr);
 }
 
-struct str_node *strlist__find(struct strlist *self, const char *entry)
+struct str_node *strlist__find(struct strlist *slist, const char *entry)
 {
-	struct rb_node **p = &self->entries.rb_node;
-	struct rb_node *parent = NULL;
-
-	while (*p != NULL) {
-		struct str_node *sn;
-		int rc;
-
-		parent = *p;
-		sn = rb_entry(parent, struct str_node, rb_node);
-		rc = strcmp(sn->s, entry);
-
-		if (rc > 0)
-			p = &(*p)->rb_left;
-		else if (rc < 0)
-			p = &(*p)->rb_right;
-		else
-			return sn;
-	}
+	struct str_node *snode = NULL;
+	struct rb_node *rb_node = rblist__find(&slist->rblist, entry);
 
-	return NULL;
+	if (rb_node)
+		snode = container_of(rb_node, struct str_node, rb_node);
+
+	return snode;
 }
 
 static int strlist__parse_list_entry(struct strlist *self, const char *s)
@@ -156,9 +137,12 @@ struct strlist *strlist__new(bool dupstr, const char *slist)
 	struct strlist *self = malloc(sizeof(*self));
 
 	if (self != NULL) {
-		self->entries = RB_ROOT;
+		rblist__init(&self->rblist);
+		self->rblist.node_cmp = strlist__node_cmp;
+		self->rblist.node_new = strlist__node_new;
+		self->rblist.node_delete = strlist__node_delete;
+
 		self->dupstr = dupstr;
-		self->nr_entries = 0;
 		if (slist && strlist__parse_list(self, slist) != 0)
 			goto out_error;
 	}
@@ -171,30 +155,18 @@ out_error:
 
 void strlist__delete(struct strlist *self)
 {
-	if (self != NULL) {
-		struct str_node *pos;
-		struct rb_node *next = rb_first(&self->entries);
-
-		while (next) {
-			pos = rb_entry(next, struct str_node, rb_node);
-			next = rb_next(&pos->rb_node);
-			strlist__remove(self, pos);
-		}
-		self->entries = RB_ROOT;
-		free(self);
-	}
+	if (self != NULL)
+		rblist__delete(&self->rblist);
 }
 
-struct str_node *strlist__entry(const struct strlist *self, unsigned int idx)
+struct str_node *strlist__entry(const struct strlist *slist, unsigned int idx)
 {
-	struct rb_node *nd;
+	struct str_node *snode = NULL;
+	struct rb_node *rb_node;
 
-	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
-		struct str_node *pos = rb_entry(nd, struct str_node, rb_node);
+	rb_node = rblist__entry(&slist->rblist, idx);
+	if (rb_node)
+		snode = container_of(rb_node, struct str_node, rb_node);
 
-		if (!idx--)
-			return pos;
-	}
-
-	return NULL;
+	return snode;
 }
diff --git a/tools/perf/util/strlist.h b/tools/perf/util/strlist.h
index 3ba839007d2c..dd9f922ec67c 100644
--- a/tools/perf/util/strlist.h
+++ b/tools/perf/util/strlist.h
@@ -4,14 +4,15 @@
 #include <linux/rbtree.h>
 #include <stdbool.h>
 
+#include "rblist.h"
+
 struct str_node {
 	struct rb_node rb_node;
 	const char     *s;
 };
 
 struct strlist {
-	struct rb_root entries;
-	unsigned int nr_entries;
+	struct rblist rblist;
 	bool dupstr;
 };
 
@@ -32,18 +33,18 @@ static inline bool strlist__has_entry(struct strlist *self, const char *entry)
 
 static inline bool strlist__empty(const struct strlist *self)
 {
-	return self->nr_entries == 0;
+	return rblist__empty(&self->rblist);
 }
 
 static inline unsigned int strlist__nr_entries(const struct strlist *self)
 {
-	return self->nr_entries;
+	return rblist__nr_entries(&self->rblist);
 }
 
 /* For strlist iteration */
 static inline struct str_node *strlist__first(struct strlist *self)
 {
-	struct rb_node *rn = rb_first(&self->entries);
+	struct rb_node *rn = rb_first(&self->rblist.entries);
 	return rn ? rb_entry(rn, struct str_node, rb_node) : NULL;
 }
 static inline struct str_node *strlist__next(struct str_node *sn)
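
The iteration helpers kept by this hunk (strlist__first()/strlist__next()) still walk the underlying rbtree directly. A small usage sketch follows (hypothetical caller, not part of the patch; it assumes strlist__new() accepts a comma-separated list, as elsewhere in perf):

#include <stdio.h>
#include "strlist.h"

/* Print every entry of a strlist in rbtree (sorted) order. */
static void strlist__fprintf_entries(struct strlist *slist, FILE *fp)
{
	struct str_node *pos;

	for (pos = strlist__first(slist); pos; pos = strlist__next(pos))
		fprintf(fp, "%s\n", pos->s);
}

/*
 * Possible caller:
 *
 *	struct strlist *slist = strlist__new(true, "foo,bar,baz");
 *
 *	if (slist != NULL) {
 *		strlist__fprintf_entries(slist, stdout);
 *		strlist__delete(slist);
 *	}
 */
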
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index fdad4eeeb429..8b63b678e127 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -64,7 +64,7 @@ static enum dso_binary_type binary_type_symtab[] = {
 	DSO_BINARY_TYPE__NOT_FOUND,
 };
 
-#define DSO_BINARY_TYPE__SYMTAB_CNT sizeof(binary_type_symtab)
+#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
 
 static enum dso_binary_type binary_type_data[] = {
 	DSO_BINARY_TYPE__BUILD_ID_CACHE,
@@ -72,7 +72,7 @@ static enum dso_binary_type binary_type_data[] = {
 	DSO_BINARY_TYPE__NOT_FOUND,
 };
 
-#define DSO_BINARY_TYPE__DATA_CNT sizeof(binary_type_data)
+#define DSO_BINARY_TYPE__DATA_CNT ARRAY_SIZE(binary_type_data)
 
 int dso__name_len(const struct dso *dso)
 {
@@ -2875,6 +2875,7 @@ int machines__create_guest_kernel_maps(struct rb_root *machines)
 	int i, items = 0;
 	char path[PATH_MAX];
 	pid_t pid;
+	char *endp;
 
 	if (symbol_conf.default_guest_vmlinux_name ||
 	    symbol_conf.default_guest_modules ||
@@ -2891,7 +2892,14 @@ int machines__create_guest_kernel_maps(struct rb_root *machines)
 			/* Filter out . and .. */
 			continue;
 		}
-		pid = atoi(namelist[i]->d_name);
+		pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
+		if ((*endp != '\0') ||
+		    (endp == namelist[i]->d_name) ||
+		    (errno == ERANGE)) {
+			pr_debug("invalid directory (%s). Skipping.\n",
+				 namelist[i]->d_name);
+			continue;
+		}
 		sprintf(path, "%s/%s/proc/kallsyms",
 			symbol_conf.guestmount,
 			namelist[i]->d_name);
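
The hunk above replaces atoi() with strtol() plus explicit endptr and ERANGE checks when walking guest PID directories. As a standalone sketch of the same validation pattern (hypothetical helper, not from the patch; note that errno is cleared first so the ERANGE test is reliable):

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <sys/types.h>

/* Parse a decimal PID, rejecting empty strings, trailing junk and overflow. */
static bool parse_pid(const char *name, pid_t *pid)
{
	char *endp;
	long val;

	errno = 0;
	val = strtol(name, &endp, 10);

	if (endp == name || *endp != '\0' || errno == ERANGE)
		return false;

	*pid = (pid_t)val;
	return true;
}
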
diff --git a/tools/perf/util/target.c b/tools/perf/util/target.c
index 3f59c496e64c..051eaa68095e 100644
--- a/tools/perf/util/target.c
+++ b/tools/perf/util/target.c
@@ -110,7 +110,7 @@ int perf_target__strerror(struct perf_target *target, int errnum,
 	int idx;
 	const char *msg;
 
-	BUG_ON(buflen > 0);
+	BUG_ON(buflen == 0);
 
 	if (errnum >= 0) {
 		const char *err = strerror_r(errnum, buf, buflen);