-rw-r--r--  Documentation/cciss.txt | 3
-rw-r--r--  Documentation/email-clients.txt | 25
-rw-r--r--  Documentation/filesystems/vfat.txt | 32
-rw-r--r--  Documentation/kernel-parameters.txt | 10
-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/Kconfig | 2
-rw-r--r--  arch/arm/include/asm/memory.h | 12
-rw-r--r--  arch/arm/include/asm/system.h | 4
-rw-r--r--  arch/arm/kernel/elf.c | 6
-rw-r--r--  arch/arm/kernel/module.c | 8
-rw-r--r--  arch/arm/mach-omap2/gpmc.c | 6
-rw-r--r--  arch/arm/mm/cache-xsc3l2.c | 4
-rw-r--r--  arch/arm/mm/mmu.c | 111
-rw-r--r--  arch/arm/mm/proc-v7.S | 12
-rw-r--r--  arch/arm/plat-omap/clock.c | 20
-rw-r--r--  arch/arm/plat-omap/include/mach/entry-macro.S | 4
-rw-r--r--  arch/arm/plat-omap/include/mach/irqs.h | 2
-rw-r--r--  arch/ia64/Kconfig | 19
-rw-r--r--  arch/ia64/hp/common/hwsw_iommu.c | 9
-rw-r--r--  arch/ia64/include/asm/io.h | 24
-rw-r--r--  arch/ia64/include/asm/machvec.h | 22
-rw-r--r--  arch/ia64/include/asm/meminit.h | 1
-rw-r--r--  arch/ia64/include/asm/sal.h | 15
-rw-r--r--  arch/ia64/include/asm/sn/sn_sal.h | 45
-rw-r--r--  arch/ia64/kernel/acpi.c | 29
-rw-r--r--  arch/ia64/kernel/pci-dma.c | 2
-rw-r--r--  arch/ia64/kernel/setup.c | 9
-rw-r--r--  arch/ia64/mm/discontig.c | 1
-rw-r--r--  arch/ia64/uv/kernel/setup.c | 6
-rw-r--r--  arch/powerpc/platforms/cell/ras.c | 1
-rw-r--r--  arch/powerpc/platforms/pseries/pci_dlpar.c | 1
-rw-r--r--  arch/x86/Kconfig | 2
-rw-r--r--  arch/x86/include/asm/irq_vectors.h | 20
-rw-r--r--  arch/x86/include/asm/msr.h | 2
-rw-r--r--  arch/x86/include/asm/topology.h | 7
-rw-r--r--  arch/x86/include/asm/tsc.h | 8
-rw-r--r--  arch/x86/include/asm/voyager.h | 1
-rw-r--r--  arch/x86/kernel/amd_iommu.c | 9
-rw-r--r--  arch/x86/kernel/io_apic.c | 2
-rw-r--r--  arch/x86/kernel/reboot.c | 6
-rw-r--r--  arch/x86/kernel/tlb_32.c | 6
-rw-r--r--  arch/x86/kernel/tlb_64.c | 5
-rw-r--r--  arch/x86/kernel/tsc.c | 10
-rw-r--r--  arch/x86/mach-voyager/setup.c | 2
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c | 2
-rw-r--r--  arch/x86/mm/pageattr.c | 8
-rw-r--r--  arch/x86/oprofile/op_model_ppro.c | 9
-rw-r--r--  arch/x86/xen/enlighten.c | 5
-rw-r--r--  arch/x86/xen/mmu.c | 13
-rw-r--r--  block/blk-core.c | 4
-rw-r--r--  block/blk-merge.c | 21
-rw-r--r--  block/blk-timeout.c | 20
-rw-r--r--  block/elevator.c | 12
-rw-r--r--  drivers/ata/libata-core.c | 72
-rw-r--r--  drivers/ata/libata-scsi.c | 30
-rw-r--r--  drivers/ata/libata.h | 19
-rw-r--r--  drivers/ata/sata_nv.c | 53
-rw-r--r--  drivers/ata/sata_promise.c | 20
-rw-r--r--  drivers/ata/sata_via.c | 4
-rw-r--r--  drivers/block/cciss.c | 28
-rw-r--r--  drivers/block/cpqarray.c | 7
-rw-r--r--  drivers/char/vt.c | 10
-rw-r--r--  drivers/cpuidle/cpuidle.c | 4
-rw-r--r--  drivers/firewire/fw-device.c | 14
-rw-r--r--  drivers/firewire/fw-ohci.c | 2
-rw-r--r--  drivers/firewire/fw-sbp2.c | 2
-rw-r--r--  drivers/firmware/dmi_scan.c | 6
-rw-r--r--  drivers/hwmon/applesmc.c | 36
-rw-r--r--  drivers/ieee1394/dv1394.c | 10
-rw-r--r--  drivers/ieee1394/hosts.c | 4
-rw-r--r--  drivers/ieee1394/nodemgr.c | 14
-rw-r--r--  drivers/ieee1394/raw1394.c | 9
-rw-r--r--  drivers/md/linear.c | 2
-rw-r--r--  drivers/md/md.c | 6
-rw-r--r--  drivers/md/raid10.c | 2
-rw-r--r--  drivers/message/fusion/mptlan.c | 108
-rw-r--r--  drivers/mmc/core/bus.c | 3
-rw-r--r--  drivers/mmc/core/core.c | 6
-rw-r--r--  drivers/mmc/core/host.c | 5
-rw-r--r--  drivers/mmc/core/sdio_bus.c | 3
-rw-r--r--  drivers/mmc/host/mmc_spi.c | 2
-rw-r--r--  drivers/mmc/host/sdhci.c | 2
-rw-r--r--  drivers/mmc/host/tifm_sd.c | 16
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0002.c | 13
-rw-r--r--  drivers/mtd/chips/jedec_probe.c | 10
-rw-r--r--  drivers/mtd/onenand/omap2.c | 1
-rw-r--r--  drivers/net/Kconfig | 18
-rw-r--r--  drivers/net/atl1e/atl1e.h | 1
-rw-r--r--  drivers/net/bnx2x_init.h | 9
-rw-r--r--  drivers/net/bnx2x_main.c | 10
-rw-r--r--  drivers/net/fs_enet/fs_enet-main.c | 6
-rw-r--r--  drivers/net/mv643xx_eth.c | 9
-rw-r--r--  drivers/net/niu.c | 6
-rw-r--r--  drivers/net/smc911x.c | 2
-rw-r--r--  drivers/net/smc91x.c | 2
-rw-r--r--  drivers/net/ucc_geth_ethtool.c | 1
-rw-r--r--  drivers/net/wireless/ath5k/base.c | 4
-rw-r--r--  drivers/net/wireless/ath5k/desc.c | 16
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-scan.c | 7
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c | 15
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c | 2
-rw-r--r--  drivers/oprofile/event_buffer.c | 6
-rw-r--r--  drivers/pci/pci-sysfs.c | 2
-rw-r--r--  drivers/pci/quirks.c | 36
-rw-r--r--  drivers/pci/rom.c | 6
-rw-r--r--  drivers/pnp/interface.c | 1
-rw-r--r--  drivers/ps3/ps3-lpm.c | 1
-rw-r--r--  drivers/regulator/Kconfig | 15
-rw-r--r--  drivers/rtc/interface.c | 2
-rw-r--r--  drivers/rtc/rtc-cmos.c | 2
-rw-r--r--  drivers/sbus/char/jsflash.c | 1
-rw-r--r--  drivers/serial/atmel_serial.c | 17
-rw-r--r--  drivers/staging/Kconfig | 20
-rw-r--r--  drivers/staging/echo/echo.c | 1
-rw-r--r--  drivers/staging/me4000/me4000.c | 1
-rw-r--r--  drivers/staging/usbip/Kconfig | 2
-rw-r--r--  drivers/video/Kconfig | 32
-rw-r--r--  drivers/video/Makefile | 1
-rw-r--r--  drivers/video/fbmem.c | 63
-rw-r--r--  drivers/video/mb862xx/Makefile | 5
-rw-r--r--  drivers/video/mb862xx/mb862xx_reg.h | 138
-rw-r--r--  drivers/video/mb862xx/mb862xxfb.c | 1061
-rw-r--r--  drivers/video/mb862xx/mb862xxfb.h | 83
-rw-r--r--  drivers/watchdog/Kconfig | 8
-rw-r--r--  drivers/watchdog/at91sam9_wdt.c | 2
-rw-r--r--  drivers/xen/balloon.c | 1
-rw-r--r--  fs/Makefile | 2
-rw-r--r--  fs/autofs4/dev-ioctl.c | 5
-rw-r--r--  fs/autofs4/expire.c | 19
-rw-r--r--  fs/block_dev.c | 23
-rw-r--r--  fs/cifs/CHANGES | 6
-rw-r--r--  fs/cifs/cifsglob.h | 2
-rw-r--r--  fs/cifs/cifsproto.h | 2
-rw-r--r--  fs/cifs/cifssmb.c | 2
-rw-r--r--  fs/cifs/connect.c | 50
-rw-r--r--  fs/cifs/file.c | 2
-rw-r--r--  fs/cifs/inode.c | 4
-rw-r--r--  fs/cifs/transport.c | 48
-rw-r--r--  fs/ext3/super.c | 11
-rw-r--r--  fs/ext4/ialloc.c | 2
-rw-r--r--  fs/ext4/inode.c | 7
-rw-r--r--  fs/ext4/mballoc.c | 1
-rw-r--r--  fs/ext4/super.c | 24
-rw-r--r--  fs/fat/Makefile | 6
-rw-r--r--  fs/fat/cache.c | 25
-rw-r--r--  fs/fat/dir.c | 20
-rw-r--r--  fs/fat/fat.h | 329
-rw-r--r--  fs/fat/fatent.c | 24
-rw-r--r--  fs/fat/file.c | 49
-rw-r--r--  fs/fat/inode.c | 131
-rw-r--r--  fs/fat/misc.c | 155
-rw-r--r--  fs/fat/namei_msdos.c (renamed from fs/msdos/namei.c) | 42
-rw-r--r--  fs/fat/namei_vfat.c (renamed from fs/vfat/namei.c) | 161
-rw-r--r--  fs/jbd/checkpoint.c | 31
-rw-r--r--  fs/jbd2/checkpoint.c | 32
-rw-r--r--  fs/jbd2/journal.c | 2
-rw-r--r--  fs/jffs2/background.c | 10
-rw-r--r--  fs/jffs2/compr_lzo.c | 15
-rw-r--r--  fs/jffs2/nodemgmt.c | 2
-rw-r--r--  fs/msdos/Makefile | 7
-rw-r--r--  fs/nfsd/vfs.c | 5
-rw-r--r--  fs/vfat/Makefile | 7
-rw-r--r--  include/asm-generic/memory_model.h | 2
-rw-r--r--  include/linux/bio.h | 6
-rw-r--r--  include/linux/cnt32_to_63.h | 22
-rw-r--r--  include/linux/cpumask.h | 559
-rw-r--r--  include/linux/hrtimer.h | 2
-rw-r--r--  include/linux/if_vlan.h | 7
-rw-r--r--  include/linux/libata.h | 3
-rw-r--r--  include/linux/mmc/card.h | 2
-rw-r--r--  include/linux/mmc/host.h | 2
-rw-r--r--  include/linux/mmc/sdio_func.h | 2
-rw-r--r--  include/linux/msdos_fs.h | 281
-rw-r--r--  include/linux/mtd/cfi.h | 22
-rw-r--r--  include/linux/pci.h | 2
-rw-r--r--  include/linux/sched.h | 2
-rw-r--r--  include/linux/smp.h | 9
-rw-r--r--  include/linux/timer.h | 5
-rw-r--r--  include/linux/topology.h | 8
-rw-r--r--  include/linux/workqueue.h | 8
-rw-r--r--  include/net/af_unix.h | 1
-rw-r--r--  include/net/scm.h | 5
-rw-r--r--  include/sound/core.h | 10
-rw-r--r--  kernel/cgroup.c | 1
-rw-r--r--  kernel/cpu.c | 3
-rw-r--r--  kernel/sched.c | 13
-rw-r--r--  kernel/sched_fair.c | 76
-rw-r--r--  kernel/sched_features.h | 1
-rw-r--r--  kernel/smp.c | 18
-rw-r--r--  kernel/timer.c | 129
-rw-r--r--  kernel/trace/ring_buffer.c | 2
-rw-r--r--  kernel/trace/trace.c | 17
-rw-r--r--  kernel/workqueue.c | 45
-rw-r--r--  lib/cpumask.c | 79
-rw-r--r--  mm/hugetlb.c | 49
-rw-r--r--  mm/internal.h | 29
-rw-r--r--  mm/mempolicy.c | 18
-rw-r--r--  mm/migrate.c | 2
-rw-r--r--  mm/oom_kill.c | 3
-rw-r--r--  mm/page_alloc.c | 28
-rw-r--r--  mm/page_isolation.c | 5
-rw-r--r--  mm/sparse-vmemmap.c | 2
-rw-r--r--  mm/vmalloc.c | 9
-rw-r--r--  net/8021q/vlan_core.c | 46
-rw-r--r--  net/9p/client.c | 59
-rw-r--r--  net/9p/trans_rdma.c | 5
-rw-r--r--  net/core/dev.c | 3
-rw-r--r--  net/core/scm.c | 24
-rw-r--r--  net/ipv4/tcp.c | 3
-rw-r--r--  net/ipv4/xfrm4_state.c | 1
-rw-r--r--  net/ipv6/addrconf.c | 4
-rw-r--r--  net/ipv6/udp.c | 28
-rw-r--r--  net/ipv6/xfrm6_state.c | 1
-rw-r--r--  net/netfilter/nf_conntrack_helper.c | 3
-rw-r--r--  net/netfilter/nf_conntrack_proto.c | 5
-rw-r--r--  net/rfkill/rfkill.c | 2
-rw-r--r--  net/unix/af_unix.c | 31
-rw-r--r--  net/unix/garbage.c | 49
-rw-r--r--  net/xfrm/xfrm_policy.c | 4
-rw-r--r--  net/xfrm/xfrm_user.c | 2
-rw-r--r--  scripts/package/builddeb | 24
-rw-r--r--  sound/isa/Kconfig | 2
-rw-r--r--  sound/pci/hda/hda_proc.c | 2
-rw-r--r--  sound/pci/hda/patch_analog.c | 2
-rw-r--r--  sound/pci/hda/patch_realtek.c | 1
-rw-r--r--  sound/pci/rme9652/hdsp.c | 27
228 files changed, 4396 insertions, 1451 deletions
diff --git a/Documentation/cciss.txt b/Documentation/cciss.txt
index 8244c6442faa..89698e8df7d4 100644
--- a/Documentation/cciss.txt
+++ b/Documentation/cciss.txt
@@ -21,11 +21,14 @@ This driver is known to work with the following cards:
21 * SA E200 21 * SA E200
22 * SA E200i 22 * SA E200i
23 * SA E500 23 * SA E500
24 * SA P700m
24 * SA P212 25 * SA P212
25 * SA P410 26 * SA P410
26 * SA P410i 27 * SA P410i
27 * SA P411 28 * SA P411
28 * SA P812 29 * SA P812
30 * SA P712m
31 * SA P711m
29 32
30Detecting drive failures: 33Detecting drive failures:
31------------------------- 34-------------------------
diff --git a/Documentation/email-clients.txt b/Documentation/email-clients.txt
index 2ebb94d6ed8e..a618efab7b15 100644
--- a/Documentation/email-clients.txt
+++ b/Documentation/email-clients.txt
@@ -213,4 +213,29 @@ TkRat (GUI)
213 213
214Works. Use "Insert file..." or external editor. 214Works. Use "Insert file..." or external editor.
215 215
216~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
217Gmail (Web GUI)
218
219If you just have to use Gmail to send patches, it CAN be made to work. It
220requires a bit of external help, though.
221
222The first problem is that Gmail converts tabs to spaces. This will
223totally break your patches. To prevent this, you have to use a different
224editor. There is a firefox extension called "ViewSourceWith"
225(https://addons.mozilla.org/en-US/firefox/addon/394) which allows you to
226edit any text box in the editor of your choice. Configure it to launch
227your favorite editor. When you want to send a patch, use this technique.
228Once you have crafted your message + patch, save and exit the editor,
229which should reload the Gmail edit box. GMAIL WILL PRESERVE THE TABS.
230Hoorah. Apparently you can cut-n-paste literal tabs, but Gmail will
231convert those to spaces upon sending!
232
233The second problem is that Gmail converts tabs to spaces on replies. If
234you reply to a patch, don't expect to be able to apply it as a patch.
235
236The last problem is that Gmail will base64-encode any message that has a
237non-ASCII character. That includes things like European names. Be aware.
238
239Gmail is not convenient for lkml patches, but CAN be made to work.
240
216 ### 241 ###
diff --git a/Documentation/filesystems/vfat.txt b/Documentation/filesystems/vfat.txt
index bbac4f1d9056..3a5ddc96901a 100644
--- a/Documentation/filesystems/vfat.txt
+++ b/Documentation/filesystems/vfat.txt
@@ -8,6 +8,12 @@ if you want to format from within Linux.
8 8
9VFAT MOUNT OPTIONS 9VFAT MOUNT OPTIONS
10---------------------------------------------------------------------- 10----------------------------------------------------------------------
11uid=### -- Set the owner of all files on this filesystem.
12 The default is the uid of current process.
13
14gid=### -- Set the group of all files on this filesystem.
15 The default is the gid of current process.
16
11umask=### -- The permission mask (for files and directories, see umask(1)). 17umask=### -- The permission mask (for files and directories, see umask(1)).
12 The default is the umask of current process. 18 The default is the umask of current process.
13 19
@@ -36,7 +42,7 @@ codepage=### -- Sets the codepage number for converting to shortname
36 characters on FAT filesystem. 42 characters on FAT filesystem.
37 By default, FAT_DEFAULT_CODEPAGE setting is used. 43 By default, FAT_DEFAULT_CODEPAGE setting is used.
38 44
39iocharset=name -- Character set to use for converting between the 45iocharset=<name> -- Character set to use for converting between the
40 encoding is used for user visible filename and 16 bit 46 encoding is used for user visible filename and 16 bit
41 Unicode characters. Long filenames are stored on disk 47 Unicode characters. Long filenames are stored on disk
42 in Unicode format, but Unix for the most part doesn't 48 in Unicode format, but Unix for the most part doesn't
@@ -86,6 +92,8 @@ check=s|r|n -- Case sensitivity checking setting.
86 r: relaxed, case insensitive 92 r: relaxed, case insensitive
87 n: normal, default setting, currently case insensitive 93 n: normal, default setting, currently case insensitive
88 94
95nocase -- This was deprecated for vfat. Use shortname=win95 instead.
96
89shortname=lower|win95|winnt|mixed 97shortname=lower|win95|winnt|mixed
90 -- Shortname display/create setting. 98 -- Shortname display/create setting.
91 lower: convert to lowercase for display, 99 lower: convert to lowercase for display,
@@ -99,11 +107,31 @@ shortname=lower|win95|winnt|mixed
99tz=UTC -- Interpret timestamps as UTC rather than local time. 107tz=UTC -- Interpret timestamps as UTC rather than local time.
100 This option disables the conversion of timestamps 108 This option disables the conversion of timestamps
101 between local time (as used by Windows on FAT) and UTC 109 between local time (as used by Windows on FAT) and UTC
102 (which Linux uses internally). This is particuluarly 110 (which Linux uses internally). This is particularly
103 useful when mounting devices (like digital cameras) 111 useful when mounting devices (like digital cameras)
104 that are set to UTC in order to avoid the pitfalls of 112 that are set to UTC in order to avoid the pitfalls of
105 local time. 113 local time.
106 114
115showexec -- If set, the execute permission bits of the file will be
116 allowed only if the extension part of the name is .EXE,
117 .COM, or .BAT. Not set by default.
118
119debug -- Can be set, but unused by the current implementation.
120
121sys_immutable -- If set, ATTR_SYS attribute on FAT is handled as
122 IMMUTABLE flag on Linux. Not set by default.
123
124flush -- If set, the filesystem will try to flush to disk earlier
125 than normal. Not set by default.
126
127rodir -- FAT has the ATTR_RO (read-only) attribute. On Windows,
128 however, ATTR_RO on a directory is simply ignored and is
129 used only by applications as a flag, e.g. it is set to
130 mark a customized folder.
131
132 If you want to use ATTR_RO as read-only flag even for
133 the directory, set this option.
134
107<bool>: 0,1,yes,no,true,false 135<bool>: 0,1,yes,no,true,false
108 136
109TODO 137TODO
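
As a rough illustration of the options documented above, a userspace program can pass the same option string through the data argument of mount(2); the device node, mount point and numeric ids below are placeholders, not values taken from this patch:

	#include <stdio.h>
	#include <sys/mount.h>

	int main(void)
	{
		/* uid=/gid=/shortname=/tz= are the vfat options described above */
		if (mount("/dev/sdb1", "/mnt/camera", "vfat", MS_NOATIME,
			  "uid=1000,gid=1000,shortname=mixed,tz=UTC") < 0) {
			perror("mount");
			return 1;
		}
		return 0;
	}

The equivalent mount(8) invocation would be along the lines of
"mount -t vfat -o uid=1000,gid=1000,shortname=mixed,tz=UTC /dev/sdb1 /mnt/camera".
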
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 1bbcaa8982b6..c86c07459712 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -995,13 +995,15 @@ and is between 256 and 4096 characters. It is defined in the file
995 Format: 995 Format:
996 <cpu number>,...,<cpu number> 996 <cpu number>,...,<cpu number>
997 or 997 or
998 <cpu number>-<cpu number> (must be a positive range in ascending order) 998 <cpu number>-<cpu number>
999 (must be a positive range in ascending order)
999 or a mixture 1000 or a mixture
1000 <cpu number>,...,<cpu number>-<cpu number> 1001 <cpu number>,...,<cpu number>-<cpu number>
1002
1001 This option can be used to specify one or more CPUs 1003 This option can be used to specify one or more CPUs
1002 to isolate from the general SMP balancing and scheduling 1004 to isolate from the general SMP balancing and scheduling
1003 algorithms. The only way to move a process onto or off 1005 algorithms. You can move a process onto or off an
1004 an "isolated" CPU is via the CPU affinity syscalls. 1006 "isolated" CPU via the CPU affinity syscalls or cpuset.
1005 <cpu number> begins at 0 and the maximum value is 1007 <cpu number> begins at 0 and the maximum value is
1006 "number of CPUs in system - 1". 1008 "number of CPUs in system - 1".
1007 1009
@@ -1470,8 +1472,6 @@ and is between 256 and 4096 characters. It is defined in the file
1470 Valid arguments: on, off 1472 Valid arguments: on, off
1471 Default: on 1473 Default: on
1472 1474
1473 noirqbalance [X86-32,SMP,KNL] Disable kernel irq balancing
1474
1475 noirqdebug [X86-32] Disables the code which attempts to detect and 1475 noirqdebug [X86-32] Disables the code which attempts to detect and
1476 disable unhandled interrupt sources. 1476 disable unhandled interrupt sources.
1477 1477
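
As a worked example of the CPU-list format documented in the first hunk above, a value such as "1,4-6" (an illustrative value, not taken from the patch) combines a single CPU with a range: CPU 1 and CPUs 4 through 6 are then excluded from the general SMP balancing and scheduling algorithms, and tasks can only be placed on them via the CPU affinity syscalls or cpusets.
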
diff --git a/MAINTAINERS b/MAINTAINERS
index d643e862b8e4..7e6a17e1de09 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -721,7 +721,7 @@ W: http://sourceforge.net/projects/acpi4asus
721W: http://xf.iksaif.net/acpi4asus 721W: http://xf.iksaif.net/acpi4asus
722S: Maintained 722S: Maintained
723 723
724ASYNCHRONOUS TRANSFERS/TRANSFORMS API 724ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API
725P: Dan Williams 725P: Dan Williams
726M: dan.j.williams@intel.com 726M: dan.j.williams@intel.com
727P: Maciej Sosnowski 727P: Maciej Sosnowski
diff --git a/Makefile b/Makefile
index 29abe62ccbad..7f9ff9bf1544 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 28 3SUBLEVEL = 28
4EXTRAVERSION = -rc3 4EXTRAVERSION = -rc4
5NAME = Killer Bat of Doom 5NAME = Killer Bat of Doom
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index e6ab550bceb3..8977d99987cb 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -21,7 +21,7 @@ config OPROFILE_IBS
21 Instruction-Based Sampling (IBS) is a new profiling 21 Instruction-Based Sampling (IBS) is a new profiling
22 technique that provides rich, precise program performance 22 technique that provides rich, precise program performance
23 information. IBS is introduced by AMD Family10h processors 23 information. IBS is introduced by AMD Family10h processors
24 (AMD Opteron Quad-Core processor Barcelona) to overcome 24 (AMD Opteron Quad-Core processor "Barcelona") to overcome
25 the limitations of conventional performance counter 25 the limitations of conventional performance counter
26 sampling. 26 sampling.
27 27
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 809ff9ab853a..77764301844b 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -44,10 +44,10 @@
44 * The module space lives between the addresses given by TASK_SIZE 44 * The module space lives between the addresses given by TASK_SIZE
45 * and PAGE_OFFSET - it must be within 32MB of the kernel text. 45 * and PAGE_OFFSET - it must be within 32MB of the kernel text.
46 */ 46 */
47#define MODULE_END (PAGE_OFFSET) 47#define MODULES_END (PAGE_OFFSET)
48#define MODULE_START (MODULE_END - 16*1048576) 48#define MODULES_VADDR (MODULES_END - 16*1048576)
49 49
50#if TASK_SIZE > MODULE_START 50#if TASK_SIZE > MODULES_VADDR
51#error Top of user space clashes with start of module space 51#error Top of user space clashes with start of module space
52#endif 52#endif
53 53
@@ -56,7 +56,7 @@
56 * Since we use sections to map it, this macro replaces the physical address 56 * Since we use sections to map it, this macro replaces the physical address
57 * with its virtual address while keeping offset from the base section. 57 * with its virtual address while keeping offset from the base section.
58 */ 58 */
59#define XIP_VIRT_ADDR(physaddr) (MODULE_START + ((physaddr) & 0x000fffff)) 59#define XIP_VIRT_ADDR(physaddr) (MODULES_VADDR + ((physaddr) & 0x000fffff))
60 60
61/* 61/*
62 * Allow 16MB-aligned ioremap pages 62 * Allow 16MB-aligned ioremap pages
@@ -94,8 +94,8 @@
94/* 94/*
95 * The module can be at any place in ram in nommu mode. 95 * The module can be at any place in ram in nommu mode.
96 */ 96 */
97#define MODULE_END (END_MEM) 97#define MODULES_END (END_MEM)
98#define MODULE_START (PHYS_OFFSET) 98#define MODULES_VADDR (PHYS_OFFSET)
99 99
100#endif /* !CONFIG_MMU */ 100#endif /* !CONFIG_MMU */
101 101
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 7aad78420f18..568020b34e3e 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -42,6 +42,10 @@
42#define CR_U (1 << 22) /* Unaligned access operation */ 42#define CR_U (1 << 22) /* Unaligned access operation */
43#define CR_XP (1 << 23) /* Extended page tables */ 43#define CR_XP (1 << 23) /* Extended page tables */
44#define CR_VE (1 << 24) /* Vectored interrupts */ 44#define CR_VE (1 << 24) /* Vectored interrupts */
45#define CR_EE (1 << 25) /* Exception (Big) Endian */
46#define CR_TRE (1 << 28) /* TEX remap enable */
47#define CR_AFE (1 << 29) /* Access flag enable */
48#define CR_TE (1 << 30) /* Thumb exception enable */
45 49
46/* 50/*
47 * This is used to ensure the compiler did actually allocate the register we 51 * This is used to ensure the compiler did actually allocate the register we
diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c
index 513f332f040d..84849098c8e8 100644
--- a/arch/arm/kernel/elf.c
+++ b/arch/arm/kernel/elf.c
@@ -21,12 +21,16 @@ int elf_check_arch(const struct elf32_hdr *x)
21 21
22 eflags = x->e_flags; 22 eflags = x->e_flags;
23 if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN) { 23 if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN) {
24 unsigned int flt_fmt;
25
24 /* APCS26 is only allowed if the CPU supports it */ 26 /* APCS26 is only allowed if the CPU supports it */
25 if ((eflags & EF_ARM_APCS_26) && !(elf_hwcap & HWCAP_26BIT)) 27 if ((eflags & EF_ARM_APCS_26) && !(elf_hwcap & HWCAP_26BIT))
26 return 0; 28 return 0;
27 29
30 flt_fmt = eflags & (EF_ARM_VFP_FLOAT | EF_ARM_SOFT_FLOAT);
31
28 /* VFP requires the supporting code */ 32 /* VFP requires the supporting code */
29 if ((eflags & EF_ARM_VFP_FLOAT) && !(elf_hwcap & HWCAP_VFP)) 33 if (flt_fmt == EF_ARM_VFP_FLOAT && !(elf_hwcap & HWCAP_VFP))
30 return 0; 34 return 0;
31 } 35 }
32 return 1; 36 return 1;
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 9203ba7d58ee..b8d965dcd6fd 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -26,12 +26,12 @@
26/* 26/*
27 * The XIP kernel text is mapped in the module area for modules and 27 * The XIP kernel text is mapped in the module area for modules and
28 * some other stuff to work without any indirect relocations. 28 * some other stuff to work without any indirect relocations.
29 * MODULE_START is redefined here and not in asm/memory.h to avoid 29 * MODULES_VADDR is redefined here and not in asm/memory.h to avoid
30 * recompiling the whole kernel when CONFIG_XIP_KERNEL is turned on/off. 30 * recompiling the whole kernel when CONFIG_XIP_KERNEL is turned on/off.
31 */ 31 */
32extern void _etext; 32extern void _etext;
33#undef MODULE_START 33#undef MODULES_VADDR
34#define MODULE_START (((unsigned long)&_etext + ~PGDIR_MASK) & PGDIR_MASK) 34#define MODULES_VADDR (((unsigned long)&_etext + ~PGDIR_MASK) & PGDIR_MASK)
35#endif 35#endif
36 36
37#ifdef CONFIG_MMU 37#ifdef CONFIG_MMU
@@ -43,7 +43,7 @@ void *module_alloc(unsigned long size)
43 if (!size) 43 if (!size)
44 return NULL; 44 return NULL;
45 45
46 area = __get_vm_area(size, VM_ALLOC, MODULE_START, MODULE_END); 46 area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
47 if (!area) 47 if (!area)
48 return NULL; 48 return NULL;
49 49
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 763bdbeaf681..2249049c1d5a 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -429,18 +429,16 @@ void __init gpmc_init(void)
429 gpmc_l3_clk = clk_get(NULL, ck); 429 gpmc_l3_clk = clk_get(NULL, ck);
430 if (IS_ERR(gpmc_l3_clk)) { 430 if (IS_ERR(gpmc_l3_clk)) {
431 printk(KERN_ERR "Could not get GPMC clock %s\n", ck); 431 printk(KERN_ERR "Could not get GPMC clock %s\n", ck);
432 return -ENODEV; 432 BUG();
433 } 433 }
434 434
435 gpmc_base = ioremap(l, SZ_4K); 435 gpmc_base = ioremap(l, SZ_4K);
436 if (!gpmc_base) { 436 if (!gpmc_base) {
437 clk_put(gpmc_l3_clk); 437 clk_put(gpmc_l3_clk);
438 printk(KERN_ERR "Could not get GPMC register memory\n"); 438 printk(KERN_ERR "Could not get GPMC register memory\n");
439 return -ENOMEM; 439 BUG();
440 } 440 }
441 441
442 BUG_ON(IS_ERR(gpmc_l3_clk));
443
444 l = gpmc_read_reg(GPMC_REVISION); 442 l = gpmc_read_reg(GPMC_REVISION);
445 printk(KERN_INFO "GPMC revision %d.%d\n", (l >> 4) & 0x0f, l & 0x0f); 443 printk(KERN_INFO "GPMC revision %d.%d\n", (l >> 4) & 0x0f, l & 0x0f);
446 /* Set smart idle mode and automatic L3 clock gating */ 444 /* Set smart idle mode and automatic L3 clock gating */
diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c
index 10b1bae1a258..464de893a988 100644
--- a/arch/arm/mm/cache-xsc3l2.c
+++ b/arch/arm/mm/cache-xsc3l2.c
@@ -98,7 +98,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
98 /* 98 /*
99 * Clean and invalidate partial last cache line. 99 * Clean and invalidate partial last cache line.
100 */ 100 */
101 if (end & (CACHE_LINE_SIZE - 1)) { 101 if (start < end && (end & (CACHE_LINE_SIZE - 1))) {
102 xsc3_l2_clean_pa(end & ~(CACHE_LINE_SIZE - 1)); 102 xsc3_l2_clean_pa(end & ~(CACHE_LINE_SIZE - 1));
103 xsc3_l2_inv_pa(end & ~(CACHE_LINE_SIZE - 1)); 103 xsc3_l2_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
104 end &= ~(CACHE_LINE_SIZE - 1); 104 end &= ~(CACHE_LINE_SIZE - 1);
@@ -107,7 +107,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
107 /* 107 /*
108 * Invalidate all full cache lines between 'start' and 'end'. 108 * Invalidate all full cache lines between 'start' and 'end'.
109 */ 109 */
110 while (start != end) { 110 while (start < end) {
111 xsc3_l2_inv_pa(start); 111 xsc3_l2_inv_pa(start);
112 start += CACHE_LINE_SIZE; 112 start += CACHE_LINE_SIZE;
113 } 113 }
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 8ba754064559..e63db11f16a8 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -180,20 +180,20 @@ void adjust_cr(unsigned long mask, unsigned long set)
180#endif 180#endif
181 181
182#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE 182#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
183#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_XN|PMD_SECT_AP_WRITE 183#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
184 184
185static struct mem_type mem_types[] = { 185static struct mem_type mem_types[] = {
186 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */ 186 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
187 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED | 187 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
188 L_PTE_SHARED, 188 L_PTE_SHARED,
189 .prot_l1 = PMD_TYPE_TABLE, 189 .prot_l1 = PMD_TYPE_TABLE,
190 .prot_sect = PROT_SECT_DEVICE | PMD_SECT_UNCACHED, 190 .prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
191 .domain = DOMAIN_IO, 191 .domain = DOMAIN_IO,
192 }, 192 },
193 [MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */ 193 [MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
194 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED, 194 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
195 .prot_l1 = PMD_TYPE_TABLE, 195 .prot_l1 = PMD_TYPE_TABLE,
196 .prot_sect = PROT_SECT_DEVICE | PMD_SECT_TEX(2), 196 .prot_sect = PROT_SECT_DEVICE,
197 .domain = DOMAIN_IO, 197 .domain = DOMAIN_IO,
198 }, 198 },
199 [MT_DEVICE_CACHED] = { /* ioremap_cached */ 199 [MT_DEVICE_CACHED] = { /* ioremap_cached */
@@ -205,7 +205,7 @@ static struct mem_type mem_types[] = {
205 [MT_DEVICE_WC] = { /* ioremap_wc */ 205 [MT_DEVICE_WC] = { /* ioremap_wc */
206 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC, 206 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
207 .prot_l1 = PMD_TYPE_TABLE, 207 .prot_l1 = PMD_TYPE_TABLE,
208 .prot_sect = PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE, 208 .prot_sect = PROT_SECT_DEVICE,
209 .domain = DOMAIN_IO, 209 .domain = DOMAIN_IO,
210 }, 210 },
211 [MT_CACHECLEAN] = { 211 [MT_CACHECLEAN] = {
@@ -273,22 +273,23 @@ static void __init build_mem_type_table(void)
273#endif 273#endif
274 274
275 /* 275 /*
276 * On non-Xscale3 ARMv5-and-older systems, use CB=01 276 * Strip out features not present on earlier architectures.
277 * (Uncached/Buffered) for ioremap_wc() mappings. On XScale3 277 * Pre-ARMv5 CPUs don't have TEX bits. Pre-ARMv6 CPUs or those
278 * and ARMv6+, use TEXCB=00100 mappings (Inner/Outer Uncacheable 278 * without extended page tables don't have the 'Shared' bit.
279 * in xsc3 parlance, Uncached Normal in ARMv6 parlance).
280 */ 279 */
281 if (cpu_is_xsc3() || cpu_arch >= CPU_ARCH_ARMv6) { 280 if (cpu_arch < CPU_ARCH_ARMv5)
282 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1); 281 for (i = 0; i < ARRAY_SIZE(mem_types); i++)
283 mem_types[MT_DEVICE_WC].prot_sect &= ~PMD_SECT_BUFFERABLE; 282 mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
284 } 283 if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
284 for (i = 0; i < ARRAY_SIZE(mem_types); i++)
285 mem_types[i].prot_sect &= ~PMD_SECT_S;
285 286
286 /* 287 /*
287 * ARMv5 and lower, bit 4 must be set for page tables. 288 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
288 * (was: cache "update-able on write" bit on ARM610) 289 * "update-able on write" bit on ARM610). However, Xscale and
289 * However, Xscale cores require this bit to be cleared. 290 * Xscale3 require this bit to be cleared.
290 */ 291 */
291 if (cpu_is_xscale()) { 292 if (cpu_is_xscale() || cpu_is_xsc3()) {
292 for (i = 0; i < ARRAY_SIZE(mem_types); i++) { 293 for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
293 mem_types[i].prot_sect &= ~PMD_BIT4; 294 mem_types[i].prot_sect &= ~PMD_BIT4;
294 mem_types[i].prot_l1 &= ~PMD_BIT4; 295 mem_types[i].prot_l1 &= ~PMD_BIT4;
@@ -302,6 +303,64 @@ static void __init build_mem_type_table(void)
302 } 303 }
303 } 304 }
304 305
306 /*
307 * Mark the device areas according to the CPU/architecture.
308 */
309 if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
310 if (!cpu_is_xsc3()) {
311 /*
312 * Mark device regions on ARMv6+ as execute-never
313 * to prevent speculative instruction fetches.
314 */
315 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
316 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
317 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
318 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
319 }
320 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
321 /*
322 * For ARMv7 with TEX remapping,
323 * - shared device is SXCB=1100
324 * - nonshared device is SXCB=0100
325 * - write combine device mem is SXCB=0001
326 * (Uncached Normal memory)
327 */
328 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
329 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
330 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
331 } else if (cpu_is_xsc3()) {
332 /*
333 * For Xscale3,
334 * - shared device is TEXCB=00101
335 * - nonshared device is TEXCB=01000
336 * - write combine device mem is TEXCB=00100
337 * (Inner/Outer Uncacheable in xsc3 parlance)
338 */
339 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
340 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
341 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
342 } else {
343 /*
344 * For ARMv6 and ARMv7 without TEX remapping,
345 * - shared device is TEXCB=00001
346 * - nonshared device is TEXCB=01000
347 * - write combine device mem is TEXCB=00100
348 * (Uncached Normal in ARMv6 parlance).
349 */
350 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
351 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
352 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
353 }
354 } else {
355 /*
356 * On others, write combining is "Uncached/Buffered"
357 */
358 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
359 }
360
361 /*
362 * Now deal with the memory-type mappings
363 */
305 cp = &cache_policies[cachepolicy]; 364 cp = &cache_policies[cachepolicy];
306 vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; 365 vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
307 366
@@ -317,12 +376,8 @@ static void __init build_mem_type_table(void)
317 * Enable CPU-specific coherency if supported. 376 * Enable CPU-specific coherency if supported.
318 * (Only available on XSC3 at the moment.) 377 * (Only available on XSC3 at the moment.)
319 */ 378 */
320 if (arch_is_coherent()) { 379 if (arch_is_coherent() && cpu_is_xsc3())
321 if (cpu_is_xsc3()) { 380 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
322 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
323 mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
324 }
325 }
326 381
327 /* 382 /*
328 * ARMv6 and above have extended page tables. 383 * ARMv6 and above have extended page tables.
@@ -336,11 +391,6 @@ static void __init build_mem_type_table(void)
336 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; 391 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
337 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; 392 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
338 393
339 /*
340 * Mark the device area as "shared device"
341 */
342 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
343
344#ifdef CONFIG_SMP 394#ifdef CONFIG_SMP
345 /* 395 /*
346 * Mark memory with the "shared" attribute for SMP systems 396 * Mark memory with the "shared" attribute for SMP systems
@@ -360,9 +410,6 @@ static void __init build_mem_type_table(void)
360 mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot; 410 mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
361 mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot; 411 mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
362 412
363 if (cpu_arch < CPU_ARCH_ARMv5)
364 mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
365
366 pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot); 413 pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
367 pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | 414 pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
368 L_PTE_DIRTY | L_PTE_WRITE | 415 L_PTE_DIRTY | L_PTE_WRITE |
@@ -654,7 +701,7 @@ static inline void prepare_page_table(struct meminfo *mi)
654 /* 701 /*
655 * Clear out all the mappings below the kernel image. 702 * Clear out all the mappings below the kernel image.
656 */ 703 */
657 for (addr = 0; addr < MODULE_START; addr += PGDIR_SIZE) 704 for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
658 pmd_clear(pmd_off_k(addr)); 705 pmd_clear(pmd_off_k(addr));
659 706
660#ifdef CONFIG_XIP_KERNEL 707#ifdef CONFIG_XIP_KERNEL
@@ -766,7 +813,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
766 */ 813 */
767#ifdef CONFIG_XIP_KERNEL 814#ifdef CONFIG_XIP_KERNEL
768 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK); 815 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
769 map.virtual = MODULE_START; 816 map.virtual = MODULES_VADDR;
770 map.length = ((unsigned long)&_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK; 817 map.length = ((unsigned long)&_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
771 map.type = MT_ROM; 818 map.type = MT_ROM;
772 create_mapping(&map); 819 create_mapping(&map);
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 07f82db70945..4d3c0a73e7fb 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -115,7 +115,7 @@ ENTRY(cpu_v7_set_pte_ext)
115 orr r3, r3, r2 115 orr r3, r3, r2
116 orr r3, r3, #PTE_EXT_AP0 | 2 116 orr r3, r3, #PTE_EXT_AP0 | 2
117 117
118 tst r2, #1 << 4 118 tst r1, #1 << 4
119 orrne r3, r3, #PTE_EXT_TEX(1) 119 orrne r3, r3, #PTE_EXT_TEX(1)
120 120
121 tst r1, #L_PTE_WRITE 121 tst r1, #L_PTE_WRITE
@@ -192,11 +192,11 @@ __v7_setup:
192 mov pc, lr @ return to head.S:__ret 192 mov pc, lr @ return to head.S:__ret
193ENDPROC(__v7_setup) 193ENDPROC(__v7_setup)
194 194
195 /* 195 /* AT
196 * V X F I D LR 196 * TFR EV X F I D LR
197 * .... ...E PUI. .T.T 4RVI ZFRS BLDP WCAM 197 * .EEE ..EE PUI. .T.T 4RVI ZFRS BLDP WCAM
198 * rrrr rrrx xxx0 0101 xxxx xxxx x111 xxxx < forced 198 * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
199 * 0 110 0011 1.00 .111 1101 < we want 199 * 1 0 110 0011 1.00 .111 1101 < we want
200 */ 200 */
201 .type v7_crval, #object 201 .type v7_crval, #object
202v7_crval: 202v7_crval:
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index bf6a10c5fc4f..be6aab9c6834 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -428,23 +428,23 @@ static int clk_debugfs_register_one(struct clk *c)
428 if (c->id != 0) 428 if (c->id != 0)
429 sprintf(p, ":%d", c->id); 429 sprintf(p, ":%d", c->id);
430 d = debugfs_create_dir(s, pa ? pa->dent : clk_debugfs_root); 430 d = debugfs_create_dir(s, pa ? pa->dent : clk_debugfs_root);
431 if (IS_ERR(d)) 431 if (!d)
432 return PTR_ERR(d); 432 return -ENOMEM;
433 c->dent = d; 433 c->dent = d;
434 434
435 d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount); 435 d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
436 if (IS_ERR(d)) { 436 if (!d) {
437 err = PTR_ERR(d); 437 err = -ENOMEM;
438 goto err_out; 438 goto err_out;
439 } 439 }
440 d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate); 440 d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
441 if (IS_ERR(d)) { 441 if (!d) {
442 err = PTR_ERR(d); 442 err = -ENOMEM;
443 goto err_out; 443 goto err_out;
444 } 444 }
445 d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags); 445 d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
446 if (IS_ERR(d)) { 446 if (!d) {
447 err = PTR_ERR(d); 447 err = -ENOMEM;
448 goto err_out; 448 goto err_out;
449 } 449 }
450 return 0; 450 return 0;
@@ -483,8 +483,8 @@ static int __init clk_debugfs_init(void)
483 int err; 483 int err;
484 484
485 d = debugfs_create_dir("clock", NULL); 485 d = debugfs_create_dir("clock", NULL);
486 if (IS_ERR(d)) 486 if (!d)
487 return PTR_ERR(d); 487 return -ENOMEM;
488 clk_debugfs_root = d; 488 clk_debugfs_root = d;
489 489
490 list_for_each_entry(c, &clocks, node) { 490 list_for_each_entry(c, &clocks, node) {
diff --git a/arch/arm/plat-omap/include/mach/entry-macro.S b/arch/arm/plat-omap/include/mach/entry-macro.S
index 030118ee204a..2276f89671d8 100644
--- a/arch/arm/plat-omap/include/mach/entry-macro.S
+++ b/arch/arm/plat-omap/include/mach/entry-macro.S
@@ -65,7 +65,8 @@
65#include <mach/omap34xx.h> 65#include <mach/omap34xx.h>
66#endif 66#endif
67 67
68#define INTCPS_SIR_IRQ_OFFSET 0x0040 /* Active interrupt number */ 68#define INTCPS_SIR_IRQ_OFFSET 0x0040 /* Active interrupt offset */
69#define ACTIVEIRQ_MASK 0x7f /* Active interrupt bits */
69 70
70 .macro disable_fiq 71 .macro disable_fiq
71 .endm 72 .endm
@@ -88,6 +89,7 @@
88 cmp \irqnr, #0x0 89 cmp \irqnr, #0x0
892222: 902222:
90 ldrne \irqnr, [\base, #INTCPS_SIR_IRQ_OFFSET] 91 ldrne \irqnr, [\base, #INTCPS_SIR_IRQ_OFFSET]
92 and \irqnr, \irqnr, #ACTIVEIRQ_MASK /* Clear spurious bits */
91 93
92 .endm 94 .endm
93 95
diff --git a/arch/arm/plat-omap/include/mach/irqs.h b/arch/arm/plat-omap/include/mach/irqs.h
index a2929ac8c687..bed5274c910a 100644
--- a/arch/arm/plat-omap/include/mach/irqs.h
+++ b/arch/arm/plat-omap/include/mach/irqs.h
@@ -372,7 +372,7 @@
372 372
373/* External TWL4030 gpio interrupts are optional */ 373/* External TWL4030 gpio interrupts are optional */
374#define TWL4030_GPIO_IRQ_BASE TWL4030_PWR_IRQ_END 374#define TWL4030_GPIO_IRQ_BASE TWL4030_PWR_IRQ_END
375#ifdef CONFIG_TWL4030_GPIO 375#ifdef CONFIG_GPIO_TWL4030
376#define TWL4030_GPIO_NR_IRQS 18 376#define TWL4030_GPIO_NR_IRQS 18
377#else 377#else
378#define TWL4030_GPIO_NR_IRQS 0 378#define TWL4030_GPIO_NR_IRQS 0
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 27eec71429b0..6bd91ed7cd03 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -148,6 +148,7 @@ config IA64_GENERIC
148 select ACPI_NUMA 148 select ACPI_NUMA
149 select SWIOTLB 149 select SWIOTLB
150 select PCI_MSI 150 select PCI_MSI
151 select DMAR
151 help 152 help
152 This selects the system type of your hardware. A "generic" kernel 153 This selects the system type of your hardware. A "generic" kernel
153 will run on any supported IA-64 system. However, if you configure 154 will run on any supported IA-64 system. However, if you configure
@@ -585,7 +586,7 @@ source "fs/Kconfig.binfmt"
585 586
586endmenu 587endmenu
587 588
588menu "Power management and ACPI" 589menu "Power management and ACPI options"
589 590
590source "kernel/power/Kconfig" 591source "kernel/power/Kconfig"
591 592
@@ -641,6 +642,8 @@ source "net/Kconfig"
641 642
642source "drivers/Kconfig" 643source "drivers/Kconfig"
643 644
645source "arch/ia64/hp/sim/Kconfig"
646
644config MSPEC 647config MSPEC
645 tristate "Memory special operations driver" 648 tristate "Memory special operations driver"
646 depends on IA64 649 depends on IA64
@@ -652,6 +655,12 @@ config MSPEC
652 655
653source "fs/Kconfig" 656source "fs/Kconfig"
654 657
658source "arch/ia64/Kconfig.debug"
659
660source "security/Kconfig"
661
662source "crypto/Kconfig"
663
655source "arch/ia64/kvm/Kconfig" 664source "arch/ia64/kvm/Kconfig"
656 665
657source "lib/Kconfig" 666source "lib/Kconfig"
@@ -678,11 +687,3 @@ config IRQ_PER_CPU
678 687
679config IOMMU_HELPER 688config IOMMU_HELPER
680 def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC || SWIOTLB) 689 def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC || SWIOTLB)
681
682source "arch/ia64/hp/sim/Kconfig"
683
684source "arch/ia64/Kconfig.debug"
685
686source "security/Kconfig"
687
688source "crypto/Kconfig"
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 88b6e6f3fd88..2769dbfd03bf 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -13,19 +13,12 @@
13 */ 13 */
14 14
15#include <linux/device.h> 15#include <linux/device.h>
16#include <linux/swiotlb.h>
16 17
17#include <asm/machvec.h> 18#include <asm/machvec.h>
18 19
19/* swiotlb declarations & definitions: */ 20/* swiotlb declarations & definitions: */
20extern int swiotlb_late_init_with_default_size (size_t size); 21extern int swiotlb_late_init_with_default_size (size_t size);
21extern ia64_mv_dma_alloc_coherent swiotlb_alloc_coherent;
22extern ia64_mv_dma_free_coherent swiotlb_free_coherent;
23extern ia64_mv_dma_map_single_attrs swiotlb_map_single_attrs;
24extern ia64_mv_dma_unmap_single_attrs swiotlb_unmap_single_attrs;
25extern ia64_mv_dma_map_sg_attrs swiotlb_map_sg_attrs;
26extern ia64_mv_dma_unmap_sg_attrs swiotlb_unmap_sg_attrs;
27extern ia64_mv_dma_supported swiotlb_dma_supported;
28extern ia64_mv_dma_mapping_error swiotlb_dma_mapping_error;
29 22
30/* hwiommu declarations & definitions: */ 23/* hwiommu declarations & definitions: */
31 24
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 7f257507cd86..0d9d16e2d949 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -434,28 +434,4 @@ extern void memset_io(volatile void __iomem *s, int c, long n);
434 434
435# endif /* __KERNEL__ */ 435# endif /* __KERNEL__ */
436 436
437/*
438 * Enabling BIO_VMERGE_BOUNDARY forces us to turn off I/O MMU bypassing. It is said that
439 * BIO-level virtual merging can give up to 4% performance boost (not verified for ia64).
440 * On the other hand, we know that I/O MMU bypassing gives ~8% performance improvement on
441 * SPECweb-like workloads on zx1-based machines. Thus, for now we favor I/O MMU bypassing
442 * over BIO-level virtual merging.
443 */
444extern unsigned long ia64_max_iommu_merge_mask;
445#if 1
446#define BIO_VMERGE_BOUNDARY 0
447#else
448/*
449 * It makes no sense at all to have this BIO_VMERGE_BOUNDARY macro here. Should be
450 * replaced by dma_merge_mask() or something of that sort. Note: the only way
451 * BIO_VMERGE_BOUNDARY is used is to mask off bits. Effectively, our definition gets
452 * expanded into:
453 *
454 * addr & ((ia64_max_iommu_merge_mask + 1) - 1) == (addr & ia64_max_iommu_vmerge_mask)
455 *
456 * which is precisely what we want.
457 */
458#define BIO_VMERGE_BOUNDARY (ia64_max_iommu_merge_mask + 1)
459#endif
460
461#endif /* _ASM_IA64_IO_H */ 437#endif /* _ASM_IA64_IO_H */
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index 1ea28bcee33b..59c17e446683 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -11,6 +11,7 @@
11#define _ASM_IA64_MACHVEC_H 11#define _ASM_IA64_MACHVEC_H
12 12
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/swiotlb.h>
14 15
15/* forward declarations: */ 16/* forward declarations: */
16struct device; 17struct device;
@@ -298,27 +299,6 @@ extern void machvec_init_from_cmdline(const char *cmdline);
298# endif /* CONFIG_IA64_GENERIC */ 299# endif /* CONFIG_IA64_GENERIC */
299 300
300/* 301/*
301 * Declare default routines which aren't declared anywhere else:
302 */
303extern ia64_mv_dma_init swiotlb_init;
304extern ia64_mv_dma_alloc_coherent swiotlb_alloc_coherent;
305extern ia64_mv_dma_free_coherent swiotlb_free_coherent;
306extern ia64_mv_dma_map_single swiotlb_map_single;
307extern ia64_mv_dma_map_single_attrs swiotlb_map_single_attrs;
308extern ia64_mv_dma_unmap_single swiotlb_unmap_single;
309extern ia64_mv_dma_unmap_single_attrs swiotlb_unmap_single_attrs;
310extern ia64_mv_dma_map_sg swiotlb_map_sg;
311extern ia64_mv_dma_map_sg_attrs swiotlb_map_sg_attrs;
312extern ia64_mv_dma_unmap_sg swiotlb_unmap_sg;
313extern ia64_mv_dma_unmap_sg_attrs swiotlb_unmap_sg_attrs;
314extern ia64_mv_dma_sync_single_for_cpu swiotlb_sync_single_for_cpu;
315extern ia64_mv_dma_sync_sg_for_cpu swiotlb_sync_sg_for_cpu;
316extern ia64_mv_dma_sync_single_for_device swiotlb_sync_single_for_device;
317extern ia64_mv_dma_sync_sg_for_device swiotlb_sync_sg_for_device;
318extern ia64_mv_dma_mapping_error swiotlb_dma_mapping_error;
319extern ia64_mv_dma_supported swiotlb_dma_supported;
320
321/*
322 * Define default versions so we can extend machvec for new platforms without having 302 * Define default versions so we can extend machvec for new platforms without having
323 * to update the machvec files for all existing platforms. 303 * to update the machvec files for all existing platforms.
324 */ 304 */
diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h
index 6bc96ee54327..c0cea375620a 100644
--- a/arch/ia64/include/asm/meminit.h
+++ b/arch/ia64/include/asm/meminit.h
@@ -48,7 +48,6 @@ extern int reserve_elfcorehdr(unsigned long *start, unsigned long *end);
48 */ 48 */
49#define GRANULEROUNDDOWN(n) ((n) & ~(IA64_GRANULE_SIZE-1)) 49#define GRANULEROUNDDOWN(n) ((n) & ~(IA64_GRANULE_SIZE-1))
50#define GRANULEROUNDUP(n) (((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1)) 50#define GRANULEROUNDUP(n) (((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1))
51#define ORDERROUNDDOWN(n) ((n) & ~((PAGE_SIZE<<MAX_ORDER)-1))
52 51
53#ifdef CONFIG_NUMA 52#ifdef CONFIG_NUMA
54 extern void call_pernode_memory (unsigned long start, unsigned long len, void *func); 53 extern void call_pernode_memory (unsigned long start, unsigned long len, void *func);
diff --git a/arch/ia64/include/asm/sal.h b/arch/ia64/include/asm/sal.h
index ea310c0812aa..966797a97c94 100644
--- a/arch/ia64/include/asm/sal.h
+++ b/arch/ia64/include/asm/sal.h
@@ -337,11 +337,24 @@ typedef struct sal_log_record_header {
337#define sal_log_severity_fatal 1 337#define sal_log_severity_fatal 1
338#define sal_log_severity_corrected 2 338#define sal_log_severity_corrected 2
339 339
340/*
341 * Error Recovery Info (ERI) bit decode. From SAL Spec section B.2.2 Table B-3
342 * Error Section Error_Recovery_Info Field Definition.
343 */
344#define ERI_NOT_VALID 0x0 /* Error Recovery Field is not valid */
345#define ERI_NOT_ACCESSIBLE 0x30 /* Resource not accessible */
346#define ERI_CONTAINMENT_WARN 0x22 /* Corrupt data propagated */
347#define ERI_UNCORRECTED_ERROR 0x20 /* Uncorrected error */
348#define ERI_COMPONENT_RESET 0x24 /* Component must be reset */
349#define ERI_CORR_ERROR_LOG 0x21 /* Corrected error, needs logging */
350#define ERI_CORR_ERROR_THRESH 0x29 /* Corrected error threshold exceeded */
351
340/* Definition of log section header structures */ 352/* Definition of log section header structures */
341typedef struct sal_log_sec_header { 353typedef struct sal_log_sec_header {
342 efi_guid_t guid; /* Unique Section ID */ 354 efi_guid_t guid; /* Unique Section ID */
343 sal_log_revision_t revision; /* Major and Minor revision of Section */ 355 sal_log_revision_t revision; /* Major and Minor revision of Section */
344 u16 reserved; 356 u8 error_recovery_info; /* Platform error recovery status */
357 u8 reserved;
345 u32 len; /* Section length */ 358 u32 len; /* Section length */
346} sal_log_section_hdr_t; 359} sal_log_section_hdr_t;
347 360
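
The new error_recovery_info field is meant to be compared against the ERI_* codes defined above. A minimal sketch of such a check (the helper name and the policy it encodes are assumptions for illustration, not part of this patch):

	/* Assumed helper: flag sections whose ERI code demands a component reset. */
	static int sal_section_needs_reset(const sal_log_section_hdr_t *sh)
	{
		if (sh->error_recovery_info == ERI_NOT_VALID)
			return 0;	/* platform did not fill in the field */
		return sh->error_recovery_info == ERI_COMPONENT_RESET;
	}
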
diff --git a/arch/ia64/include/asm/sn/sn_sal.h b/arch/ia64/include/asm/sn/sn_sal.h
index 57e649d388b8..e310fc0135dc 100644
--- a/arch/ia64/include/asm/sn/sn_sal.h
+++ b/arch/ia64/include/asm/sn/sn_sal.h
@@ -90,6 +90,8 @@
90#define SN_SAL_SET_CPU_NUMBER 0x02000068 90#define SN_SAL_SET_CPU_NUMBER 0x02000068
91 91
92#define SN_SAL_KERNEL_LAUNCH_EVENT 0x02000069 92#define SN_SAL_KERNEL_LAUNCH_EVENT 0x02000069
93#define SN_SAL_WATCHLIST_ALLOC 0x02000070
94#define SN_SAL_WATCHLIST_FREE 0x02000071
93 95
94/* 96/*
95 * Service-specific constants 97 * Service-specific constants
@@ -1185,4 +1187,47 @@ ia64_sn_kernel_launch_event(void)
1185 SAL_CALL_NOLOCK(rv, SN_SAL_KERNEL_LAUNCH_EVENT, 0, 0, 0, 0, 0, 0, 0); 1187 SAL_CALL_NOLOCK(rv, SN_SAL_KERNEL_LAUNCH_EVENT, 0, 0, 0, 0, 0, 0, 0);
1186 return rv.status; 1188 return rv.status;
1187} 1189}
1190
1191union sn_watchlist_u {
1192 u64 val;
1193 struct {
1194 u64 blade : 16,
1195 size : 32,
1196 filler : 16;
1197 };
1198};
1199
1200static inline int
1201sn_mq_watchlist_alloc(int blade, void *mq, unsigned int mq_size,
1202 unsigned long *intr_mmr_offset)
1203{
1204 struct ia64_sal_retval rv;
1205 unsigned long addr;
1206 union sn_watchlist_u size_blade;
1207 int watchlist;
1208
1209 addr = (unsigned long)mq;
1210 size_blade.size = mq_size;
1211 size_blade.blade = blade;
1212
1213 /*
1214 * bios returns watchlist number or negative error number.
1215 */
1216 ia64_sal_oemcall_nolock(&rv, SN_SAL_WATCHLIST_ALLOC, addr,
1217 size_blade.val, (u64)intr_mmr_offset,
1218 (u64)&watchlist, 0, 0, 0);
1219 if (rv.status < 0)
1220 return rv.status;
1221
1222 return watchlist;
1223}
1224
1225static inline int
1226sn_mq_watchlist_free(int blade, int watchlist_num)
1227{
1228 struct ia64_sal_retval rv;
1229 ia64_sal_oemcall_nolock(&rv, SN_SAL_WATCHLIST_FREE, blade,
1230 watchlist_num, 0, 0, 0, 0, 0);
1231 return rv.status;
1232}
1188#endif /* _ASM_IA64_SN_SN_SAL_H */ 1233#endif /* _ASM_IA64_SN_SN_SAL_H */
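
A caller of the new watchlist wrappers might look roughly like the sketch below; the function name, blade/queue parameters and error handling are illustrative assumptions, and only the sn_mq_watchlist_alloc()/sn_mq_watchlist_free() signatures come from the patch:

	/* Assumed usage sketch of the SAL watchlist helpers added above. */
	static int example_setup_watchlist(int blade, void *mq, unsigned int mq_size)
	{
		unsigned long intr_mmr_offset;
		int watchlist;

		/* Returns a watchlist number, or a negative SAL error code. */
		watchlist = sn_mq_watchlist_alloc(blade, mq, mq_size,
						  &intr_mmr_offset);
		if (watchlist < 0)
			return watchlist;

		/* ... use the memory queue, then release the watchlist ... */
		return sn_mq_watchlist_free(blade, watchlist);
	}
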
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 0635015d0aaa..bd7acc71e8a9 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -678,6 +678,30 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
678 return 0; 678 return 0;
679} 679}
680 680
681int __init early_acpi_boot_init(void)
682{
683 int ret;
684
685 /*
686 * do a partial walk of MADT to determine how many CPUs
687 * we have including offline CPUs
688 */
689 if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
690 printk(KERN_ERR PREFIX "Can't find MADT\n");
691 return 0;
692 }
693
694 ret = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC,
695 acpi_parse_lsapic, NR_CPUS);
696 if (ret < 1)
697 printk(KERN_ERR PREFIX
698 "Error parsing MADT - no LAPIC entries\n");
699
700 return 0;
701}
702
703
704
681int __init acpi_boot_init(void) 705int __init acpi_boot_init(void)
682{ 706{
683 707
@@ -701,11 +725,6 @@ int __init acpi_boot_init(void)
701 printk(KERN_ERR PREFIX 725 printk(KERN_ERR PREFIX
702 "Error parsing LAPIC address override entry\n"); 726 "Error parsing LAPIC address override entry\n");
703 727
704 if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC, acpi_parse_lsapic, NR_CPUS)
705 < 1)
706 printk(KERN_ERR PREFIX
707 "Error parsing MADT - no LAPIC entries\n");
708
709 if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0) 728 if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0)
710 < 0) 729 < 0)
711 printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n"); 730 printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index 031abbf9c875..dbdb778efa05 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -12,13 +12,11 @@
12#include <asm/machvec.h> 12#include <asm/machvec.h>
13#include <linux/dma-mapping.h> 13#include <linux/dma-mapping.h>
14 14
15#include <asm/machvec.h>
16#include <asm/system.h> 15#include <asm/system.h>
17 16
18#ifdef CONFIG_DMAR 17#ifdef CONFIG_DMAR
19 18
20#include <linux/kernel.h> 19#include <linux/kernel.h>
21#include <linux/string.h>
22 20
23#include <asm/page.h> 21#include <asm/page.h>
24#include <asm/iommu.h> 22#include <asm/iommu.h>
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index ae7911702bf8..865af27c7737 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -359,7 +359,7 @@ reserve_memory (void)
359 } 359 }
360#endif 360#endif
361 361
362#ifdef CONFIG_CRASH_KERNEL 362#ifdef CONFIG_CRASH_DUMP
363 if (reserve_elfcorehdr(&rsvd_region[n].start, 363 if (reserve_elfcorehdr(&rsvd_region[n].start,
364 &rsvd_region[n].end) == 0) 364 &rsvd_region[n].end) == 0)
365 n++; 365 n++;
@@ -561,8 +561,12 @@ setup_arch (char **cmdline_p)
561#ifdef CONFIG_ACPI 561#ifdef CONFIG_ACPI
562 /* Initialize the ACPI boot-time table parser */ 562 /* Initialize the ACPI boot-time table parser */
563 acpi_table_init(); 563 acpi_table_init();
564 early_acpi_boot_init();
564# ifdef CONFIG_ACPI_NUMA 565# ifdef CONFIG_ACPI_NUMA
565 acpi_numa_init(); 566 acpi_numa_init();
567#ifdef CONFIG_ACPI_HOTPLUG_CPU
568 prefill_possible_map();
569#endif
566 per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ? 570 per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
567 32 : cpus_weight(early_cpu_possible_map)), 571 32 : cpus_weight(early_cpu_possible_map)),
568 additional_cpus > 0 ? additional_cpus : 0); 572 additional_cpus > 0 ? additional_cpus : 0);
@@ -853,9 +857,6 @@ void __init
853setup_per_cpu_areas (void) 857setup_per_cpu_areas (void)
854{ 858{
855 /* start_kernel() requires this... */ 859 /* start_kernel() requires this... */
856#ifdef CONFIG_ACPI_HOTPLUG_CPU
857 prefill_possible_map();
858#endif
859} 860}
860 861
861/* 862/*
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index d8c5fcd89e5b..d85ba98d9008 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -635,7 +635,6 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
635 (min(end, __pa(MAX_DMA_ADDRESS)) - start) >>PAGE_SHIFT; 635 (min(end, __pa(MAX_DMA_ADDRESS)) - start) >>PAGE_SHIFT;
636#endif 636#endif
637 start = GRANULEROUNDDOWN(start); 637 start = GRANULEROUNDDOWN(start);
638 start = ORDERROUNDDOWN(start);
639 end = GRANULEROUNDUP(end); 638 end = GRANULEROUNDUP(end);
640 mem_data[node].max_pfn = max(mem_data[node].max_pfn, 639 mem_data[node].max_pfn = max(mem_data[node].max_pfn,
641 end >> PAGE_SHIFT); 640 end >> PAGE_SHIFT);
diff --git a/arch/ia64/uv/kernel/setup.c b/arch/ia64/uv/kernel/setup.c
index cf5f28ae96c4..7a5ae633198b 100644
--- a/arch/ia64/uv/kernel/setup.c
+++ b/arch/ia64/uv/kernel/setup.c
@@ -19,6 +19,12 @@ EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);
19 19
20#ifdef CONFIG_IA64_SGI_UV 20#ifdef CONFIG_IA64_SGI_UV
21int sn_prom_type; 21int sn_prom_type;
22long sn_partition_id;
23EXPORT_SYMBOL(sn_partition_id);
24long sn_coherency_id;
25EXPORT_SYMBOL_GPL(sn_coherency_id);
26long sn_region_size;
27EXPORT_SYMBOL(sn_region_size);
22#endif 28#endif
23 29
24struct redir_addr { 30struct redir_addr {
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index fdf088f2430e..7b4cefa2199b 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -16,6 +16,7 @@
16#include <linux/kexec.h> 16#include <linux/kexec.h>
17#include <linux/crash_dump.h> 17#include <linux/crash_dump.h>
18 18
19#include <asm/kexec.h>
19#include <asm/reg.h> 20#include <asm/reg.h>
20#include <asm/io.h> 21#include <asm/io.h>
21#include <asm/prom.h> 22#include <asm/prom.h>
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 31481dc485de..7190493e9bdc 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -189,7 +189,6 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
189{ 189{
190 struct pci_controller *phb; 190 struct pci_controller *phb;
191 int primary; 191 int primary;
192 struct pci_bus *b;
193 192
194 primary = list_empty(&hose_list); 193 primary = list_empty(&hose_list);
195 phb = pcibios_alloc_controller(dn); 194 phb = pcibios_alloc_controller(dn);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e60c59b81bdd..4cf0ab13d187 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1494,7 +1494,7 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
1494 def_bool X86_64 1494 def_bool X86_64
1495 depends on NUMA 1495 depends on NUMA
1496 1496
1497menu "Power management options" 1497menu "Power management and ACPI options"
1498 depends on !X86_VOYAGER 1498 depends on !X86_VOYAGER
1499 1499
1500config ARCH_HIBERNATION_HEADER 1500config ARCH_HIBERNATION_HEADER
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index d843ed0e9b2e..0005adb0f941 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -101,30 +101,22 @@
101#define LAST_VM86_IRQ 15 101#define LAST_VM86_IRQ 15
102#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15) 102#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
103 103
104#ifdef CONFIG_X86_64 104#if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_X86_VOYAGER)
105# if NR_CPUS < MAX_IO_APICS 105# if NR_CPUS < MAX_IO_APICS
106# define NR_IRQS (NR_VECTORS + (32 * NR_CPUS)) 106# define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
107# else 107# else
108# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS)) 108# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
109# endif 109# endif
110 110
111#elif !defined(CONFIG_X86_VOYAGER) 111#elif defined(CONFIG_X86_VOYAGER)
112 112
113# if defined(CONFIG_X86_IO_APIC) || defined(CONFIG_PARAVIRT) || defined(CONFIG_X86_VISWS) 113# define NR_IRQS 224
114
115# define NR_IRQS 224
116
117# else /* IO_APIC || PARAVIRT */
118
119# define NR_IRQS 16
120
121# endif
122 114
123#else /* !VISWS && !VOYAGER */ 115#else /* IO_APIC || VOYAGER */
124 116
125# define NR_IRQS 224 117# define NR_IRQS 16
126 118
127#endif /* VISWS */ 119#endif
128 120
129/* Voyager specific defines */ 121/* Voyager specific defines */
130/* These define the CPIs we use in linux */ 122/* These define the CPIs we use in linux */
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 46be2fa7ac26..c2a812ebde89 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -108,9 +108,7 @@ static __always_inline unsigned long long __native_read_tsc(void)
108{ 108{
109 DECLARE_ARGS(val, low, high); 109 DECLARE_ARGS(val, low, high);
110 110
111 rdtsc_barrier();
112 asm volatile("rdtsc" : EAX_EDX_RET(val, low, high)); 111 asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));
113 rdtsc_barrier();
114 112
115 return EAX_EDX_VAL(val, low, high); 113 return EAX_EDX_VAL(val, low, high);
116} 114}
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 90ac7718469a..4850e4b02b61 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -154,7 +154,7 @@ extern unsigned long node_remap_size[];
154 154
155#endif 155#endif
156 156
157/* sched_domains SD_NODE_INIT for NUMAQ machines */ 157/* sched_domains SD_NODE_INIT for NUMA machines */
158#define SD_NODE_INIT (struct sched_domain) { \ 158#define SD_NODE_INIT (struct sched_domain) { \
159 .min_interval = 8, \ 159 .min_interval = 8, \
160 .max_interval = 32, \ 160 .max_interval = 32, \
@@ -169,8 +169,9 @@ extern unsigned long node_remap_size[];
169 .flags = SD_LOAD_BALANCE \ 169 .flags = SD_LOAD_BALANCE \
170 | SD_BALANCE_EXEC \ 170 | SD_BALANCE_EXEC \
171 | SD_BALANCE_FORK \ 171 | SD_BALANCE_FORK \
172 | SD_SERIALIZE \ 172 | SD_WAKE_AFFINE \
173 | SD_WAKE_BALANCE, \ 173 | SD_WAKE_BALANCE \
174 | SD_SERIALIZE, \
174 .last_balance = jiffies, \ 175 .last_balance = jiffies, \
175 .balance_interval = 1, \ 176 .balance_interval = 1, \
176} 177}
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 38ae163cc91b..9cd83a8e40d5 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -34,6 +34,8 @@ static inline cycles_t get_cycles(void)
34 34
35static __always_inline cycles_t vget_cycles(void) 35static __always_inline cycles_t vget_cycles(void)
36{ 36{
37 cycles_t cycles;
38
37 /* 39 /*
38 * We only do VDSOs on TSC capable CPUs, so this shouldnt 40 * We only do VDSOs on TSC capable CPUs, so this shouldnt
39 * access boot_cpu_data (which is not VDSO-safe): 41 * access boot_cpu_data (which is not VDSO-safe):
@@ -42,7 +44,11 @@ static __always_inline cycles_t vget_cycles(void)
42 if (!cpu_has_tsc) 44 if (!cpu_has_tsc)
43 return 0; 45 return 0;
44#endif 46#endif
45 return (cycles_t)__native_read_tsc(); 47 rdtsc_barrier();
48 cycles = (cycles_t)__native_read_tsc();
49 rdtsc_barrier();
50
51 return cycles;
46} 52}
47 53
48extern void tsc_init(void); 54extern void tsc_init(void);
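The msr.h and tsc.h hunks above move the rdtsc_barrier() calls out of __native_read_tsc() and into vget_cycles(), so only the vsyscall/clocksource read path pays for the serializing fences while other rdtsc users stay cheap. For illustration, a user-space sketch of the same fencing pattern, using GCC/Clang intrinsics as stand-ins (assumed toolchain; this is not the kernel's rdtsc_barrier()):

    #include <stdint.h>
    #include <x86intrin.h>          /* __rdtsc(), _mm_lfence() */

    /*
     * Fenced TSC read: keep earlier loads from slipping past RDTSC and
     * keep RDTSC from being hoisted above later loads, mirroring the
     * rdtsc_barrier(); rdtsc; rdtsc_barrier() sequence added above.
     */
    static inline uint64_t fenced_rdtsc(void)
    {
            uint64_t t;

            _mm_lfence();
            t = __rdtsc();
            _mm_lfence();
            return t;
    }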
diff --git a/arch/x86/include/asm/voyager.h b/arch/x86/include/asm/voyager.h
index 9c811d2e6f91..b3e647307625 100644
--- a/arch/x86/include/asm/voyager.h
+++ b/arch/x86/include/asm/voyager.h
@@ -520,6 +520,7 @@ extern void voyager_restart(void);
520extern void voyager_cat_power_off(void); 520extern void voyager_cat_power_off(void);
521extern void voyager_cat_do_common_interrupt(void); 521extern void voyager_cat_do_common_interrupt(void);
522extern void voyager_handle_nmi(void); 522extern void voyager_handle_nmi(void);
523extern void voyager_smp_intr_init(void);
523/* Commands for the following are */ 524/* Commands for the following are */
524#define VOYAGER_PSI_READ 0 525#define VOYAGER_PSI_READ 0
525#define VOYAGER_PSI_WRITE 1 526#define VOYAGER_PSI_WRITE 1
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index a8fd9ebdc8e2..331b318304eb 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -50,7 +50,7 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
50/* returns !0 if the IOMMU is caching non-present entries in its TLB */ 50/* returns !0 if the IOMMU is caching non-present entries in its TLB */
51static int iommu_has_npcache(struct amd_iommu *iommu) 51static int iommu_has_npcache(struct amd_iommu *iommu)
52{ 52{
53 return iommu->cap & IOMMU_CAP_NPCACHE; 53 return iommu->cap & (1UL << IOMMU_CAP_NPCACHE);
54} 54}
55 55
56/**************************************************************************** 56/****************************************************************************
@@ -536,6 +536,9 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
536{ 536{
537 address >>= PAGE_SHIFT; 537 address >>= PAGE_SHIFT;
538 iommu_area_free(dom->bitmap, address, pages); 538 iommu_area_free(dom->bitmap, address, pages);
539
540 if (address + pages >= dom->next_bit)
541 dom->need_flush = true;
539} 542}
540 543
541/**************************************************************************** 544/****************************************************************************
@@ -992,8 +995,10 @@ static void __unmap_single(struct amd_iommu *iommu,
992 995
993 dma_ops_free_addresses(dma_dom, dma_addr, pages); 996 dma_ops_free_addresses(dma_dom, dma_addr, pages);
994 997
995 if (amd_iommu_unmap_flush) 998 if (amd_iommu_unmap_flush || dma_dom->need_flush) {
996 iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size); 999 iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
1000 dma_dom->need_flush = false;
1001 }
997} 1002}
998 1003
999/* 1004/*
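Two things change in amd_iommu.c above: iommu_has_npcache() now treats IOMMU_CAP_NPCACHE as a bit number and tests (1UL << IOMMU_CAP_NPCACHE) instead of using the constant as a mask, and unmaps become lazily flushed. The idea behind the need_flush logic, as the hunks read, is that the address allocator hands out ranges in increasing order from next_bit, so a range freed below next_bit cannot be reused before the allocator wraps; only a free that reaches next_bit or beyond forces the IOTLB flush on the next unmap. A minimal sketch of that decision rule (simplified, hypothetical struct; 4 KiB pages assumed):

    #include <stdbool.h>

    struct dma_domain_sketch {
            unsigned long next_bit;         /* next allocation index, in pages */
            bool          need_flush;
    };

    /* Called when [address, address + pages) is returned to the allocator. */
    static void note_free(struct dma_domain_sketch *dom,
                          unsigned long address, unsigned long pages)
    {
            address >>= 12;                 /* bytes -> 4 KiB pages */

            if (address + pages >= dom->next_bit)
                    dom->need_flush = true; /* may be re-handed out soon */
    }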
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index b764d7429c61..7a3f2028e2eb 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -3611,6 +3611,8 @@ int __init probe_nr_irqs(void)
3611 /* something wrong ? */ 3611 /* something wrong ? */
3612 if (nr < nr_min) 3612 if (nr < nr_min)
3613 nr = nr_min; 3613 nr = nr_min;
3614 if (WARN_ON(nr > NR_IRQS))
3615 nr = NR_IRQS;
3614 3616
3615 return nr; 3617 return nr;
3616} 3618}
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index f4c93f1cfc19..724adfc63cb9 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -29,11 +29,7 @@ EXPORT_SYMBOL(pm_power_off);
29 29
30static const struct desc_ptr no_idt = {}; 30static const struct desc_ptr no_idt = {};
31static int reboot_mode; 31static int reboot_mode;
32/* 32enum reboot_type reboot_type = BOOT_KBD;
33 * Keyboard reset and triple fault may result in INIT, not RESET, which
34 * doesn't work when we're in vmx root mode. Try ACPI first.
35 */
36enum reboot_type reboot_type = BOOT_ACPI;
37int reboot_force; 33int reboot_force;
38 34
39#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) 35#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c
index e00534b33534..f4049f3513b6 100644
--- a/arch/x86/kernel/tlb_32.c
+++ b/arch/x86/kernel/tlb_32.c
@@ -154,6 +154,12 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
154 flush_mm = mm; 154 flush_mm = mm;
155 flush_va = va; 155 flush_va = va;
156 cpus_or(flush_cpumask, cpumask, flush_cpumask); 156 cpus_or(flush_cpumask, cpumask, flush_cpumask);
157
158 /*
159 * Make the above memory operations globally visible before
160 * sending the IPI.
161 */
162 smp_mb();
157 /* 163 /*
158 * We have to send the IPI only to 164 * We have to send the IPI only to
159 * CPUs affected. 165 * CPUs affected.
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c
index dcbf7a1159ea..8f919ca69494 100644
--- a/arch/x86/kernel/tlb_64.c
+++ b/arch/x86/kernel/tlb_64.c
@@ -183,6 +183,11 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
183 cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask); 183 cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);
184 184
185 /* 185 /*
186 * Make the above memory operations globally visible before
187 * sending the IPI.
188 */
189 smp_mb();
190 /*
186 * We have to send the IPI only to 191 * We have to send the IPI only to
187 * CPUs affected. 192 * CPUs affected.
188 */ 193 */
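Both tlb_32.c and tlb_64.c gain the same smp_mb(): the flush parameters (flush_mm, flush_va, the cpumask) must be globally visible before the IPI goes out, otherwise a remote CPU could take the interrupt and read stale values. A portable user-space analogue of that publish-then-notify ordering, written with C11 atomics purely as an illustration (not the kernel code):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct flush_request {
            void          *mm;              /* payload published before notifying */
            unsigned long  va;
            atomic_bool    pending;         /* stands in for the IPI */
    };

    static void post_request(struct flush_request *r, void *mm, unsigned long va)
    {
            r->mm = mm;
            r->va = va;
            /* Full fence: make the stores above visible before "sending". */
            atomic_thread_fence(memory_order_seq_cst);
            atomic_store_explicit(&r->pending, true, memory_order_relaxed);
    }

    static bool take_request(struct flush_request *r, void **mm, unsigned long *va)
    {
            if (!atomic_load_explicit(&r->pending, memory_order_acquire))
                    return false;
            *mm = r->mm;                    /* sees the published values */
            *va = r->va;
            return true;
    }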
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 62348e4fd8d1..424093b157d3 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -55,7 +55,7 @@ u64 native_sched_clock(void)
55 rdtscll(this_offset); 55 rdtscll(this_offset);
56 56
57 /* return the value in ns */ 57 /* return the value in ns */
58 return cycles_2_ns(this_offset); 58 return __cycles_2_ns(this_offset);
59} 59}
60 60
61/* We need to define a real function for sched_clock, to override the 61/* We need to define a real function for sched_clock, to override the
@@ -813,10 +813,6 @@ void __init tsc_init(void)
813 cpu_khz = calibrate_cpu(); 813 cpu_khz = calibrate_cpu();
814#endif 814#endif
815 815
816 lpj = ((u64)tsc_khz * 1000);
817 do_div(lpj, HZ);
818 lpj_fine = lpj;
819
820 printk("Detected %lu.%03lu MHz processor.\n", 816 printk("Detected %lu.%03lu MHz processor.\n",
821 (unsigned long)cpu_khz / 1000, 817 (unsigned long)cpu_khz / 1000,
822 (unsigned long)cpu_khz % 1000); 818 (unsigned long)cpu_khz % 1000);
@@ -836,6 +832,10 @@ void __init tsc_init(void)
836 /* now allow native_sched_clock() to use rdtsc */ 832 /* now allow native_sched_clock() to use rdtsc */
837 tsc_disabled = 0; 833 tsc_disabled = 0;
838 834
835 lpj = ((u64)tsc_khz * 1000);
836 do_div(lpj, HZ);
837 lpj_fine = lpj;
838
839 use_tsc_delay(); 839 use_tsc_delay();
840 /* Check and install the TSC clocksource */ 840 /* Check and install the TSC clocksource */
841 dmi_check_system(bad_tsc_dmi_table); 841 dmi_check_system(bad_tsc_dmi_table);
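The tsc.c hunk also moves the lpj_fine setup later in tsc_init(), so the loops-per-jiffy hint is only published once the TSC has passed its checks. The value itself is simple arithmetic, lpj = tsc_khz * 1000 / HZ, i.e. TSC ticks per timer tick. A worked example with assumed figures (a 2.4 GHz TSC and HZ=1000):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t tsc_khz = 2400000;             /* illustrative: 2.4 GHz TSC */
            unsigned int hz  = 1000;                /* illustrative CONFIG_HZ */
            uint64_t lpj = tsc_khz * 1000 / hz;     /* ticks per jiffy */

            printf("lpj_fine = %llu\n", (unsigned long long)lpj);   /* 2400000 */
            return 0;
    }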
diff --git a/arch/x86/mach-voyager/setup.c b/arch/x86/mach-voyager/setup.c
index 6bbdd633864c..a580b9562e76 100644
--- a/arch/x86/mach-voyager/setup.c
+++ b/arch/x86/mach-voyager/setup.c
@@ -27,7 +27,7 @@ static struct irqaction irq2 = {
27void __init intr_init_hook(void) 27void __init intr_init_hook(void)
28{ 28{
29#ifdef CONFIG_SMP 29#ifdef CONFIG_SMP
30 smp_intr_init(); 30 voyager_smp_intr_init();
31#endif 31#endif
32 32
33 setup_irq(2, &irq2); 33 setup_irq(2, &irq2);
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 7f4c6af14351..0e331652681e 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -1258,7 +1258,7 @@ static void handle_vic_irq(unsigned int irq, struct irq_desc *desc)
1258#define QIC_SET_GATE(cpi, vector) \ 1258#define QIC_SET_GATE(cpi, vector) \
1259 set_intr_gate((cpi) + QIC_DEFAULT_CPI_BASE, (vector)) 1259 set_intr_gate((cpi) + QIC_DEFAULT_CPI_BASE, (vector))
1260 1260
1261void __init smp_intr_init(void) 1261void __init voyager_smp_intr_init(void)
1262{ 1262{
1263 int i; 1263 int i;
1264 1264
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index f1dc1b75d166..e89d24815f26 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -67,18 +67,18 @@ static void split_page_count(int level)
67 67
68void arch_report_meminfo(struct seq_file *m) 68void arch_report_meminfo(struct seq_file *m)
69{ 69{
70 seq_printf(m, "DirectMap4k: %8lu kB\n", 70 seq_printf(m, "DirectMap4k: %8lu kB\n",
71 direct_pages_count[PG_LEVEL_4K] << 2); 71 direct_pages_count[PG_LEVEL_4K] << 2);
72#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) 72#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
73 seq_printf(m, "DirectMap2M: %8lu kB\n", 73 seq_printf(m, "DirectMap2M: %8lu kB\n",
74 direct_pages_count[PG_LEVEL_2M] << 11); 74 direct_pages_count[PG_LEVEL_2M] << 11);
75#else 75#else
76 seq_printf(m, "DirectMap4M: %8lu kB\n", 76 seq_printf(m, "DirectMap4M: %8lu kB\n",
77 direct_pages_count[PG_LEVEL_2M] << 12); 77 direct_pages_count[PG_LEVEL_2M] << 12);
78#endif 78#endif
79#ifdef CONFIG_X86_64 79#ifdef CONFIG_X86_64
80 if (direct_gbpages) 80 if (direct_gbpages)
81 seq_printf(m, "DirectMap1G: %8lu kB\n", 81 seq_printf(m, "DirectMap1G: %8lu kB\n",
82 direct_pages_count[PG_LEVEL_1G] << 20); 82 direct_pages_count[PG_LEVEL_1G] << 20);
83#endif 83#endif
84} 84}
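For reference, the shifts in arch_report_meminfo() above convert page counts to kB: a 4 KiB page is 4 kB (<< 2), a 2 MiB page is 2048 kB (<< 11), a 4 MiB page is 4096 kB (<< 12) and a 1 GiB page is 1048576 kB (<< 20). A quick check of that arithmetic:

    #include <assert.h>

    int main(void)
    {
            assert((1UL << 2)  == 4UL);             /* 4 KiB page  ->       4 kB */
            assert((1UL << 11) == 2048UL);          /* 2 MiB page  ->    2048 kB */
            assert((1UL << 12) == 4096UL);          /* 4 MiB page  ->    4096 kB */
            assert((1UL << 20) == 1048576UL);       /* 1 GiB page  -> 1048576 kB */
            return 0;
    }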
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 0620d6d45f7d..3f1b81a83e2e 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -27,8 +27,7 @@ static int num_counters = 2;
27static int counter_width = 32; 27static int counter_width = 32;
28 28
29#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0) 29#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
30#define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0) 30#define CTR_OVERFLOWED(n) (!((n) & (1ULL<<(counter_width-1))))
31#define CTR_OVERFLOWED(n) (!((n) & (1U<<(counter_width-1))))
32 31
33#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0) 32#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
34#define CTRL_READ(l, h, msrs, c) do {rdmsr((msrs->controls[(c)].addr), (l), (h)); } while (0) 33#define CTRL_READ(l, h, msrs, c) do {rdmsr((msrs->controls[(c)].addr), (l), (h)); } while (0)
@@ -124,14 +123,14 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs)
124static int ppro_check_ctrs(struct pt_regs * const regs, 123static int ppro_check_ctrs(struct pt_regs * const regs,
125 struct op_msrs const * const msrs) 124 struct op_msrs const * const msrs)
126{ 125{
127 unsigned int low, high; 126 u64 val;
128 int i; 127 int i;
129 128
130 for (i = 0 ; i < num_counters; ++i) { 129 for (i = 0 ; i < num_counters; ++i) {
131 if (!reset_value[i]) 130 if (!reset_value[i])
132 continue; 131 continue;
133 CTR_READ(low, high, msrs, i); 132 rdmsrl(msrs->counters[i].addr, val);
134 if (CTR_OVERFLOWED(low)) { 133 if (CTR_OVERFLOWED(val)) {
135 oprofile_add_sample(regs, i); 134 oprofile_add_sample(regs, i);
136 wrmsrl(msrs->counters[i].addr, -reset_value[i]); 135 wrmsrl(msrs->counters[i].addr, -reset_value[i]);
137 } 136 }
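The op_model_ppro.c hunk reads the counters with rdmsrl() into a u64 and widens the overflow mask from 1U to 1ULL, since counter_width can exceed 32 on later CPUs (e.g. 40-bit architectural perfmon counters). With counter_width = 40, (1U << 39) is undefined behaviour in C, while (1ULL << 39) tests the intended top bit: the counter is armed at -reset_value and has overflowed once that bit clears. A small illustration under that 40-bit assumption:

    #include <stdint.h>
    #include <stdio.h>

    #define CTR_OVERFLOWED(n, width)  (!((n) & (1ULL << ((width) - 1))))

    int main(void)
    {
            int counter_width = 40;                         /* assumed width */
            uint64_t armed   = ((1ULL << counter_width) - 1) - 99;  /* -100 in 40 bits */
            uint64_t wrapped = armed + 100;                 /* counted past zero */

            printf("%d %d\n", CTR_OVERFLOWED(armed, counter_width),
                              CTR_OVERFLOWED(wrapped, counter_width));  /* 0 1 */
            return 0;
    }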
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index b61534c7a4c4..5e4686d70f62 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -863,15 +863,16 @@ static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned l
863 if (PagePinned(virt_to_page(mm->pgd))) { 863 if (PagePinned(virt_to_page(mm->pgd))) {
864 SetPagePinned(page); 864 SetPagePinned(page);
865 865
866 vm_unmap_aliases();
866 if (!PageHighMem(page)) { 867 if (!PageHighMem(page)) {
867 make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn))); 868 make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
868 if (level == PT_PTE && USE_SPLIT_PTLOCKS) 869 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
869 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); 870 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
870 } else 871 } else {
871 /* make sure there are no stray mappings of 872 /* make sure there are no stray mappings of
872 this page */ 873 this page */
873 kmap_flush_unused(); 874 kmap_flush_unused();
874 vm_unmap_aliases(); 875 }
875 } 876 }
876} 877}
877 878
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index aba77b2b7d18..688936044dc9 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -850,13 +850,16 @@ static int xen_pin_page(struct mm_struct *mm, struct page *page,
850 read-only, and can be pinned. */ 850 read-only, and can be pinned. */
851static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd) 851static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
852{ 852{
853 vm_unmap_aliases();
854
853 xen_mc_batch(); 855 xen_mc_batch();
854 856
855 if (xen_pgd_walk(mm, xen_pin_page, USER_LIMIT)) { 857 if (xen_pgd_walk(mm, xen_pin_page, USER_LIMIT)) {
856 /* re-enable interrupts for kmap_flush_unused */ 858 /* re-enable interrupts for flushing */
857 xen_mc_issue(0); 859 xen_mc_issue(0);
860
858 kmap_flush_unused(); 861 kmap_flush_unused();
859 vm_unmap_aliases(); 862
860 xen_mc_batch(); 863 xen_mc_batch();
861 } 864 }
862 865
@@ -874,7 +877,7 @@ static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
874#else /* CONFIG_X86_32 */ 877#else /* CONFIG_X86_32 */
875#ifdef CONFIG_X86_PAE 878#ifdef CONFIG_X86_PAE
876 /* Need to make sure unshared kernel PMD is pinnable */ 879 /* Need to make sure unshared kernel PMD is pinnable */
877 xen_pin_page(mm, virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])), 880 xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
878 PT_PMD); 881 PT_PMD);
879#endif 882#endif
880 xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd))); 883 xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
@@ -991,7 +994,7 @@ static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
991 994
992#ifdef CONFIG_X86_PAE 995#ifdef CONFIG_X86_PAE
993 /* Need to make sure unshared kernel PMD is unpinned */ 996 /* Need to make sure unshared kernel PMD is unpinned */
994 xen_unpin_page(mm, virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])), 997 xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
995 PT_PMD); 998 PT_PMD);
996#endif 999#endif
997 1000
diff --git a/block/blk-core.c b/block/blk-core.c
index c3df30cfb3fc..10e8a64a5a5b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1770,8 +1770,6 @@ static void end_that_request_last(struct request *req, int error)
1770{ 1770{
1771 struct gendisk *disk = req->rq_disk; 1771 struct gendisk *disk = req->rq_disk;
1772 1772
1773 blk_delete_timer(req);
1774
1775 if (blk_rq_tagged(req)) 1773 if (blk_rq_tagged(req))
1776 blk_queue_end_tag(req->q, req); 1774 blk_queue_end_tag(req->q, req);
1777 1775
@@ -1781,6 +1779,8 @@ static void end_that_request_last(struct request *req, int error)
1781 if (unlikely(laptop_mode) && blk_fs_request(req)) 1779 if (unlikely(laptop_mode) && blk_fs_request(req))
1782 laptop_io_completion(); 1780 laptop_io_completion();
1783 1781
1782 blk_delete_timer(req);
1783
1784 /* 1784 /*
1785 * Account IO completion. bar_rq isn't accounted as a normal 1785 * Account IO completion. bar_rq isn't accounted as a normal
1786 * IO on queueing nor completion. Accounting the containing 1786 * IO on queueing nor completion. Accounting the containing
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 8681cd6f9911..b92f5b0866b0 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -222,27 +222,6 @@ new_segment:
222} 222}
223EXPORT_SYMBOL(blk_rq_map_sg); 223EXPORT_SYMBOL(blk_rq_map_sg);
224 224
225static inline int ll_new_mergeable(struct request_queue *q,
226 struct request *req,
227 struct bio *bio)
228{
229 int nr_phys_segs = bio_phys_segments(q, bio);
230
231 if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
232 req->cmd_flags |= REQ_NOMERGE;
233 if (req == q->last_merge)
234 q->last_merge = NULL;
235 return 0;
236 }
237
238 /*
239 * A hw segment is just getting larger, bump just the phys
240 * counter.
241 */
242 req->nr_phys_segments += nr_phys_segs;
243 return 1;
244}
245
246static inline int ll_new_hw_segment(struct request_queue *q, 225static inline int ll_new_hw_segment(struct request_queue *q,
247 struct request *req, 226 struct request *req,
248 struct bio *bio) 227 struct bio *bio)
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 972a63f848fb..69185ea9fae2 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -75,14 +75,7 @@ void blk_delete_timer(struct request *req)
75{ 75{
76 struct request_queue *q = req->q; 76 struct request_queue *q = req->q;
77 77
78 /*
79 * Nothing to detach
80 */
81 if (!q->rq_timed_out_fn || !req->deadline)
82 return;
83
84 list_del_init(&req->timeout_list); 78 list_del_init(&req->timeout_list);
85
86 if (list_empty(&q->timeout_list)) 79 if (list_empty(&q->timeout_list))
87 del_timer(&q->timeout); 80 del_timer(&q->timeout);
88} 81}
@@ -142,7 +135,7 @@ void blk_rq_timed_out_timer(unsigned long data)
142 } 135 }
143 136
144 if (next_set && !list_empty(&q->timeout_list)) 137 if (next_set && !list_empty(&q->timeout_list))
145 mod_timer(&q->timeout, round_jiffies(next)); 138 mod_timer(&q->timeout, round_jiffies_up(next));
146 139
147 spin_unlock_irqrestore(q->queue_lock, flags); 140 spin_unlock_irqrestore(q->queue_lock, flags);
148} 141}
@@ -198,17 +191,10 @@ void blk_add_timer(struct request *req)
198 191
199 /* 192 /*
200 * If the timer isn't already pending or this timeout is earlier 193 * If the timer isn't already pending or this timeout is earlier
201 * than an existing one, modify the timer. Round to next nearest 194 * than an existing one, modify the timer. Round up to next nearest
202 * second. 195 * second.
203 */ 196 */
204 expiry = round_jiffies(req->deadline); 197 expiry = round_jiffies_up(req->deadline);
205
206 /*
207 * We use ->deadline == 0 to detect whether a timer was added or
208 * not, so just increase to next jiffy for that specific case
209 */
210 if (unlikely(!req->deadline))
211 req->deadline = 1;
212 198
213 if (!timer_pending(&q->timeout) || 199 if (!timer_pending(&q->timeout) ||
214 time_before(expiry, q->timeout.expires)) 200 time_before(expiry, q->timeout.expires))
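blk_add_timer() now rounds the deadline up with round_jiffies_up(), so a timeout can only fire late, never early, and the ->deadline == 0 special case disappears because blk_delete_timer() no longer uses it to decide whether a timer was armed. The arming rule itself is small: take the rounded expiry and only modify the shared queue timer if none is pending or the new expiry is sooner. A simplified sketch (hypothetical types, plain integer time, ignoring jiffies wraparound and the skew handling inside round_jiffies_up()):

    #include <stdbool.h>

    struct queue_timer {
            bool          pending;
            unsigned long expires;
    };

    static unsigned long round_up_to(unsigned long t, unsigned long gran)
    {
            return ((t + gran - 1) / gran) * gran;
    }

    /* Arm the shared timer only if nothing is pending or we expire sooner. */
    static void add_request_timer(struct queue_timer *qt,
                                  unsigned long deadline, unsigned long hz)
    {
            unsigned long expiry = round_up_to(deadline, hz);  /* next whole second */

            if (!qt->pending || expiry < qt->expires) {
                    qt->expires = expiry;
                    qt->pending = true;
            }
    }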
diff --git a/block/elevator.c b/block/elevator.c
index 59173a69ebdf..9ac82dde99dd 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -773,12 +773,6 @@ struct request *elv_next_request(struct request_queue *q)
773 */ 773 */
774 rq->cmd_flags |= REQ_STARTED; 774 rq->cmd_flags |= REQ_STARTED;
775 blk_add_trace_rq(q, rq, BLK_TA_ISSUE); 775 blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
776
777 /*
778 * We are now handing the request to the hardware,
779 * add the timeout handler
780 */
781 blk_add_timer(rq);
782 } 776 }
783 777
784 if (!q->boundary_rq || q->boundary_rq == rq) { 778 if (!q->boundary_rq || q->boundary_rq == rq) {
@@ -850,6 +844,12 @@ void elv_dequeue_request(struct request_queue *q, struct request *rq)
850 */ 844 */
851 if (blk_account_rq(rq)) 845 if (blk_account_rq(rq))
852 q->in_flight++; 846 q->in_flight++;
847
848 /*
849 * We are now handing the request to the hardware, add the
850 * timeout handler.
851 */
852 blk_add_timer(rq);
853} 853}
854EXPORT_SYMBOL(elv_dequeue_request); 854EXPORT_SYMBOL(elv_dequeue_request);
855 855
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 82af7011f2dd..0cd3ad497136 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1712,6 +1712,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
1712 else 1712 else
1713 tag = 0; 1713 tag = 0;
1714 1714
1715 if (test_and_set_bit(tag, &ap->qc_allocated))
1716 BUG();
1715 qc = __ata_qc_from_tag(ap, tag); 1717 qc = __ata_qc_from_tag(ap, tag);
1716 1718
1717 qc->tag = tag; 1719 qc->tag = tag;
@@ -4024,6 +4026,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4024 4026
4025 /* Weird ATAPI devices */ 4027 /* Weird ATAPI devices */
4026 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, 4028 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
4029 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
4027 4030
4028 /* Devices we expect to fail diagnostics */ 4031 /* Devices we expect to fail diagnostics */
4029 4032
@@ -4444,7 +4447,8 @@ int atapi_check_dma(struct ata_queued_cmd *qc)
4444 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a 4447 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4445 * few ATAPI devices choke on such DMA requests. 4448 * few ATAPI devices choke on such DMA requests.
4446 */ 4449 */
4447 if (unlikely(qc->nbytes & 15)) 4450 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4451 unlikely(qc->nbytes & 15))
4448 return 1; 4452 return 1;
4449 4453
4450 if (ap->ops->check_atapi_dma) 4454 if (ap->ops->check_atapi_dma)
@@ -4561,6 +4565,37 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
4561} 4565}
4562 4566
4563/** 4567/**
4568 * ata_qc_new - Request an available ATA command, for queueing
4569 * @ap: Port associated with device @dev
4570 * @dev: Device from whom we request an available command structure
4571 *
4572 * LOCKING:
4573 * None.
4574 */
4575
4576static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4577{
4578 struct ata_queued_cmd *qc = NULL;
4579 unsigned int i;
4580
4581 /* no command while frozen */
4582 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4583 return NULL;
4584
4585 /* the last tag is reserved for internal command. */
4586 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4587 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4588 qc = __ata_qc_from_tag(ap, i);
4589 break;
4590 }
4591
4592 if (qc)
4593 qc->tag = i;
4594
4595 return qc;
4596}
4597
4598/**
4564 * ata_qc_new_init - Request an available ATA command, and initialize it 4599 * ata_qc_new_init - Request an available ATA command, and initialize it
4565 * @dev: Device from whom we request an available command structure 4600 * @dev: Device from whom we request an available command structure
4566 * @tag: command tag 4601 * @tag: command tag
@@ -4569,20 +4604,16 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
4569 * None. 4604 * None.
4570 */ 4605 */
4571 4606
4572struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag) 4607struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4573{ 4608{
4574 struct ata_port *ap = dev->link->ap; 4609 struct ata_port *ap = dev->link->ap;
4575 struct ata_queued_cmd *qc; 4610 struct ata_queued_cmd *qc;
4576 4611
4577 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) 4612 qc = ata_qc_new(ap);
4578 return NULL;
4579
4580 qc = __ata_qc_from_tag(ap, tag);
4581 if (qc) { 4613 if (qc) {
4582 qc->scsicmd = NULL; 4614 qc->scsicmd = NULL;
4583 qc->ap = ap; 4615 qc->ap = ap;
4584 qc->dev = dev; 4616 qc->dev = dev;
4585 qc->tag = tag;
4586 4617
4587 ata_qc_reinit(qc); 4618 ata_qc_reinit(qc);
4588 } 4619 }
@@ -4590,6 +4621,31 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
4590 return qc; 4621 return qc;
4591} 4622}
4592 4623
4624/**
4625 * ata_qc_free - free unused ata_queued_cmd
4626 * @qc: Command to complete
4627 *
4628 * Designed to free unused ata_queued_cmd object
4629 * in case something prevents using it.
4630 *
4631 * LOCKING:
4632 * spin_lock_irqsave(host lock)
4633 */
4634void ata_qc_free(struct ata_queued_cmd *qc)
4635{
4636 struct ata_port *ap = qc->ap;
4637 unsigned int tag;
4638
4639 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4640
4641 qc->flags = 0;
4642 tag = qc->tag;
4643 if (likely(ata_tag_valid(tag))) {
4644 qc->tag = ATA_TAG_POISON;
4645 clear_bit(tag, &ap->qc_allocated);
4646 }
4647}
4648
4593void __ata_qc_complete(struct ata_queued_cmd *qc) 4649void __ata_qc_complete(struct ata_queued_cmd *qc)
4594{ 4650{
4595 struct ata_port *ap = qc->ap; 4651 struct ata_port *ap = qc->ap;
@@ -5934,7 +5990,7 @@ static void ata_port_detach(struct ata_port *ap)
5934 * to us. Restore SControl and disable all existing devices. 5990 * to us. Restore SControl and disable all existing devices.
5935 */ 5991 */
5936 __ata_port_for_each_link(link, ap) { 5992 __ata_port_for_each_link(link, ap) {
5937 sata_scr_write(link, SCR_CONTROL, link->saved_scontrol); 5993 sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
5938 ata_link_for_each_dev(dev, link) 5994 ata_link_for_each_dev(dev, link)
5939 ata_dev_disable(dev); 5995 ata_dev_disable(dev);
5940 } 5996 }
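The libata-core.c changes above stop borrowing the SCSI request tag and give libata its own allocator: ata_qc_new() scans ap->qc_allocated with test_and_set_bit(), keeps the last tag for the internal command, and ata_qc_free() (no longer a trivial inline) clears the bit again under the host lock. A user-space sketch of the same bitmap tag scheme, using C11 atomics in place of the kernel bitops and assuming 32 tags as libata does:

    #include <stdatomic.h>
    #include <stdint.h>

    #define MAX_QUEUE     32                /* tags 0..30 usable, 31 internal */
    #define TAG_INTERNAL  (MAX_QUEUE - 1)

    static atomic_uint_least32_t tag_map;   /* one bit per tag, 0 = free */

    /* Returns a free tag in [0, MAX_QUEUE - 2], or -1 if all are busy. */
    static int tag_alloc(void)
    {
            for (int i = 0; i < MAX_QUEUE - 1; i++) {
                    uint32_t bit = UINT32_C(1) << i;

                    if (!(atomic_fetch_or(&tag_map, bit) & bit))
                            return i;       /* the bit was clear and we set it */
            }
            return -1;
    }

    static void tag_free(int tag)
    {
            atomic_fetch_and(&tag_map, ~(UINT32_C(1) << tag));
    }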
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index bbb30d882f05..47c7afcb36f2 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -190,7 +190,7 @@ static ssize_t ata_scsi_park_show(struct device *device,
190 struct ata_port *ap; 190 struct ata_port *ap;
191 struct ata_link *link; 191 struct ata_link *link;
192 struct ata_device *dev; 192 struct ata_device *dev;
193 unsigned long flags; 193 unsigned long flags, now;
194 unsigned int uninitialized_var(msecs); 194 unsigned int uninitialized_var(msecs);
195 int rc = 0; 195 int rc = 0;
196 196
@@ -208,10 +208,11 @@ static ssize_t ata_scsi_park_show(struct device *device,
208 } 208 }
209 209
210 link = dev->link; 210 link = dev->link;
211 now = jiffies;
211 if (ap->pflags & ATA_PFLAG_EH_IN_PROGRESS && 212 if (ap->pflags & ATA_PFLAG_EH_IN_PROGRESS &&
212 link->eh_context.unloaded_mask & (1 << dev->devno) && 213 link->eh_context.unloaded_mask & (1 << dev->devno) &&
213 time_after(dev->unpark_deadline, jiffies)) 214 time_after(dev->unpark_deadline, now))
214 msecs = jiffies_to_msecs(dev->unpark_deadline - jiffies); 215 msecs = jiffies_to_msecs(dev->unpark_deadline - now);
215 else 216 else
216 msecs = 0; 217 msecs = 0;
217 218
@@ -708,11 +709,7 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
708{ 709{
709 struct ata_queued_cmd *qc; 710 struct ata_queued_cmd *qc;
710 711
711 if (cmd->request->tag != -1) 712 qc = ata_qc_new_init(dev);
712 qc = ata_qc_new_init(dev, cmd->request->tag);
713 else
714 qc = ata_qc_new_init(dev, 0);
715
716 if (qc) { 713 if (qc) {
717 qc->scsicmd = cmd; 714 qc->scsicmd = cmd;
718 qc->scsidone = done; 715 qc->scsidone = done;
@@ -1107,17 +1104,7 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
1107 1104
1108 depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id)); 1105 depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
1109 depth = min(ATA_MAX_QUEUE - 1, depth); 1106 depth = min(ATA_MAX_QUEUE - 1, depth);
1110 1107 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
1111 /*
1112 * If this device is behind a port multiplier, we have
1113 * to share the tag map between all devices on that PMP.
1114 * Set up the shared tag map here and we get automatic.
1115 */
1116 if (dev->link->ap->pmp_link)
1117 scsi_init_shared_tag_map(sdev->host, ATA_MAX_QUEUE - 1);
1118
1119 scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
1120 scsi_activate_tcq(sdev, depth);
1121 } 1108 }
1122 1109
1123 return 0; 1110 return 0;
@@ -1957,11 +1944,6 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
1957 hdr[1] |= (1 << 7); 1944 hdr[1] |= (1 << 7);
1958 1945
1959 memcpy(rbuf, hdr, sizeof(hdr)); 1946 memcpy(rbuf, hdr, sizeof(hdr));
1960
1961 /* if ncq, set tags supported */
1962 if (ata_id_has_ncq(args->id))
1963 rbuf[7] |= (1 << 1);
1964
1965 memcpy(&rbuf[8], "ATA ", 8); 1947 memcpy(&rbuf[8], "ATA ", 8);
1966 ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16); 1948 ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
1967 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4); 1949 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index d3831d39bdaa..fe2839e58774 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -74,7 +74,7 @@ extern struct ata_link *ata_dev_phys_link(struct ata_device *dev);
74extern void ata_force_cbl(struct ata_port *ap); 74extern void ata_force_cbl(struct ata_port *ap);
75extern u64 ata_tf_to_lba(const struct ata_taskfile *tf); 75extern u64 ata_tf_to_lba(const struct ata_taskfile *tf);
76extern u64 ata_tf_to_lba48(const struct ata_taskfile *tf); 76extern u64 ata_tf_to_lba48(const struct ata_taskfile *tf);
77extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag); 77extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
78extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev, 78extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
79 u64 block, u32 n_block, unsigned int tf_flags, 79 u64 block, u32 n_block, unsigned int tf_flags,
80 unsigned int tag); 80 unsigned int tag);
@@ -103,6 +103,7 @@ extern int ata_dev_configure(struct ata_device *dev);
103extern int sata_down_spd_limit(struct ata_link *link); 103extern int sata_down_spd_limit(struct ata_link *link);
104extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel); 104extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel);
105extern void ata_sg_clean(struct ata_queued_cmd *qc); 105extern void ata_sg_clean(struct ata_queued_cmd *qc);
106extern void ata_qc_free(struct ata_queued_cmd *qc);
106extern void ata_qc_issue(struct ata_queued_cmd *qc); 107extern void ata_qc_issue(struct ata_queued_cmd *qc);
107extern void __ata_qc_complete(struct ata_queued_cmd *qc); 108extern void __ata_qc_complete(struct ata_queued_cmd *qc);
108extern int atapi_check_dma(struct ata_queued_cmd *qc); 109extern int atapi_check_dma(struct ata_queued_cmd *qc);
@@ -118,22 +119,6 @@ extern struct ata_port *ata_port_alloc(struct ata_host *host);
118extern void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy); 119extern void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy);
119extern void ata_lpm_schedule(struct ata_port *ap, enum link_pm); 120extern void ata_lpm_schedule(struct ata_port *ap, enum link_pm);
120 121
121/**
122 * ata_qc_free - free unused ata_queued_cmd
123 * @qc: Command to complete
124 *
125 * Designed to free unused ata_queued_cmd object
126 * in case something prevents using it.
127 *
128 * LOCKING:
129 * spin_lock_irqsave(host lock)
130 */
131static inline void ata_qc_free(struct ata_queued_cmd *qc)
132{
133 qc->flags = 0;
134 qc->tag = ATA_TAG_POISON;
135}
136
137/* libata-acpi.c */ 122/* libata-acpi.c */
138#ifdef CONFIG_ATA_ACPI 123#ifdef CONFIG_ATA_ACPI
139extern void ata_acpi_associate_sata_port(struct ata_port *ap); 124extern void ata_acpi_associate_sata_port(struct ata_port *ap);
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index fae3841de0d8..6f1460614325 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -307,10 +307,10 @@ static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
307 307
308static void nv_nf2_freeze(struct ata_port *ap); 308static void nv_nf2_freeze(struct ata_port *ap);
309static void nv_nf2_thaw(struct ata_port *ap); 309static void nv_nf2_thaw(struct ata_port *ap);
310static int nv_nf2_hardreset(struct ata_link *link, unsigned int *class,
311 unsigned long deadline);
310static void nv_ck804_freeze(struct ata_port *ap); 312static void nv_ck804_freeze(struct ata_port *ap);
311static void nv_ck804_thaw(struct ata_port *ap); 313static void nv_ck804_thaw(struct ata_port *ap);
312static int nv_hardreset(struct ata_link *link, unsigned int *class,
313 unsigned long deadline);
314static int nv_adma_slave_config(struct scsi_device *sdev); 314static int nv_adma_slave_config(struct scsi_device *sdev);
315static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc); 315static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
316static void nv_adma_qc_prep(struct ata_queued_cmd *qc); 316static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
@@ -405,17 +405,8 @@ static struct scsi_host_template nv_swncq_sht = {
405 .slave_configure = nv_swncq_slave_config, 405 .slave_configure = nv_swncq_slave_config,
406}; 406};
407 407
408/* OSDL bz3352 reports that some nv controllers can't determine device
409 * signature reliably and nv_hardreset is implemented to work around
410 * the problem. This was reported on nf3 and it's unclear whether any
411 * other controllers are affected. However, the workaround has been
412 * applied to all variants and there isn't much to gain by trying to
413 * find out exactly which ones are affected at this point especially
414 * because NV has moved over to ahci for newer controllers.
415 */
416static struct ata_port_operations nv_common_ops = { 408static struct ata_port_operations nv_common_ops = {
417 .inherits = &ata_bmdma_port_ops, 409 .inherits = &ata_bmdma_port_ops,
418 .hardreset = nv_hardreset,
419 .scr_read = nv_scr_read, 410 .scr_read = nv_scr_read,
420 .scr_write = nv_scr_write, 411 .scr_write = nv_scr_write,
421}; 412};
@@ -429,12 +420,22 @@ static struct ata_port_operations nv_generic_ops = {
429 .hardreset = ATA_OP_NULL, 420 .hardreset = ATA_OP_NULL,
430}; 421};
431 422
423/* OSDL bz3352 reports that nf2/3 controllers can't determine device
424 * signature reliably. Also, the following thread reports detection
425 * failure on cold boot with the standard debouncing timing.
426 *
427 * http://thread.gmane.org/gmane.linux.ide/34098
428 *
429 * Debounce with hotplug timing and request follow-up SRST.
430 */
432static struct ata_port_operations nv_nf2_ops = { 431static struct ata_port_operations nv_nf2_ops = {
433 .inherits = &nv_common_ops, 432 .inherits = &nv_common_ops,
434 .freeze = nv_nf2_freeze, 433 .freeze = nv_nf2_freeze,
435 .thaw = nv_nf2_thaw, 434 .thaw = nv_nf2_thaw,
435 .hardreset = nv_nf2_hardreset,
436}; 436};
437 437
438/* CK804 finally gets hardreset right */
438static struct ata_port_operations nv_ck804_ops = { 439static struct ata_port_operations nv_ck804_ops = {
439 .inherits = &nv_common_ops, 440 .inherits = &nv_common_ops,
440 .freeze = nv_ck804_freeze, 441 .freeze = nv_ck804_freeze,
@@ -443,7 +444,7 @@ static struct ata_port_operations nv_ck804_ops = {
443}; 444};
444 445
445static struct ata_port_operations nv_adma_ops = { 446static struct ata_port_operations nv_adma_ops = {
446 .inherits = &nv_common_ops, 447 .inherits = &nv_ck804_ops,
447 448
448 .check_atapi_dma = nv_adma_check_atapi_dma, 449 .check_atapi_dma = nv_adma_check_atapi_dma,
449 .sff_tf_read = nv_adma_tf_read, 450 .sff_tf_read = nv_adma_tf_read,
@@ -467,7 +468,7 @@ static struct ata_port_operations nv_adma_ops = {
467}; 468};
468 469
469static struct ata_port_operations nv_swncq_ops = { 470static struct ata_port_operations nv_swncq_ops = {
470 .inherits = &nv_common_ops, 471 .inherits = &nv_generic_ops,
471 472
472 .qc_defer = ata_std_qc_defer, 473 .qc_defer = ata_std_qc_defer,
473 .qc_prep = nv_swncq_qc_prep, 474 .qc_prep = nv_swncq_qc_prep,
@@ -1553,6 +1554,17 @@ static void nv_nf2_thaw(struct ata_port *ap)
1553 iowrite8(mask, scr_addr + NV_INT_ENABLE); 1554 iowrite8(mask, scr_addr + NV_INT_ENABLE);
1554} 1555}
1555 1556
1557static int nv_nf2_hardreset(struct ata_link *link, unsigned int *class,
1558 unsigned long deadline)
1559{
1560 bool online;
1561 int rc;
1562
1563 rc = sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1564 &online, NULL);
1565 return online ? -EAGAIN : rc;
1566}
1567
1556static void nv_ck804_freeze(struct ata_port *ap) 1568static void nv_ck804_freeze(struct ata_port *ap)
1557{ 1569{
1558 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; 1570 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
@@ -1605,21 +1617,6 @@ static void nv_mcp55_thaw(struct ata_port *ap)
1605 ata_sff_thaw(ap); 1617 ata_sff_thaw(ap);
1606} 1618}
1607 1619
1608static int nv_hardreset(struct ata_link *link, unsigned int *class,
1609 unsigned long deadline)
1610{
1611 int rc;
1612
1613 /* SATA hardreset fails to retrieve proper device signature on
1614 * some controllers. Request follow up SRST. For more info,
1615 * see http://bugzilla.kernel.org/show_bug.cgi?id=3352
1616 */
1617 rc = sata_sff_hardreset(link, class, deadline);
1618 if (rc)
1619 return rc;
1620 return -EAGAIN;
1621}
1622
1623static void nv_adma_error_handler(struct ata_port *ap) 1620static void nv_adma_error_handler(struct ata_port *ap)
1624{ 1621{
1625 struct nv_adma_port_priv *pp = ap->private_data; 1622 struct nv_adma_port_priv *pp = ap->private_data;
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 750d8cdc00cd..ba9a2570a742 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -153,6 +153,10 @@ static void pdc_freeze(struct ata_port *ap);
153static void pdc_sata_freeze(struct ata_port *ap); 153static void pdc_sata_freeze(struct ata_port *ap);
154static void pdc_thaw(struct ata_port *ap); 154static void pdc_thaw(struct ata_port *ap);
155static void pdc_sata_thaw(struct ata_port *ap); 155static void pdc_sata_thaw(struct ata_port *ap);
156static int pdc_pata_softreset(struct ata_link *link, unsigned int *class,
157 unsigned long deadline);
158static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
159 unsigned long deadline);
156static void pdc_error_handler(struct ata_port *ap); 160static void pdc_error_handler(struct ata_port *ap);
157static void pdc_post_internal_cmd(struct ata_queued_cmd *qc); 161static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
158static int pdc_pata_cable_detect(struct ata_port *ap); 162static int pdc_pata_cable_detect(struct ata_port *ap);
@@ -186,6 +190,7 @@ static struct ata_port_operations pdc_sata_ops = {
186 .scr_read = pdc_sata_scr_read, 190 .scr_read = pdc_sata_scr_read,
187 .scr_write = pdc_sata_scr_write, 191 .scr_write = pdc_sata_scr_write,
188 .port_start = pdc_sata_port_start, 192 .port_start = pdc_sata_port_start,
193 .hardreset = pdc_sata_hardreset,
189}; 194};
190 195
191/* First-generation chips need a more restrictive ->check_atapi_dma op */ 196/* First-generation chips need a more restrictive ->check_atapi_dma op */
@@ -200,6 +205,7 @@ static struct ata_port_operations pdc_pata_ops = {
200 .freeze = pdc_freeze, 205 .freeze = pdc_freeze,
201 .thaw = pdc_thaw, 206 .thaw = pdc_thaw,
202 .port_start = pdc_common_port_start, 207 .port_start = pdc_common_port_start,
208 .softreset = pdc_pata_softreset,
203}; 209};
204 210
205static const struct ata_port_info pdc_port_info[] = { 211static const struct ata_port_info pdc_port_info[] = {
@@ -693,6 +699,20 @@ static void pdc_sata_thaw(struct ata_port *ap)
693 readl(host_mmio + hotplug_offset); /* flush */ 699 readl(host_mmio + hotplug_offset); /* flush */
694} 700}
695 701
702static int pdc_pata_softreset(struct ata_link *link, unsigned int *class,
703 unsigned long deadline)
704{
705 pdc_reset_port(link->ap);
706 return ata_sff_softreset(link, class, deadline);
707}
708
709static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
710 unsigned long deadline)
711{
712 pdc_reset_port(link->ap);
713 return sata_sff_hardreset(link, class, deadline);
714}
715
696static void pdc_error_handler(struct ata_port *ap) 716static void pdc_error_handler(struct ata_port *ap)
697{ 717{
698 if (!(ap->pflags & ATA_PFLAG_FROZEN)) 718 if (!(ap->pflags & ATA_PFLAG_FROZEN))
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 62367fe4d5dc..c18935f0bda2 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -602,8 +602,10 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
602 rc = vt8251_prepare_host(pdev, &host); 602 rc = vt8251_prepare_host(pdev, &host);
603 break; 603 break;
604 default: 604 default:
605 return -EINVAL; 605 rc = -EINVAL;
606 } 606 }
607 if (rc)
608 return rc;
607 609
608 svia_configure(pdev); 610 svia_configure(pdev);
609 611
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 4023885353e0..12de1fdaa6c6 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -96,6 +96,8 @@ static const struct pci_device_id cciss_pci_device_id[] = {
96 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245}, 96 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
97 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247}, 97 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
98 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, 98 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
99 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
100 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
99 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 101 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
100 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, 102 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
101 {0,} 103 {0,}
@@ -133,6 +135,8 @@ static struct board_type products[] = {
133 {0x3245103C, "Smart Array P410i", &SA5_access}, 135 {0x3245103C, "Smart Array P410i", &SA5_access},
134 {0x3247103C, "Smart Array P411", &SA5_access}, 136 {0x3247103C, "Smart Array P411", &SA5_access},
135 {0x3249103C, "Smart Array P812", &SA5_access}, 137 {0x3249103C, "Smart Array P812", &SA5_access},
138 {0x324A103C, "Smart Array P712m", &SA5_access},
139 {0x324B103C, "Smart Array P711m", &SA5_access},
136 {0xFFFF103C, "Unknown Smart Array", &SA5_access}, 140 {0xFFFF103C, "Unknown Smart Array", &SA5_access},
137}; 141};
138 142
@@ -1366,6 +1370,7 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
1366 disk->first_minor = drv_index << NWD_SHIFT; 1370 disk->first_minor = drv_index << NWD_SHIFT;
1367 disk->fops = &cciss_fops; 1371 disk->fops = &cciss_fops;
1368 disk->private_data = &h->drv[drv_index]; 1372 disk->private_data = &h->drv[drv_index];
1373 disk->driverfs_dev = &h->pdev->dev;
1369 1374
1370 /* Set up queue information */ 1375 /* Set up queue information */
1371 blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask); 1376 blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
@@ -3404,7 +3409,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3404 int i; 3409 int i;
3405 int j = 0; 3410 int j = 0;
3406 int rc; 3411 int rc;
3407 int dac; 3412 int dac, return_code;
3413 InquiryData_struct *inq_buff = NULL;
3408 3414
3409 i = alloc_cciss_hba(); 3415 i = alloc_cciss_hba();
3410 if (i < 0) 3416 if (i < 0)
@@ -3510,6 +3516,25 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3510 /* Turn the interrupts on so we can service requests */ 3516 /* Turn the interrupts on so we can service requests */
3511 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON); 3517 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3512 3518
3519 /* Get the firmware version */
3520 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
3521 if (inq_buff == NULL) {
3522 printk(KERN_ERR "cciss: out of memory\n");
3523 goto clean4;
3524 }
3525
3526 return_code = sendcmd_withirq(CISS_INQUIRY, i, inq_buff,
3527 sizeof(InquiryData_struct), 0, 0 , 0, TYPE_CMD);
3528 if (return_code == IO_OK) {
3529 hba[i]->firm_ver[0] = inq_buff->data_byte[32];
3530 hba[i]->firm_ver[1] = inq_buff->data_byte[33];
3531 hba[i]->firm_ver[2] = inq_buff->data_byte[34];
3532 hba[i]->firm_ver[3] = inq_buff->data_byte[35];
3533 } else { /* send command failed */
3534 printk(KERN_WARNING "cciss: unable to determine firmware"
3535 " version of controller\n");
3536 }
3537
3513 cciss_procinit(i); 3538 cciss_procinit(i);
3514 3539
3515 hba[i]->cciss_max_sectors = 2048; 3540 hba[i]->cciss_max_sectors = 2048;
@@ -3520,6 +3545,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3520 return 1; 3545 return 1;
3521 3546
3522clean4: 3547clean4:
3548 kfree(inq_buff);
3523#ifdef CONFIG_CISS_SCSI_TAPE 3549#ifdef CONFIG_CISS_SCSI_TAPE
3524 kfree(hba[i]->scsi_rejects.complete); 3550 kfree(hba[i]->scsi_rejects.complete);
3525#endif 3551#endif
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 47d233c6d0b3..5d39df14ed90 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -567,7 +567,12 @@ static int __init cpqarray_init(void)
567 num_cntlrs_reg++; 567 num_cntlrs_reg++;
568 } 568 }
569 569
570 return(num_cntlrs_reg); 570 if (num_cntlrs_reg)
571 return 0;
572 else {
573 pci_unregister_driver(&cpqarray_pci_driver);
574 return -ENODEV;
575 }
571} 576}
572 577
573/* Function to find the first free pointer into our hba[] array */ 578/* Function to find the first free pointer into our hba[] array */
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index d8f83e26e4a4..a5af6072e2b3 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -1644,7 +1644,10 @@ static void reset_terminal(struct vc_data *vc, int do_clear)
1644 vc->vc_tab_stop[1] = 1644 vc->vc_tab_stop[1] =
1645 vc->vc_tab_stop[2] = 1645 vc->vc_tab_stop[2] =
1646 vc->vc_tab_stop[3] = 1646 vc->vc_tab_stop[3] =
1647 vc->vc_tab_stop[4] = 0x01010101; 1647 vc->vc_tab_stop[4] =
1648 vc->vc_tab_stop[5] =
1649 vc->vc_tab_stop[6] =
1650 vc->vc_tab_stop[7] = 0x01010101;
1648 1651
1649 vc->vc_bell_pitch = DEFAULT_BELL_PITCH; 1652 vc->vc_bell_pitch = DEFAULT_BELL_PITCH;
1650 vc->vc_bell_duration = DEFAULT_BELL_DURATION; 1653 vc->vc_bell_duration = DEFAULT_BELL_DURATION;
@@ -1935,7 +1938,10 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
1935 vc->vc_tab_stop[1] = 1938 vc->vc_tab_stop[1] =
1936 vc->vc_tab_stop[2] = 1939 vc->vc_tab_stop[2] =
1937 vc->vc_tab_stop[3] = 1940 vc->vc_tab_stop[3] =
1938 vc->vc_tab_stop[4] = 0; 1941 vc->vc_tab_stop[4] =
1942 vc->vc_tab_stop[5] =
1943 vc->vc_tab_stop[6] =
1944 vc->vc_tab_stop[7] = 0;
1939 } 1945 }
1940 return; 1946 return;
1941 case 'm': 1947 case 'm':
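Both vt.c hunks extend the vc_tab_stop[] initialisation from five 32-bit words to eight, growing the covered width from 160 to 256 columns. Each word is a bitmap of tab stops and 0x01010101 sets bits 0, 8, 16 and 24, i.e. one stop every 8 columns; looking up a column is a word-index plus bit-mask test, roughly as sketched below (standalone illustration, not the console code):

    #include <stdio.h>

    static unsigned int tab_stop[8] = {
            0x01010101, 0x01010101, 0x01010101, 0x01010101,
            0x01010101, 0x01010101, 0x01010101, 0x01010101,
    };

    static int is_tab_stop(unsigned int x)
    {
            return (tab_stop[x >> 5] >> (x & 31)) & 1;
    }

    int main(void)
    {
            /* prints "1 0 1": 200 = 6*32 + 8, and bit 8 of word 6 is set */
            printf("%d %d %d\n", is_tab_stop(8), is_tab_stop(9), is_tab_stop(200));
            return 0;
    }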
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 5bed73329ef8..8504a2108557 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -65,12 +65,14 @@ static void cpuidle_idle_call(void)
65 return; 65 return;
66 } 66 }
67 67
68#if 0
69 /* shows regressions, re-enable for 2.6.29 */
68 /* 70 /*
69 * run any timers that can be run now, at this point 71 * run any timers that can be run now, at this point
70 * before calculating the idle duration etc. 72 * before calculating the idle duration etc.
71 */ 73 */
72 hrtimer_peek_ahead_timers(); 74 hrtimer_peek_ahead_timers();
73 75#endif
74 /* ask the governor for the next state */ 76 /* ask the governor for the next state */
75 next_state = cpuidle_curr_governor->select(dev); 77 next_state = cpuidle_curr_governor->select(dev);
76 if (need_resched()) 78 if (need_resched())
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c
index 3fccdd484100..6b9be42c7b98 100644
--- a/drivers/firewire/fw-device.c
+++ b/drivers/firewire/fw-device.c
@@ -587,8 +587,7 @@ static void create_units(struct fw_device *device)
587 unit->device.bus = &fw_bus_type; 587 unit->device.bus = &fw_bus_type;
588 unit->device.type = &fw_unit_type; 588 unit->device.type = &fw_unit_type;
589 unit->device.parent = &device->device; 589 unit->device.parent = &device->device;
590 snprintf(unit->device.bus_id, sizeof(unit->device.bus_id), 590 dev_set_name(&unit->device, "%s.%d", dev_name(&device->device), i++);
591 "%s.%d", device->device.bus_id, i++);
592 591
593 init_fw_attribute_group(&unit->device, 592 init_fw_attribute_group(&unit->device,
594 fw_unit_attributes, 593 fw_unit_attributes,
@@ -711,8 +710,7 @@ static void fw_device_init(struct work_struct *work)
711 device->device.type = &fw_device_type; 710 device->device.type = &fw_device_type;
712 device->device.parent = device->card->device; 711 device->device.parent = device->card->device;
713 device->device.devt = MKDEV(fw_cdev_major, minor); 712 device->device.devt = MKDEV(fw_cdev_major, minor);
714 snprintf(device->device.bus_id, sizeof(device->device.bus_id), 713 dev_set_name(&device->device, "fw%d", minor);
715 "fw%d", minor);
716 714
717 init_fw_attribute_group(&device->device, 715 init_fw_attribute_group(&device->device,
718 fw_device_attributes, 716 fw_device_attributes,
@@ -741,13 +739,13 @@ static void fw_device_init(struct work_struct *work)
741 if (device->config_rom_retries) 739 if (device->config_rom_retries)
742 fw_notify("created device %s: GUID %08x%08x, S%d00, " 740 fw_notify("created device %s: GUID %08x%08x, S%d00, "
743 "%d config ROM retries\n", 741 "%d config ROM retries\n",
744 device->device.bus_id, 742 dev_name(&device->device),
745 device->config_rom[3], device->config_rom[4], 743 device->config_rom[3], device->config_rom[4],
746 1 << device->max_speed, 744 1 << device->max_speed,
747 device->config_rom_retries); 745 device->config_rom_retries);
748 else 746 else
749 fw_notify("created device %s: GUID %08x%08x, S%d00\n", 747 fw_notify("created device %s: GUID %08x%08x, S%d00\n",
750 device->device.bus_id, 748 dev_name(&device->device),
751 device->config_rom[3], device->config_rom[4], 749 device->config_rom[3], device->config_rom[4],
752 1 << device->max_speed); 750 1 << device->max_speed);
753 device->config_rom_retries = 0; 751 device->config_rom_retries = 0;
@@ -883,12 +881,12 @@ static void fw_device_refresh(struct work_struct *work)
883 FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) 881 FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN)
884 goto gone; 882 goto gone;
885 883
886 fw_notify("refreshed device %s\n", device->device.bus_id); 884 fw_notify("refreshed device %s\n", dev_name(&device->device));
887 device->config_rom_retries = 0; 885 device->config_rom_retries = 0;
888 goto out; 886 goto out;
889 887
890 give_up: 888 give_up:
891 fw_notify("giving up on refresh of device %s\n", device->device.bus_id); 889 fw_notify("giving up on refresh of device %s\n", dev_name(&device->device));
892 gone: 890 gone:
893 atomic_set(&device->state, FW_DEVICE_SHUTDOWN); 891 atomic_set(&device->state, FW_DEVICE_SHUTDOWN);
894 fw_device_shutdown(work); 892 fw_device_shutdown(work);
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 8e16bfbdcb3d..46610b090415 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -2468,7 +2468,7 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
2468 goto fail_self_id; 2468 goto fail_self_id;
2469 2469
2470 fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n", 2470 fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
2471 dev->dev.bus_id, version >> 16, version & 0xff); 2471 dev_name(&dev->dev), version >> 16, version & 0xff);
2472 return 0; 2472 return 0;
2473 2473
2474 fail_self_id: 2474 fail_self_id:
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index d334cac5e1fc..97df6dac3a82 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -1135,7 +1135,7 @@ static int sbp2_probe(struct device *dev)
1135 tgt->unit = unit; 1135 tgt->unit = unit;
1136 kref_init(&tgt->kref); 1136 kref_init(&tgt->kref);
1137 INIT_LIST_HEAD(&tgt->lu_list); 1137 INIT_LIST_HEAD(&tgt->lu_list);
1138 tgt->bus_id = unit->device.bus_id; 1138 tgt->bus_id = dev_name(&unit->device);
1139 tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; 1139 tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
1140 1140
1141 if (fw_device_enable_phys_dma(device) < 0) 1141 if (fw_device_enable_phys_dma(device) < 0)
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 3e526b6d00cb..8daf4793ac32 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -81,9 +81,9 @@ static void dmi_table(u8 *buf, int len, int num,
81 const struct dmi_header *dm = (const struct dmi_header *)data; 81 const struct dmi_header *dm = (const struct dmi_header *)data;
82 82
83 /* 83 /*
84 * We want to know the total length (formated area and strings) 84 * We want to know the total length (formatted area and
85 * before decoding to make sure we won't run off the table in 85 * strings) before decoding to make sure we won't run off the
86 * dmi_decode or dmi_string 86 * table in dmi_decode or dmi_string
87 */ 87 */
88 data += dm->length; 88 data += dm->length;
89 while ((data - buf < len - 1) && (data[0] || data[1])) 89 while ((data - buf < len - 1) && (data[0] || data[1]))
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index bc011da79e14..be3285912cb7 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -116,6 +116,18 @@ static const char* temperature_sensors_sets[][36] = {
116/* Set 9: Macbook Pro 3,1 (Santa Rosa) */ 116/* Set 9: Macbook Pro 3,1 (Santa Rosa) */
117 { "TALP", "TB0T", "TC0D", "TC0P", "TG0D", "TG0H", "TTF0", "TW0P", 117 { "TALP", "TB0T", "TC0D", "TC0P", "TG0D", "TG0H", "TTF0", "TW0P",
118 "Th0H", "Th1H", "Th2H", "Tm0P", "Ts0P", NULL }, 118 "Th0H", "Th1H", "Th2H", "Tm0P", "Ts0P", NULL },
119/* Set 10: iMac 5,1 */
120 { "TA0P", "TC0D", "TC0P", "TG0D", "TH0P", "TO0P", "Tm0P", NULL },
121/* Set 11: Macbook 5,1 */
122 { "TB0T", "TB1T", "TB2T", "TB3T", "TC0D", "TC0P", "TN0D", "TN0P",
123 "TTF0", "Th0H", "Th1H", "ThFH", "Ts0P", "Ts0S", NULL },
124/* Set 12: Macbook Pro 5,1 */
125 { "TB0T", "TB1T", "TB2T", "TB3T", "TC0D", "TC0F", "TC0P", "TG0D",
126 "TG0F", "TG0H", "TG0P", "TG0T", "TG1H", "TN0D", "TN0P", "TTF0",
127 "Th2H", "Tm0P", "Ts0P", "Ts0S", NULL },
128/* Set 13: iMac 8,1 */
129 { "TA0P", "TC0D", "TC0H", "TC0P", "TG0D", "TG0H", "TG0P", "TH0P",
130 "TL0P", "TO0P", "TW0P", "Tm0P", "Tp0P", NULL },
119}; 131};
120 132
121/* List of keys used to read/write fan speeds */ 133/* List of keys used to read/write fan speeds */
@@ -1276,6 +1288,14 @@ static __initdata struct dmi_match_data applesmc_dmi_data[] = {
1276 { .accelerometer = 1, .light = 1, .temperature_set = 8 }, 1288 { .accelerometer = 1, .light = 1, .temperature_set = 8 },
1277/* MacBook Pro 3: accelerometer, backlight and temperature set 9 */ 1289/* MacBook Pro 3: accelerometer, backlight and temperature set 9 */
1278 { .accelerometer = 1, .light = 1, .temperature_set = 9 }, 1290 { .accelerometer = 1, .light = 1, .temperature_set = 9 },
1291/* iMac 5: light sensor only, temperature set 10 */
1292 { .accelerometer = 0, .light = 0, .temperature_set = 10 },
1293/* MacBook 5: accelerometer, backlight and temperature set 11 */
1294 { .accelerometer = 1, .light = 1, .temperature_set = 11 },
1295/* MacBook Pro 5: accelerometer, backlight and temperature set 12 */
1296 { .accelerometer = 1, .light = 1, .temperature_set = 12 },
1297/* iMac 8: light sensor only, temperature set 13 */
1298 { .accelerometer = 0, .light = 0, .temperature_set = 13 },
1279}; 1299};
1280 1300
1281/* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1". 1301/* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1".
@@ -1285,6 +1305,10 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = {
1285 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), 1305 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
1286 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir") }, 1306 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir") },
1287 &applesmc_dmi_data[7]}, 1307 &applesmc_dmi_data[7]},
1308 { applesmc_dmi_match, "Apple MacBook Pro 5", {
1309 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
1310 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5") },
1311 &applesmc_dmi_data[12]},
1288 { applesmc_dmi_match, "Apple MacBook Pro 4", { 1312 { applesmc_dmi_match, "Apple MacBook Pro 4", {
1289 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), 1313 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
1290 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro4") }, 1314 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro4") },
@@ -1305,6 +1329,10 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = {
1305 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), 1329 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
1306 DMI_MATCH(DMI_PRODUCT_NAME,"MacBook3") }, 1330 DMI_MATCH(DMI_PRODUCT_NAME,"MacBook3") },
1307 &applesmc_dmi_data[6]}, 1331 &applesmc_dmi_data[6]},
1332 { applesmc_dmi_match, "Apple MacBook 5", {
1333 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
1334 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5") },
1335 &applesmc_dmi_data[11]},
1308 { applesmc_dmi_match, "Apple MacBook", { 1336 { applesmc_dmi_match, "Apple MacBook", {
1309 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), 1337 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
1310 DMI_MATCH(DMI_PRODUCT_NAME,"MacBook") }, 1338 DMI_MATCH(DMI_PRODUCT_NAME,"MacBook") },
@@ -1317,6 +1345,14 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = {
1317 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), 1345 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
1318 DMI_MATCH(DMI_PRODUCT_NAME,"MacPro2") }, 1346 DMI_MATCH(DMI_PRODUCT_NAME,"MacPro2") },
1319 &applesmc_dmi_data[4]}, 1347 &applesmc_dmi_data[4]},
1348 { applesmc_dmi_match, "Apple iMac 8", {
1349 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
1350 DMI_MATCH(DMI_PRODUCT_NAME, "iMac8") },
1351 &applesmc_dmi_data[13]},
1352 { applesmc_dmi_match, "Apple iMac 5", {
1353 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
1354 DMI_MATCH(DMI_PRODUCT_NAME, "iMac5") },
1355 &applesmc_dmi_data[10]},
1320 { applesmc_dmi_match, "Apple iMac", { 1356 { applesmc_dmi_match, "Apple iMac", {
1321 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), 1357 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
1322 DMI_MATCH(DMI_PRODUCT_NAME,"iMac") }, 1358 DMI_MATCH(DMI_PRODUCT_NAME,"iMac") },
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index 965cfdb84ebc..c19f23267157 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -1270,8 +1270,14 @@ static int dv1394_mmap(struct file *file, struct vm_area_struct *vma)
1270 struct video_card *video = file_to_video_card(file); 1270 struct video_card *video = file_to_video_card(file);
1271 int retval = -EINVAL; 1271 int retval = -EINVAL;
1272 1272
1273 /* serialize mmap */ 1273 /*
1274 mutex_lock(&video->mtx); 1274 * We cannot use the blocking variant mutex_lock here because .mmap
1275 * is called with mmap_sem held, while .ioctl, .read, .write acquire
1276 * video->mtx and subsequently call copy_to/from_user which will
1277 * grab mmap_sem in case of a page fault.
1278 */
1279 if (!mutex_trylock(&video->mtx))
1280 return -EAGAIN;
1275 1281
1276 if ( ! video_card_initialized(video) ) { 1282 if ( ! video_card_initialized(video) ) {
1277 retval = do_dv1394_init_default(video); 1283 retval = do_dv1394_init_default(video);
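[Editor's note, not part of the patch] The new comment above spells out the lock-ordering problem: .mmap is entered with mmap_sem held, while .ioctl/.read/.write take video->mtx first and may fault user pages (taking mmap_sem) underneath it. Using mutex_trylock() in .mmap sidesteps the AB-BA deadlock at the cost of an occasional -EAGAIN. A stripped-down sketch of the shape of the fix, with hypothetical example_* names rather than the dv1394 code:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/errno.h>

struct example_card {
        struct mutex mtx;
        /* ... per-device state ... */
};

/*
 * .mmap runs with mmap_sem held, so never block here on a mutex that
 * other file operations hold across copy_to_user()/copy_from_user().
 */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct example_card *card = file->private_data;
        int ret = 0;

        if (!mutex_trylock(&card->mtx))
                return -EAGAIN;

        /* ... set up the mapping under card->mtx ... */

        mutex_unlock(&card->mtx);
        return ret;
}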
diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
index 8dd09d850419..237d0c9d69c6 100644
--- a/drivers/ieee1394/hosts.c
+++ b/drivers/ieee1394/hosts.c
@@ -155,11 +155,11 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
155 memcpy(&h->device, &nodemgr_dev_template_host, sizeof(h->device)); 155 memcpy(&h->device, &nodemgr_dev_template_host, sizeof(h->device));
156 h->device.parent = dev; 156 h->device.parent = dev;
157 set_dev_node(&h->device, dev_to_node(dev)); 157 set_dev_node(&h->device, dev_to_node(dev));
158 snprintf(h->device.bus_id, BUS_ID_SIZE, "fw-host%d", h->id); 158 dev_set_name(&h->device, "fw-host%d", h->id);
159 159
160 h->host_dev.parent = &h->device; 160 h->host_dev.parent = &h->device;
161 h->host_dev.class = &hpsb_host_class; 161 h->host_dev.class = &hpsb_host_class;
162 snprintf(h->host_dev.bus_id, BUS_ID_SIZE, "fw-host%d", h->id); 162 dev_set_name(&h->host_dev, "fw-host%d", h->id);
163 163
164 if (device_register(&h->device)) 164 if (device_register(&h->device))
165 goto fail; 165 goto fail;
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 2376b729e876..9e39f73282ee 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -826,13 +826,11 @@ static struct node_entry *nodemgr_create_node(octlet_t guid,
826 memcpy(&ne->device, &nodemgr_dev_template_ne, 826 memcpy(&ne->device, &nodemgr_dev_template_ne,
827 sizeof(ne->device)); 827 sizeof(ne->device));
828 ne->device.parent = &host->device; 828 ne->device.parent = &host->device;
829 snprintf(ne->device.bus_id, BUS_ID_SIZE, "%016Lx", 829 dev_set_name(&ne->device, "%016Lx", (unsigned long long)(ne->guid));
830 (unsigned long long)(ne->guid));
831 830
832 ne->node_dev.parent = &ne->device; 831 ne->node_dev.parent = &ne->device;
833 ne->node_dev.class = &nodemgr_ne_class; 832 ne->node_dev.class = &nodemgr_ne_class;
834 snprintf(ne->node_dev.bus_id, BUS_ID_SIZE, "%016Lx", 833 dev_set_name(&ne->node_dev, "%016Lx", (unsigned long long)(ne->guid));
835 (unsigned long long)(ne->guid));
836 834
837 if (device_register(&ne->device)) 835 if (device_register(&ne->device))
838 goto fail_devreg; 836 goto fail_devreg;
@@ -932,13 +930,11 @@ static void nodemgr_register_device(struct node_entry *ne,
932 930
933 ud->device.parent = parent; 931 ud->device.parent = parent;
934 932
935 snprintf(ud->device.bus_id, BUS_ID_SIZE, "%s-%u", 933 dev_set_name(&ud->device, "%s-%u", dev_name(&ne->device), ud->id);
936 ne->device.bus_id, ud->id);
937 934
938 ud->unit_dev.parent = &ud->device; 935 ud->unit_dev.parent = &ud->device;
939 ud->unit_dev.class = &nodemgr_ud_class; 936 ud->unit_dev.class = &nodemgr_ud_class;
940 snprintf(ud->unit_dev.bus_id, BUS_ID_SIZE, "%s-%u", 937 dev_set_name(&ud->unit_dev, "%s-%u", dev_name(&ne->device), ud->id);
941 ne->device.bus_id, ud->id);
942 938
943 if (device_register(&ud->device)) 939 if (device_register(&ud->device))
944 goto fail_devreg; 940 goto fail_devreg;
@@ -953,7 +949,7 @@ static void nodemgr_register_device(struct node_entry *ne,
953fail_classdevreg: 949fail_classdevreg:
954 device_unregister(&ud->device); 950 device_unregister(&ud->device);
955fail_devreg: 951fail_devreg:
956 HPSB_ERR("Failed to create unit %s", ud->device.bus_id); 952 HPSB_ERR("Failed to create unit %s", dev_name(&ud->device));
957} 953}
958 954
959 955
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index 9f19ac492106..bf7e761c12b1 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -2268,7 +2268,8 @@ static ssize_t raw1394_write(struct file *file, const char __user * buffer,
2268 return -EFAULT; 2268 return -EFAULT;
2269 } 2269 }
2270 2270
2271 mutex_lock(&fi->state_mutex); 2271 if (!mutex_trylock(&fi->state_mutex))
2272 return -EAGAIN;
2272 2273
2273 switch (fi->state) { 2274 switch (fi->state) {
2274 case opened: 2275 case opened:
@@ -2548,7 +2549,8 @@ static int raw1394_mmap(struct file *file, struct vm_area_struct *vma)
2548 struct file_info *fi = file->private_data; 2549 struct file_info *fi = file->private_data;
2549 int ret; 2550 int ret;
2550 2551
2551 mutex_lock(&fi->state_mutex); 2552 if (!mutex_trylock(&fi->state_mutex))
2553 return -EAGAIN;
2552 2554
2553 if (fi->iso_state == RAW1394_ISO_INACTIVE) 2555 if (fi->iso_state == RAW1394_ISO_INACTIVE)
2554 ret = -EINVAL; 2556 ret = -EINVAL;
@@ -2669,7 +2671,8 @@ static long raw1394_ioctl(struct file *file, unsigned int cmd,
2669 break; 2671 break;
2670 } 2672 }
2671 2673
2672 mutex_lock(&fi->state_mutex); 2674 if (!mutex_trylock(&fi->state_mutex))
2675 return -EAGAIN;
2673 2676
2674 switch (fi->iso_state) { 2677 switch (fi->iso_state) {
2675 case RAW1394_ISO_INACTIVE: 2678 case RAW1394_ISO_INACTIVE:
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 190147c79e79..3b90c5c924ec 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -148,6 +148,8 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
148 148
149 min_sectors = conf->array_sectors; 149 min_sectors = conf->array_sectors;
150 sector_div(min_sectors, PAGE_SIZE/sizeof(struct dev_info *)); 150 sector_div(min_sectors, PAGE_SIZE/sizeof(struct dev_info *));
151 if (min_sectors == 0)
152 min_sectors = 1;
151 153
152 /* min_sectors is the minimum spacing that will fit the hash 154 /* min_sectors is the minimum spacing that will fit the hash
153 * table in one PAGE. This may be much smaller than needed. 155 * table in one PAGE. This may be much smaller than needed.
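[Editor's note, not part of the patch] sector_div() divides its first argument in place, so on a very small array the computed spacing can end up as zero and poison the rounding done later; the two added lines pin it to at least one sector. A hedged sketch of the idiom (illustrative helper, headers approximate):

#include <linux/kernel.h>
#include <linux/types.h>

/* Minimum per-entry spacing for a hash table, never allowed to reach 0. */
static sector_t example_min_spacing(sector_t array_sectors, unsigned int entries)
{
        sector_t min_sectors = array_sectors;

        /* sector_div() divides in place and returns the remainder. */
        sector_div(min_sectors, entries);
        if (min_sectors == 0)
                min_sectors = 1;

        return min_sectors;
}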
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 9abf6ed16535..1b1d32694f6f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3884,7 +3884,6 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
3884 if (mode == 0) { 3884 if (mode == 0) {
3885 mdk_rdev_t *rdev; 3885 mdk_rdev_t *rdev;
3886 struct list_head *tmp; 3886 struct list_head *tmp;
3887 struct block_device *bdev;
3888 3887
3889 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); 3888 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
3890 3889
@@ -3941,11 +3940,6 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
3941 mddev->degraded = 0; 3940 mddev->degraded = 0;
3942 mddev->barriers_work = 0; 3941 mddev->barriers_work = 0;
3943 mddev->safemode = 0; 3942 mddev->safemode = 0;
3944 bdev = bdget_disk(mddev->gendisk, 0);
3945 if (bdev) {
3946 blkdev_ioctl(bdev, 0, BLKRRPART, 0);
3947 bdput(bdev);
3948 }
3949 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 3943 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
3950 3944
3951 } else if (mddev->pers) 3945 } else if (mddev->pers)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index da5129a24b18..970a96ef9b18 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1137,7 +1137,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1137 if (!enough(conf)) 1137 if (!enough(conf))
1138 return -EINVAL; 1138 return -EINVAL;
1139 1139
1140 if (rdev->raid_disk) 1140 if (rdev->raid_disk >= 0)
1141 first = last = rdev->raid_disk; 1141 first = last = rdev->raid_disk;
1142 1142
1143 if (rdev->saved_raid_disk >= 0 && 1143 if (rdev->saved_raid_disk >= 0 &&
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index a1abf95cf751..603ffd008c73 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -77,12 +77,6 @@ MODULE_VERSION(my_VERSION);
77 * Fusion MPT LAN private structures 77 * Fusion MPT LAN private structures
78 */ 78 */
79 79
80struct NAA_Hosed {
81 u16 NAA;
82 u8 ieee[FC_ALEN];
83 struct NAA_Hosed *next;
84};
85
86struct BufferControl { 80struct BufferControl {
87 struct sk_buff *skb; 81 struct sk_buff *skb;
88 dma_addr_t dma; 82 dma_addr_t dma;
@@ -159,11 +153,6 @@ static u8 LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
159static u32 max_buckets_out = 127; 153static u32 max_buckets_out = 127;
160static u32 tx_max_out_p = 127 - 16; 154static u32 tx_max_out_p = 127 - 16;
161 155
162#ifdef QLOGIC_NAA_WORKAROUND
163static struct NAA_Hosed *mpt_bad_naa = NULL;
164DEFINE_RWLOCK(bad_naa_lock);
165#endif
166
167/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 156/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
168/** 157/**
169 * lan_reply - Handle all data sent from the hardware. 158 * lan_reply - Handle all data sent from the hardware.
@@ -780,30 +769,6 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
780// ctx, skb, skb->data)); 769// ctx, skb, skb->data));
781 770
782 mac = skb_mac_header(skb); 771 mac = skb_mac_header(skb);
783#ifdef QLOGIC_NAA_WORKAROUND
784{
785 struct NAA_Hosed *nh;
786
787 /* Munge the NAA for Tx packets to QLogic boards, which don't follow
788 RFC 2625. The longer I look at this, the more my opinion of Qlogic
789 drops. */
790 read_lock_irq(&bad_naa_lock);
791 for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) {
792 if ((nh->ieee[0] == mac[0]) &&
793 (nh->ieee[1] == mac[1]) &&
794 (nh->ieee[2] == mac[2]) &&
795 (nh->ieee[3] == mac[3]) &&
796 (nh->ieee[4] == mac[4]) &&
797 (nh->ieee[5] == mac[5])) {
798 cur_naa = nh->NAA;
799 dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value "
800 "= %04x.\n", cur_naa));
801 break;
802 }
803 }
804 read_unlock_irq(&bad_naa_lock);
805}
806#endif
807 772
808 pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) | 773 pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) |
809 (mac[0] << 8) | 774 (mac[0] << 8) |
@@ -1572,79 +1537,6 @@ mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
1572 1537
1573 fcllc = (struct fcllc *)skb->data; 1538 fcllc = (struct fcllc *)skb->data;
1574 1539
1575#ifdef QLOGIC_NAA_WORKAROUND
1576{
1577 u16 source_naa = fch->stype, found = 0;
1578
1579 /* Workaround for QLogic not following RFC 2625 in regards to the NAA
1580 value. */
1581
1582 if ((source_naa & 0xF000) == 0)
1583 source_naa = swab16(source_naa);
1584
1585 if (fcllc->ethertype == htons(ETH_P_ARP))
1586 dlprintk ((KERN_INFO "mptlan/type_trans: got arp req/rep w/ naa of "
1587 "%04x.\n", source_naa));
1588
1589 if ((fcllc->ethertype == htons(ETH_P_ARP)) &&
1590 ((source_naa >> 12) != MPT_LAN_NAA_RFC2625)){
1591 struct NAA_Hosed *nh, *prevnh;
1592 int i;
1593
1594 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep from "
1595 "system with non-RFC 2625 NAA value (%04x).\n",
1596 source_naa));
1597
1598 write_lock_irq(&bad_naa_lock);
1599 for (prevnh = nh = mpt_bad_naa; nh != NULL;
1600 prevnh=nh, nh=nh->next) {
1601 if ((nh->ieee[0] == fch->saddr[0]) &&
1602 (nh->ieee[1] == fch->saddr[1]) &&
1603 (nh->ieee[2] == fch->saddr[2]) &&
1604 (nh->ieee[3] == fch->saddr[3]) &&
1605 (nh->ieee[4] == fch->saddr[4]) &&
1606 (nh->ieee[5] == fch->saddr[5])) {
1607 found = 1;
1608 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Re"
1609 "q/Rep w/ bad NAA from system already"
1610 " in DB.\n"));
1611 break;
1612 }
1613 }
1614
1615 if ((!found) && (nh == NULL)) {
1616
1617 nh = kmalloc(sizeof(struct NAA_Hosed), GFP_KERNEL);
1618 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep w/"
1619 " bad NAA from system not yet in DB.\n"));
1620
1621 if (nh != NULL) {
1622 nh->next = NULL;
1623 if (!mpt_bad_naa)
1624 mpt_bad_naa = nh;
1625 if (prevnh)
1626 prevnh->next = nh;
1627
1628 nh->NAA = source_naa; /* Set the S_NAA value. */
1629 for (i = 0; i < FC_ALEN; i++)
1630 nh->ieee[i] = fch->saddr[i];
1631 dlprintk ((KERN_INFO "Got ARP from %02x:%02x:%02x:%02x:"
1632 "%02x:%02x with non-compliant S_NAA value.\n",
1633 fch->saddr[0], fch->saddr[1], fch->saddr[2],
1634 fch->saddr[3], fch->saddr[4],fch->saddr[5]));
1635 } else {
1636 printk (KERN_ERR "mptlan/type_trans: Unable to"
1637 " kmalloc a NAA_Hosed struct.\n");
1638 }
1639 } else if (!found) {
1640 printk (KERN_ERR "mptlan/type_trans: found not"
1641 " set, but nh isn't null. Evil "
1642 "funkiness abounds.\n");
1643 }
1644 write_unlock_irq(&bad_naa_lock);
1645 }
1646}
1647#endif
1648 1540
1649 /* Strip the SNAP header from ARP packets since we don't 1541 /* Strip the SNAP header from ARP packets since we don't
1650 * pass them through to the 802.2/SNAP layers. 1542 * pass them through to the 802.2/SNAP layers.
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 0d9b2d6f9ebf..f210a8ee6861 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -216,8 +216,7 @@ int mmc_add_card(struct mmc_card *card)
216 int ret; 216 int ret;
217 const char *type; 217 const char *type;
218 218
219 snprintf(card->dev.bus_id, sizeof(card->dev.bus_id), 219 dev_set_name(&card->dev, "%s:%04x", mmc_hostname(card->host), card->rca);
220 "%s:%04x", mmc_hostname(card->host), card->rca);
221 220
222 switch (card->type) { 221 switch (card->type) {
223 case MMC_TYPE_MMC: 222 case MMC_TYPE_MMC:
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 044d84eeed7c..f7284b905eb3 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -280,7 +280,11 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
280 (card->host->ios.clock / 1000); 280 (card->host->ios.clock / 1000);
281 281
282 if (data->flags & MMC_DATA_WRITE) 282 if (data->flags & MMC_DATA_WRITE)
283 limit_us = 250000; 283 /*
284 * The limit is really 250 ms, but that is
285 * insufficient for some crappy cards.
286 */
287 limit_us = 300000;
284 else 288 else
285 limit_us = 100000; 289 limit_us = 100000;
286 290
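[Editor's note, not part of the patch] The mmc core hunk only raises the ceiling applied to card-advertised write timeouts from 250 ms to 300 ms, because some cards understate how long their writes can take. The surrounding code (not shown in this hunk) converts the card's timeout into microseconds before clamping; a simplified, hedged sketch of that clamp, with an invented helper name:

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/math64.h>

/* Clamp a card-reported data timeout (ns) to a per-direction ceiling (us). */
static unsigned int example_clamp_timeout_us(u64 timeout_ns, bool is_write)
{
        unsigned int timeout_us = div_u64(timeout_ns, 1000);
        unsigned int limit_us = is_write ? 300000 : 100000;

        if (timeout_us > limit_us)
                timeout_us = limit_us;

        return timeout_us;
}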
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 6da80fd4d974..5e945e64ead7 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -73,8 +73,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
73 if (err) 73 if (err)
74 goto free; 74 goto free;
75 75
76 snprintf(host->class_dev.bus_id, BUS_ID_SIZE, 76 dev_set_name(&host->class_dev, "mmc%d", host->index);
77 "mmc%d", host->index);
78 77
79 host->parent = dev; 78 host->parent = dev;
80 host->class_dev.parent = dev; 79 host->class_dev.parent = dev;
@@ -121,7 +120,7 @@ int mmc_add_host(struct mmc_host *host)
121 WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) && 120 WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&
122 !host->ops->enable_sdio_irq); 121 !host->ops->enable_sdio_irq);
123 122
124 led_trigger_register_simple(host->class_dev.bus_id, &host->led); 123 led_trigger_register_simple(dev_name(&host->class_dev), &host->led);
125 124
126 err = device_add(&host->class_dev); 125 err = device_add(&host->class_dev);
127 if (err) 126 if (err)
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 233d0f9b3c4b..46284b527397 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -239,8 +239,7 @@ int sdio_add_func(struct sdio_func *func)
239{ 239{
240 int ret; 240 int ret;
241 241
242 snprintf(func->dev.bus_id, sizeof(func->dev.bus_id), 242 dev_set_name(&func->dev, "%s:%d", mmc_card_id(func->card), func->num);
243 "%s:%d", mmc_card_id(func->card), func->num);
244 243
245 ret = device_add(&func->dev); 244 ret = device_add(&func->dev);
246 if (ret == 0) 245 if (ret == 0)
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 07faf5412a1f..ad00e1632317 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1348,7 +1348,7 @@ static int mmc_spi_probe(struct spi_device *spi)
1348 goto fail_add_host; 1348 goto fail_add_host;
1349 1349
1350 dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n", 1350 dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n",
1351 mmc->class_dev.bus_id, 1351 dev_name(&mmc->class_dev),
1352 host->dma_dev ? "" : ", no DMA", 1352 host->dma_dev ? "" : ", no DMA",
1353 (host->pdata && host->pdata->get_ro) 1353 (host->pdata && host->pdata->get_ro)
1354 ? "" : ", no WP", 1354 ? "" : ", no WP",
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 30f64b1f2354..4d010a984bed 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1733,7 +1733,7 @@ int sdhci_add_host(struct sdhci_host *host)
1733 mmc_add_host(mmc); 1733 mmc_add_host(mmc);
1734 1734
1735 printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s%s\n", 1735 printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s%s\n",
1736 mmc_hostname(mmc), host->hw_name, mmc_dev(mmc)->bus_id, 1736 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
1737 (host->flags & SDHCI_USE_ADMA)?"A":"", 1737 (host->flags & SDHCI_USE_ADMA)?"A":"",
1738 (host->flags & SDHCI_USE_DMA)?"DMA":"PIO"); 1738 (host->flags & SDHCI_USE_DMA)?"DMA":"PIO");
1739 1739
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index 13844843e8de..82554ddec6b3 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -632,7 +632,7 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq)
632 632
633 if (host->req) { 633 if (host->req) {
634 printk(KERN_ERR "%s : unfinished request detected\n", 634 printk(KERN_ERR "%s : unfinished request detected\n",
635 sock->dev.bus_id); 635 dev_name(&sock->dev));
636 mrq->cmd->error = -ETIMEDOUT; 636 mrq->cmd->error = -ETIMEDOUT;
637 goto err_out; 637 goto err_out;
638 } 638 }
@@ -672,7 +672,7 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq)
672 ? PCI_DMA_TODEVICE 672 ? PCI_DMA_TODEVICE
673 : PCI_DMA_FROMDEVICE)) { 673 : PCI_DMA_FROMDEVICE)) {
674 printk(KERN_ERR "%s : scatterlist map failed\n", 674 printk(KERN_ERR "%s : scatterlist map failed\n",
675 sock->dev.bus_id); 675 dev_name(&sock->dev));
676 mrq->cmd->error = -ENOMEM; 676 mrq->cmd->error = -ENOMEM;
677 goto err_out; 677 goto err_out;
678 } 678 }
@@ -684,7 +684,7 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq)
684 : PCI_DMA_FROMDEVICE); 684 : PCI_DMA_FROMDEVICE);
685 if (host->sg_len < 1) { 685 if (host->sg_len < 1) {
686 printk(KERN_ERR "%s : scatterlist map failed\n", 686 printk(KERN_ERR "%s : scatterlist map failed\n",
687 sock->dev.bus_id); 687 dev_name(&sock->dev));
688 tifm_unmap_sg(sock, &host->bounce_buf, 1, 688 tifm_unmap_sg(sock, &host->bounce_buf, 1,
689 r_data->flags & MMC_DATA_WRITE 689 r_data->flags & MMC_DATA_WRITE
690 ? PCI_DMA_TODEVICE 690 ? PCI_DMA_TODEVICE
@@ -748,7 +748,7 @@ static void tifm_sd_end_cmd(unsigned long data)
748 748
749 if (!mrq) { 749 if (!mrq) {
750 printk(KERN_ERR " %s : no request to complete?\n", 750 printk(KERN_ERR " %s : no request to complete?\n",
751 sock->dev.bus_id); 751 dev_name(&sock->dev));
752 spin_unlock_irqrestore(&sock->lock, flags); 752 spin_unlock_irqrestore(&sock->lock, flags);
753 return; 753 return;
754 } 754 }
@@ -789,7 +789,7 @@ static void tifm_sd_abort(unsigned long data)
789 printk(KERN_ERR 789 printk(KERN_ERR
790 "%s : card failed to respond for a long period of time " 790 "%s : card failed to respond for a long period of time "
791 "(%x, %x)\n", 791 "(%x, %x)\n",
792 host->dev->dev.bus_id, host->req->cmd->opcode, host->cmd_flags); 792 dev_name(&host->dev->dev), host->req->cmd->opcode, host->cmd_flags);
793 793
794 tifm_eject(host->dev); 794 tifm_eject(host->dev);
795} 795}
@@ -906,7 +906,7 @@ static int tifm_sd_initialize_host(struct tifm_sd *host)
906 906
907 if (rc) { 907 if (rc) {
908 printk(KERN_ERR "%s : controller failed to reset\n", 908 printk(KERN_ERR "%s : controller failed to reset\n",
909 sock->dev.bus_id); 909 dev_name(&sock->dev));
910 return -ENODEV; 910 return -ENODEV;
911 } 911 }
912 912
@@ -933,7 +933,7 @@ static int tifm_sd_initialize_host(struct tifm_sd *host)
933 if (rc) { 933 if (rc) {
934 printk(KERN_ERR 934 printk(KERN_ERR
935 "%s : card not ready - probe failed on initialization\n", 935 "%s : card not ready - probe failed on initialization\n",
936 sock->dev.bus_id); 936 dev_name(&sock->dev));
937 return -ENODEV; 937 return -ENODEV;
938 } 938 }
939 939
@@ -954,7 +954,7 @@ static int tifm_sd_probe(struct tifm_dev *sock)
954 if (!(TIFM_SOCK_STATE_OCCUPIED 954 if (!(TIFM_SOCK_STATE_OCCUPIED
955 & readl(sock->addr + SOCK_PRESENT_STATE))) { 955 & readl(sock->addr + SOCK_PRESENT_STATE))) {
956 printk(KERN_WARNING "%s : card gone, unexpectedly\n", 956 printk(KERN_WARNING "%s : card gone, unexpectedly\n",
957 sock->dev.bus_id); 957 dev_name(&sock->dev));
958 return rc; 958 return rc;
959 } 959 }
960 960
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 3e6f5d8609e8..d74ec46aa032 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -406,19 +406,6 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
406 /* Set the default CFI lock/unlock addresses */ 406 /* Set the default CFI lock/unlock addresses */
407 cfi->addr_unlock1 = 0x555; 407 cfi->addr_unlock1 = 0x555;
408 cfi->addr_unlock2 = 0x2aa; 408 cfi->addr_unlock2 = 0x2aa;
409 /* Modify the unlock address if we are in compatibility mode */
410 if ( /* x16 in x8 mode */
411 ((cfi->device_type == CFI_DEVICETYPE_X8) &&
412 (cfi->cfiq->InterfaceDesc ==
413 CFI_INTERFACE_X8_BY_X16_ASYNC)) ||
414 /* x32 in x16 mode */
415 ((cfi->device_type == CFI_DEVICETYPE_X16) &&
416 (cfi->cfiq->InterfaceDesc ==
417 CFI_INTERFACE_X16_BY_X32_ASYNC)))
418 {
419 cfi->addr_unlock1 = 0xaaa;
420 cfi->addr_unlock2 = 0x555;
421 }
422 409
423 } /* CFI mode */ 410 } /* CFI mode */
424 else if (cfi->cfi_mode == CFI_MODE_JEDEC) { 411 else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index f84ab6182148..2f3f2f719ba4 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -1808,9 +1808,7 @@ static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base,
1808 * several first banks can contain 0x7f instead of actual ID 1808 * several first banks can contain 0x7f instead of actual ID
1809 */ 1809 */
1810 do { 1810 do {
1811 uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), 1811 uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), map, cfi);
1812 cfi_interleave(cfi),
1813 cfi->device_type);
1814 mask = (1 << (cfi->device_type * 8)) - 1; 1812 mask = (1 << (cfi->device_type * 8)) - 1;
1815 result = map_read(map, base + ofs); 1813 result = map_read(map, base + ofs);
1816 bank++; 1814 bank++;
@@ -1824,7 +1822,7 @@ static inline u32 jedec_read_id(struct map_info *map, uint32_t base,
1824{ 1822{
1825 map_word result; 1823 map_word result;
1826 unsigned long mask; 1824 unsigned long mask;
1827 u32 ofs = cfi_build_cmd_addr(1, cfi_interleave(cfi), cfi->device_type); 1825 u32 ofs = cfi_build_cmd_addr(1, map, cfi);
1828 mask = (1 << (cfi->device_type * 8)) -1; 1826 mask = (1 << (cfi->device_type * 8)) -1;
1829 result = map_read(map, base + ofs); 1827 result = map_read(map, base + ofs);
1830 return result.x[0] & mask; 1828 return result.x[0] & mask;
@@ -2067,8 +2065,8 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
2067 2065
2068 } 2066 }
2069 /* Ensure the unlock addresses we try stay inside the map */ 2067 /* Ensure the unlock addresses we try stay inside the map */
2070 probe_offset1 = cfi_build_cmd_addr(cfi->addr_unlock1, cfi_interleave(cfi), cfi->device_type); 2068 probe_offset1 = cfi_build_cmd_addr(cfi->addr_unlock1, map, cfi);
2071 probe_offset2 = cfi_build_cmd_addr(cfi->addr_unlock2, cfi_interleave(cfi), cfi->device_type); 2069 probe_offset2 = cfi_build_cmd_addr(cfi->addr_unlock2, map, cfi);
2072 if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) || 2070 if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) ||
2073 ((base + probe_offset2 + map_bankwidth(map)) >= map->size)) 2071 ((base + probe_offset2 + map_bankwidth(map)) >= map->size))
2074 goto retry; 2072 goto retry;
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 8387e05daae2..e39b21d3e168 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -38,7 +38,6 @@
38#include <asm/arch/gpmc.h> 38#include <asm/arch/gpmc.h>
39#include <asm/arch/onenand.h> 39#include <asm/arch/onenand.h>
40#include <asm/arch/gpio.h> 40#include <asm/arch/gpio.h>
41#include <asm/arch/gpmc.h>
42#include <asm/arch/pm.h> 41#include <asm/arch/pm.h>
43 42
44#include <linux/dma-mapping.h> 43#include <linux/dma-mapping.h>
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f749b40f954e..11f143f4adf6 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2010,9 +2010,13 @@ config IGB_LRO
2010 If in doubt, say N. 2010 If in doubt, say N.
2011 2011
2012config IGB_DCA 2012config IGB_DCA
2013 bool "Enable DCA" 2013 bool "Direct Cache Access (DCA) Support"
2014 default y 2014 default y
2015 depends on IGB && DCA && !(IGB=y && DCA=m) 2015 depends on IGB && DCA && !(IGB=y && DCA=m)
2016 ---help---
2017 Say Y here if you want to use Direct Cache Access (DCA) in the
2018 driver. DCA is a method for warming the CPU cache before data
2019 is used, with the intent of lessening the impact of cache misses.
2016 2020
2017source "drivers/net/ixp2000/Kconfig" 2021source "drivers/net/ixp2000/Kconfig"
2018 2022
@@ -2437,9 +2441,13 @@ config IXGBE
2437 will be called ixgbe. 2441 will be called ixgbe.
2438 2442
2439config IXGBE_DCA 2443config IXGBE_DCA
2440 bool 2444 bool "Direct Cache Access (DCA) Support"
2441 default y 2445 default y
2442 depends on IXGBE && DCA && !(IXGBE=y && DCA=m) 2446 depends on IXGBE && DCA && !(IXGBE=y && DCA=m)
2447 ---help---
2448 Say Y here if you want to use Direct Cache Access (DCA) in the
2449 driver. DCA is a method for warming the CPU cache before data
2450 is used, with the intent of lessening the impact of cache misses.
2443 2451
2444config IXGB 2452config IXGB
2445 tristate "Intel(R) PRO/10GbE support" 2453 tristate "Intel(R) PRO/10GbE support"
@@ -2489,9 +2497,13 @@ config MYRI10GE
2489 will be called myri10ge. 2497 will be called myri10ge.
2490 2498
2491config MYRI10GE_DCA 2499config MYRI10GE_DCA
2492 bool 2500 bool "Direct Cache Access (DCA) Support"
2493 default y 2501 default y
2494 depends on MYRI10GE && DCA && !(MYRI10GE=y && DCA=m) 2502 depends on MYRI10GE && DCA && !(MYRI10GE=y && DCA=m)
2503 ---help---
2504 Say Y here if you want to use Direct Cache Access (DCA) in the
2505 driver. DCA is a method for warming the CPU cache before data
2506 is used, with the intent of lessening the impact of cache misses.
2495 2507
2496config NETXEN_NIC 2508config NETXEN_NIC
2497 tristate "NetXen Multi port (1/10) Gigabit Ethernet NIC" 2509 tristate "NetXen Multi port (1/10) Gigabit Ethernet NIC"
diff --git a/drivers/net/atl1e/atl1e.h b/drivers/net/atl1e/atl1e.h
index b645fa0f3f64..c49550d507a0 100644
--- a/drivers/net/atl1e/atl1e.h
+++ b/drivers/net/atl1e/atl1e.h
@@ -46,7 +46,6 @@
46#include <linux/vmalloc.h> 46#include <linux/vmalloc.h>
47#include <linux/pagemap.h> 47#include <linux/pagemap.h>
48#include <linux/tcp.h> 48#include <linux/tcp.h>
49#include <linux/mii.h>
50#include <linux/ethtool.h> 49#include <linux/ethtool.h>
51#include <linux/if_vlan.h> 50#include <linux/if_vlan.h>
52#include <linux/workqueue.h> 51#include <linux/workqueue.h>
diff --git a/drivers/net/bnx2x_init.h b/drivers/net/bnx2x_init.h
index 130927cfc75b..a6c0b3abba29 100644
--- a/drivers/net/bnx2x_init.h
+++ b/drivers/net/bnx2x_init.h
@@ -564,14 +564,15 @@ static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
564 564
565static void bnx2x_init_pxp(struct bnx2x *bp) 565static void bnx2x_init_pxp(struct bnx2x *bp)
566{ 566{
567 u16 devctl;
567 int r_order, w_order; 568 int r_order, w_order;
568 u32 val, i; 569 u32 val, i;
569 570
570 pci_read_config_word(bp->pdev, 571 pci_read_config_word(bp->pdev,
571 bp->pcie_cap + PCI_EXP_DEVCTL, (u16 *)&val); 572 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
572 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", (u16)val); 573 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
573 w_order = ((val & PCI_EXP_DEVCTL_PAYLOAD) >> 5); 574 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
574 r_order = ((val & PCI_EXP_DEVCTL_READRQ) >> 12); 575 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
575 576
576 if (r_order > MAX_RD_ORD) { 577 if (r_order > MAX_RD_ORD) {
577 DP(NETIF_MSG_HW, "read order of %d order adjusted to %d\n", 578 DP(NETIF_MSG_HW, "read order of %d order adjusted to %d\n",
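[Editor's note, not part of the patch] The bnx2x_init.h fix is a type bug: pci_read_config_word() stores 16 bits, so aiming it at a u32 through a (u16 *) cast fills only half of the variable, and which half depends on endianness. Reading into a real u16 is the idiomatic form, roughly as below (example_* helper invented for illustration):

#include <linux/pci.h>
#include <linux/pci_regs.h>

/* Decode payload and read-request sizes from PCIe Device Control.
 * @pcie_cap: offset of the PCI Express capability in config space. */
static void example_read_devctl(struct pci_dev *pdev, int pcie_cap,
                                int *w_order, int *r_order)
{
        u16 devctl;

        pci_read_config_word(pdev, pcie_cap + PCI_EXP_DEVCTL, &devctl);
        *w_order = (devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
        *r_order = (devctl & PCI_EXP_DEVCTL_READRQ) >> 12;
}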
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index fce745148ff9..600210d7eff9 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -59,8 +59,8 @@
59#include "bnx2x.h" 59#include "bnx2x.h"
60#include "bnx2x_init.h" 60#include "bnx2x_init.h"
61 61
62#define DRV_MODULE_VERSION "1.45.22" 62#define DRV_MODULE_VERSION "1.45.23"
63#define DRV_MODULE_RELDATE "2008/09/09" 63#define DRV_MODULE_RELDATE "2008/11/03"
64#define BNX2X_BC_VER 0x040200 64#define BNX2X_BC_VER 0x040200
65 65
66/* Time in jiffies before concluding the transmitter is hung */ 66/* Time in jiffies before concluding the transmitter is hung */
@@ -6481,6 +6481,7 @@ load_int_disable:
6481 bnx2x_free_irq(bp); 6481 bnx2x_free_irq(bp);
6482load_error: 6482load_error:
6483 bnx2x_free_mem(bp); 6483 bnx2x_free_mem(bp);
6484 bp->port.pmf = 0;
6484 6485
6485 /* TBD we really need to reset the chip 6486 /* TBD we really need to reset the chip
6486 if we want to recover from this */ 6487 if we want to recover from this */
@@ -6791,6 +6792,7 @@ unload_error:
6791 /* Report UNLOAD_DONE to MCP */ 6792 /* Report UNLOAD_DONE to MCP */
6792 if (!BP_NOMCP(bp)) 6793 if (!BP_NOMCP(bp))
6793 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); 6794 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6795 bp->port.pmf = 0;
6794 6796
6795 /* Free SKBs, SGEs, TPA pool and driver internals */ 6797 /* Free SKBs, SGEs, TPA pool and driver internals */
6796 bnx2x_free_skbs(bp); 6798 bnx2x_free_skbs(bp);
@@ -10204,8 +10206,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10204 return -ENOMEM; 10206 return -ENOMEM;
10205 } 10207 }
10206 10208
10207 netif_carrier_off(dev);
10208
10209 bp = netdev_priv(dev); 10209 bp = netdev_priv(dev);
10210 bp->msglevel = debug; 10210 bp->msglevel = debug;
10211 10211
@@ -10229,6 +10229,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10229 goto init_one_exit; 10229 goto init_one_exit;
10230 } 10230 }
10231 10231
10232 netif_carrier_off(dev);
10233
10232 bp->common.name = board_info[ent->driver_data].name; 10234 bp->common.name = board_info[ent->driver_data].name;
10233 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx," 10235 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10234 " IRQ %d, ", dev->name, bp->common.name, 10236 " IRQ %d, ", dev->name, bp->common.name,
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index cb51c1fb0338..a6f49d025787 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -1099,7 +1099,9 @@ static int __devinit fs_enet_probe(struct of_device *ofdev,
1099 ndev->stop = fs_enet_close; 1099 ndev->stop = fs_enet_close;
1100 ndev->get_stats = fs_enet_get_stats; 1100 ndev->get_stats = fs_enet_get_stats;
1101 ndev->set_multicast_list = fs_set_multicast_list; 1101 ndev->set_multicast_list = fs_set_multicast_list;
1102 1102#ifdef CONFIG_NET_POLL_CONTROLLER
1103 ndev->poll_controller = fs_enet_netpoll;
1104#endif
1103 if (fpi->use_napi) 1105 if (fpi->use_napi)
1104 netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, 1106 netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi,
1105 fpi->napi_weight); 1107 fpi->napi_weight);
@@ -1209,7 +1211,7 @@ static void __exit fs_cleanup(void)
1209static void fs_enet_netpoll(struct net_device *dev) 1211static void fs_enet_netpoll(struct net_device *dev)
1210{ 1212{
1211 disable_irq(dev->irq); 1213 disable_irq(dev->irq);
1212 fs_enet_interrupt(dev->irq, dev, NULL); 1214 fs_enet_interrupt(dev->irq, dev);
1213 enable_irq(dev->irq); 1215 enable_irq(dev->irq);
1214} 1216}
1215#endif 1217#endif
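[Editor's note, not part of the patch] The fs_enet hunk wires .poll_controller up under CONFIG_NET_POLL_CONTROLLER and drops the stale third argument from the interrupt call, since interrupt handlers take only (int irq, void *dev_id) after the pt_regs parameter was removed. The general pattern, sketched with hypothetical example_* names:

#include <linux/netdevice.h>
#include <linux/interrupt.h>

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
        /* struct net_device *dev = dev_id; ... normal RX/TX handling ... */
        return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Called by netpoll/netconsole when interrupts may be unavailable. */
static void example_netpoll(struct net_device *dev)
{
        disable_irq(dev->irq);
        example_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif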
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index a9c8c08044b1..b9dcdbd369f8 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1066,9 +1066,12 @@ static int smi_wait_ready(struct mv643xx_eth_shared_private *msp)
1066 return 0; 1066 return 0;
1067 } 1067 }
1068 1068
1069 if (!wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp), 1069 if (!smi_is_done(msp)) {
1070 msecs_to_jiffies(100))) 1070 wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp),
1071 return -ETIMEDOUT; 1071 msecs_to_jiffies(100));
1072 if (!smi_is_done(msp))
1073 return -ETIMEDOUT;
1074 }
1072 1075
1073 return 0; 1076 return 0;
1074} 1077}
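[Editor's note, not part of the patch] The mv643xx_eth change avoids a false -ETIMEDOUT: wait_event_timeout() can return 0 when the task is woken just as the timeout expires, even though the condition is now true, so the condition is re-checked instead of trusting the return value alone. A compact sketch of the idiom, with an invented example_ready flag:

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wait);
static int example_ready;       /* set to 1 by the completion path */

static int example_wait_ready(void)
{
        if (example_ready)
                return 0;

        wait_event_timeout(example_wait, example_ready,
                           msecs_to_jiffies(100));

        /* Re-check: a zero return does not prove the condition is false. */
        return example_ready ? 0 : -ETIMEDOUT;
}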
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index ebc812702903..9acb5d70a3ae 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -8667,7 +8667,6 @@ static void __devinit niu_device_announce(struct niu *np)
8667static int __devinit niu_pci_init_one(struct pci_dev *pdev, 8667static int __devinit niu_pci_init_one(struct pci_dev *pdev,
8668 const struct pci_device_id *ent) 8668 const struct pci_device_id *ent)
8669{ 8669{
8670 unsigned long niureg_base, niureg_len;
8671 union niu_parent_id parent_id; 8670 union niu_parent_id parent_id;
8672 struct net_device *dev; 8671 struct net_device *dev;
8673 struct niu *np; 8672 struct niu *np;
@@ -8758,10 +8757,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
8758 8757
8759 dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM); 8758 dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM);
8760 8759
8761 niureg_base = pci_resource_start(pdev, 0); 8760 np->regs = pci_ioremap_bar(pdev, 0);
8762 niureg_len = pci_resource_len(pdev, 0);
8763
8764 np->regs = ioremap_nocache(niureg_base, niureg_len);
8765 if (!np->regs) { 8761 if (!np->regs) {
8766 dev_err(&pdev->dev, PFX "Cannot map device registers, " 8762 dev_err(&pdev->dev, PFX "Cannot map device registers, "
8767 "aborting.\n"); 8763 "aborting.\n");
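[Editor's note, not part of the patch] pci_ioremap_bar() bundles the pci_resource_start()/pci_resource_len()/ioremap_nocache() triple into one call that also sanity-checks the BAR, which is all the niu conversion above relies on. A short, hedged sketch of a probe-time mapping using it:

#include <linux/pci.h>
#include <linux/io.h>

/* Map BAR 0 of a PCI device; returns NULL on failure. */
static void __iomem *example_map_regs(struct pci_dev *pdev)
{
        void __iomem *regs = pci_ioremap_bar(pdev, 0);

        if (!regs)
                dev_err(&pdev->dev, "cannot map device registers\n");

        return regs;
}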
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index f59c7772f344..5051554ff05b 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -499,7 +499,7 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
499#else 499#else
500 SMC_PUSH_DATA(lp, buf, len); 500 SMC_PUSH_DATA(lp, buf, len);
501 dev->trans_start = jiffies; 501 dev->trans_start = jiffies;
502 dev_kfree_skb(skb); 502 dev_kfree_skb_irq(skb);
503#endif 503#endif
504 if (!lp->tx_throttle) { 504 if (!lp->tx_throttle) {
505 netif_wake_queue(dev); 505 netif_wake_queue(dev);
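[Editor's note, not part of the patch] The smc911x change swaps dev_kfree_skb() for dev_kfree_skb_irq() because this transmit-completion path can run in hard-interrupt context, where the plain variant must not be called. The rule of thumb, sketched with a hypothetical completion handler (dev_kfree_skb_any() is the choice when the context is unknown):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Complete a transmitted skb from a context that may be in_irq(). */
static void example_tx_complete(struct net_device *dev, struct sk_buff *skb)
{
        dev_kfree_skb_irq(skb);         /* defers the actual free */
        netif_wake_queue(dev);
}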
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 6f9895d4e5bd..fc80f250da31 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -2060,6 +2060,7 @@ static int smc_request_attrib(struct platform_device *pdev,
2060 struct net_device *ndev) 2060 struct net_device *ndev)
2061{ 2061{
2062 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib"); 2062 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
2063 struct smc_local *lp __maybe_unused = netdev_priv(ndev);
2063 2064
2064 if (!res) 2065 if (!res)
2065 return 0; 2066 return 0;
@@ -2074,6 +2075,7 @@ static void smc_release_attrib(struct platform_device *pdev,
2074 struct net_device *ndev) 2075 struct net_device *ndev)
2075{ 2076{
2076 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib"); 2077 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
2078 struct smc_local *lp __maybe_unused = netdev_priv(ndev);
2077 2079
2078 if (res) 2080 if (res)
2079 release_mem_region(res->start, ATTRIB_SIZE); 2081 release_mem_region(res->start, ATTRIB_SIZE);
diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c
index cfbbfee55836..85f38a6b6a49 100644
--- a/drivers/net/ucc_geth_ethtool.c
+++ b/drivers/net/ucc_geth_ethtool.c
@@ -37,7 +37,6 @@
37#include <asm/irq.h> 37#include <asm/irq.h>
38#include <asm/uaccess.h> 38#include <asm/uaccess.h>
39#include <asm/types.h> 39#include <asm/types.h>
40#include <asm/uaccess.h>
41 40
42#include "ucc_geth.h" 41#include "ucc_geth.h"
43#include "ucc_geth_mii.h" 42#include "ucc_geth_mii.h"
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index 9e47d727e220..cfd4d052d666 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -2942,8 +2942,10 @@ static void ath5k_configure_filter(struct ieee80211_hw *hw,
2942 sc->opmode != NL80211_IFTYPE_MESH_POINT && 2942 sc->opmode != NL80211_IFTYPE_MESH_POINT &&
2943 test_bit(ATH_STAT_PROMISC, sc->status)) 2943 test_bit(ATH_STAT_PROMISC, sc->status))
2944 rfilt |= AR5K_RX_FILTER_PROM; 2944 rfilt |= AR5K_RX_FILTER_PROM;
2945 if (sc->opmode == NL80211_IFTYPE_ADHOC) 2945 if (sc->opmode == NL80211_IFTYPE_STATION ||
2946 sc->opmode == NL80211_IFTYPE_ADHOC) {
2946 rfilt |= AR5K_RX_FILTER_BEACON; 2947 rfilt |= AR5K_RX_FILTER_BEACON;
2948 }
2947 2949
2948 /* Set filters */ 2950 /* Set filters */
2949 ath5k_hw_set_rx_filter(ah,rfilt); 2951 ath5k_hw_set_rx_filter(ah,rfilt);
diff --git a/drivers/net/wireless/ath5k/desc.c b/drivers/net/wireless/ath5k/desc.c
index dd1374052ba9..5e362a7a3620 100644
--- a/drivers/net/wireless/ath5k/desc.c
+++ b/drivers/net/wireless/ath5k/desc.c
@@ -531,10 +531,10 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
531 AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL); 531 AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL);
532 rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0, 532 rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
533 AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE); 533 AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE);
534 rs->rs_antenna = rx_status->rx_status_0 & 534 rs->rs_antenna = AR5K_REG_MS(rx_status->rx_status_0,
535 AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA; 535 AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA);
536 rs->rs_more = rx_status->rx_status_0 & 536 rs->rs_more = !!(rx_status->rx_status_0 &
537 AR5K_5210_RX_DESC_STATUS0_MORE; 537 AR5K_5210_RX_DESC_STATUS0_MORE);
538 /* TODO: this timestamp is 13 bit, later on we assume 15 bit */ 538 /* TODO: this timestamp is 13 bit, later on we assume 15 bit */
539 rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1, 539 rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
540 AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP); 540 AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
@@ -607,10 +607,10 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
607 AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL); 607 AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL);
608 rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0, 608 rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
609 AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE); 609 AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE);
610 rs->rs_antenna = rx_status->rx_status_0 & 610 rs->rs_antenna = AR5K_REG_MS(rx_status->rx_status_0,
611 AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA; 611 AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA);
612 rs->rs_more = rx_status->rx_status_0 & 612 rs->rs_more = !!(rx_status->rx_status_0 &
613 AR5K_5212_RX_DESC_STATUS0_MORE; 613 AR5K_5212_RX_DESC_STATUS0_MORE);
614 rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1, 614 rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
615 AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP); 615 AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
616 rs->rs_status = 0; 616 rs->rs_status = 0;
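[Editor's note, not part of the patch] The ath5k descriptor hunks stop storing a masked-but-unshifted value in rs_antenna and normalise the "more" flag to 0/1 with !!. AR5K_REG_MS() is the driver's mask-and-shift helper; the generic shape of that pattern, using invented EXAMPLE_* field names rather than real ath5k registers:

#include <linux/types.h>

/* Illustrative field definitions: a mask plus the shift of its LSB. */
#define EXAMPLE_ANTENNA         0x00f00000
#define EXAMPLE_ANTENNA_S       20
#define EXAMPLE_MORE            0x00000010

/* Mask-and-shift accessor in the AR5K_REG_MS() style. */
#define EXAMPLE_REG_MS(val, field)      (((val) & (field)) >> field##_S)

static void example_decode(u32 status, u8 *antenna, u8 *more)
{
        *antenna = EXAMPLE_REG_MS(status, EXAMPLE_ANTENNA);
        *more = !!(status & EXAMPLE_MORE);      /* normalise to 0 or 1 */
}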
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 321dbc8c034a..8d690a0eb1a9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -3252,7 +3252,11 @@ static void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
3252 return; 3252 return;
3253 } 3253 }
3254 3254
3255 iwl_scan_cancel_timeout(priv, 100); 3255 if (iwl_scan_cancel(priv)) {
3256 /* cancel scan failed, just live w/ bad key and rely
3257 briefly on SW decryption */
3258 return;
3259 }
3256 3260
3257 key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK); 3261 key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
3258 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); 3262 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 3b0bee331a33..c89365e2ca58 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -896,6 +896,13 @@ static void iwl_bg_request_scan(struct work_struct *data)
896 return; 896 return;
897 897
898 done: 898 done:
899 /* Cannot perform scan. Make sure we clear scanning
900 * bits from status so the next scan request can be performed.
901 * If we don't clear the scanning status bits here, all
902 * subsequent scans will fail.
903 */
904 clear_bit(STATUS_SCAN_HW, &priv->status);
905 clear_bit(STATUS_SCANNING, &priv->status);
899 /* inform mac80211 scan aborted */ 906 /* inform mac80211 scan aborted */
900 queue_work(priv->workqueue, &priv->scan_completed); 907 queue_work(priv->workqueue, &priv->scan_completed);
901 mutex_unlock(&priv->mutex); 908 mutex_unlock(&priv->mutex);
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index d15a2c997954..285b53e7e261 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -5768,7 +5768,6 @@ static void iwl3945_alive_start(struct iwl3945_priv *priv)
5768 if (priv->error_recovering) 5768 if (priv->error_recovering)
5769 iwl3945_error_recovery(priv); 5769 iwl3945_error_recovery(priv);
5770 5770
5771 ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC);
5772 return; 5771 return;
5773 5772
5774 restart: 5773 restart:
@@ -6013,6 +6012,7 @@ static void iwl3945_bg_alive_start(struct work_struct *data)
6013 mutex_lock(&priv->mutex); 6012 mutex_lock(&priv->mutex);
6014 iwl3945_alive_start(priv); 6013 iwl3945_alive_start(priv);
6015 mutex_unlock(&priv->mutex); 6014 mutex_unlock(&priv->mutex);
6015 ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC);
6016} 6016}
6017 6017
6018static void iwl3945_bg_rf_kill(struct work_struct *work) 6018static void iwl3945_bg_rf_kill(struct work_struct *work)
@@ -6256,6 +6256,11 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6256 n_probes, 6256 n_probes,
6257 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]); 6257 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
6258 6258
6259 if (scan->channel_count == 0) {
6260 IWL_DEBUG_SCAN("channel count %d\n", scan->channel_count);
6261 goto done;
6262 }
6263
6259 cmd.len += le16_to_cpu(scan->tx_cmd.len) + 6264 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
6260 scan->channel_count * sizeof(struct iwl3945_scan_channel); 6265 scan->channel_count * sizeof(struct iwl3945_scan_channel);
6261 cmd.data = scan; 6266 cmd.data = scan;
@@ -6273,6 +6278,14 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6273 return; 6278 return;
6274 6279
6275 done: 6280 done:
6281 /* Cannot perform scan. Make sure we clear scanning
6282 * bits from status so the next scan request can be performed.
6283 * If we don't clear the scanning status bits here, all
6284 * subsequent scans will fail.
6285 */
6286 clear_bit(STATUS_SCAN_HW, &priv->status);
6287 clear_bit(STATUS_SCANNING, &priv->status);
6288
6276 /* inform mac80211 scan aborted */ 6289 /* inform mac80211 scan aborted */
6277 queue_work(priv->workqueue, &priv->scan_completed); 6290 queue_work(priv->workqueue, &priv->scan_completed);
6278 mutex_unlock(&priv->mutex); 6291 mutex_unlock(&priv->mutex);
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index a60ae86bd5c9..a3ccd8c1c716 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -61,6 +61,7 @@ static struct usb_device_id usb_ids[] = {
61 { USB_DEVICE(0x0105, 0x145f), .driver_info = DEVICE_ZD1211 }, 61 { USB_DEVICE(0x0105, 0x145f), .driver_info = DEVICE_ZD1211 },
62 /* ZD1211B */ 62 /* ZD1211B */
63 { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B }, 63 { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B },
64 { USB_DEVICE(0x0ace, 0xb215), .driver_info = DEVICE_ZD1211B },
64 { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B }, 65 { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B },
65 { USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B }, 66 { USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B },
66 { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B }, 67 { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B },
@@ -82,6 +83,7 @@ static struct usb_device_id usb_ids[] = {
82 { USB_DEVICE(0x0cde, 0x001a), .driver_info = DEVICE_ZD1211B }, 83 { USB_DEVICE(0x0cde, 0x001a), .driver_info = DEVICE_ZD1211B },
83 { USB_DEVICE(0x0586, 0x340a), .driver_info = DEVICE_ZD1211B }, 84 { USB_DEVICE(0x0586, 0x340a), .driver_info = DEVICE_ZD1211B },
84 { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B }, 85 { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B },
86 { USB_DEVICE(0x07fa, 0x1196), .driver_info = DEVICE_ZD1211B },
85 /* "Driverless" devices that need ejecting */ 87 /* "Driverless" devices that need ejecting */
86 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER }, 88 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
87 { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER }, 89 { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER },
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
index d962ba0dd87a..191a3202cecc 100644
--- a/drivers/oprofile/event_buffer.c
+++ b/drivers/oprofile/event_buffer.c
@@ -105,7 +105,7 @@ static int event_buffer_open(struct inode *inode, struct file *file)
105 if (!capable(CAP_SYS_ADMIN)) 105 if (!capable(CAP_SYS_ADMIN))
106 return -EPERM; 106 return -EPERM;
107 107
108 if (test_and_set_bit(0, &buffer_opened)) 108 if (test_and_set_bit_lock(0, &buffer_opened))
109 return -EBUSY; 109 return -EBUSY;
110 110
111 /* Register as a user of dcookies 111 /* Register as a user of dcookies
@@ -129,7 +129,7 @@ static int event_buffer_open(struct inode *inode, struct file *file)
129fail: 129fail:
130 dcookie_unregister(file->private_data); 130 dcookie_unregister(file->private_data);
131out: 131out:
132 clear_bit(0, &buffer_opened); 132 __clear_bit_unlock(0, &buffer_opened);
133 return err; 133 return err;
134} 134}
135 135
@@ -141,7 +141,7 @@ static int event_buffer_release(struct inode *inode, struct file *file)
141 dcookie_unregister(file->private_data); 141 dcookie_unregister(file->private_data);
142 buffer_pos = 0; 142 buffer_pos = 0;
143 atomic_set(&buffer_ready, 0); 143 atomic_set(&buffer_ready, 0);
144 clear_bit(0, &buffer_opened); 144 __clear_bit_unlock(0, &buffer_opened);
145 return 0; 145 return 0;
146} 146}
147 147
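[Editor's note, not part of the patch] The oprofile change moves the single-open gate to the lock-flavoured bit operations: test_and_set_bit_lock() gives acquire semantics on success and __clear_bit_unlock() the matching release, so the flag behaves like a tiny lock rather than a bare bit. Sketch of the pattern with a hypothetical flag word:

#include <linux/bitops.h>
#include <linux/errno.h>

static unsigned long example_opened;

static int example_open(void)
{
        /* Acquire: fail with -EBUSY if bit 0 is already held. */
        if (test_and_set_bit_lock(0, &example_opened))
                return -EBUSY;
        return 0;
}

static void example_release(void)
{
        /* Release: pairs with the acquire in example_open(). */
        __clear_bit_unlock(0, &example_opened);
}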
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 110022d78689..5d72866897a8 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -575,7 +575,7 @@ static int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct
575 575
576 nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 576 nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
577 start = vma->vm_pgoff; 577 start = vma->vm_pgoff;
578 size = pci_resource_len(pdev, resno) >> PAGE_SHIFT; 578 size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
579 if (start < size && size - start >= nr) 579 if (start < size && size - start >= nr)
580 return 1; 580 return 1;
581 WARN(1, "process \"%s\" tried to map 0x%08lx-0x%08lx on %s BAR %d (size 0x%08lx)\n", 581 WARN(1, "process \"%s\" tried to map 0x%08lx-0x%08lx on %s BAR %d (size 0x%08lx)\n",
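[Editor's note, not part of the patch] The pci-sysfs fix rounds the BAR length up to whole pages instead of down, so a resource smaller than one page still counts as one mappable page. ((len - 1) >> PAGE_SHIFT) + 1 is the shift form of DIV_ROUND_UP(len, PAGE_SIZE); as a tiny sketch (helper name invented, len assumed non-zero):

#include <linux/types.h>
#include <asm/page.h>

/* Number of pages needed to cover a resource of len bytes (len > 0). */
static unsigned long example_pages_for_len(resource_size_t len)
{
        return (unsigned long)((len - 1) >> PAGE_SHIFT) + 1;
}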
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index bbf66ea8fd87..5049a47030ac 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1692,24 +1692,24 @@ static void __devinit quirk_brcm_570x_limit_vpd(struct pci_dev *dev)
1692 } 1692 }
1693} 1693}
1694 1694
1695DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 1695DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
1696 PCI_DEVICE_ID_NX2_5706, 1696 PCI_DEVICE_ID_NX2_5706,
1697 quirk_brcm_570x_limit_vpd); 1697 quirk_brcm_570x_limit_vpd);
1698DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 1698DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
1699 PCI_DEVICE_ID_NX2_5706S, 1699 PCI_DEVICE_ID_NX2_5706S,
1700 quirk_brcm_570x_limit_vpd); 1700 quirk_brcm_570x_limit_vpd);
1701DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 1701DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
1702 PCI_DEVICE_ID_NX2_5708, 1702 PCI_DEVICE_ID_NX2_5708,
1703 quirk_brcm_570x_limit_vpd); 1703 quirk_brcm_570x_limit_vpd);
1704DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 1704DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
1705 PCI_DEVICE_ID_NX2_5708S, 1705 PCI_DEVICE_ID_NX2_5708S,
1706 quirk_brcm_570x_limit_vpd); 1706 quirk_brcm_570x_limit_vpd);
1707DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 1707DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
1708 PCI_DEVICE_ID_NX2_5709, 1708 PCI_DEVICE_ID_NX2_5709,
1709 quirk_brcm_570x_limit_vpd); 1709 quirk_brcm_570x_limit_vpd);
1710DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 1710DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
1711 PCI_DEVICE_ID_NX2_5709S, 1711 PCI_DEVICE_ID_NX2_5709S,
1712 quirk_brcm_570x_limit_vpd); 1712 quirk_brcm_570x_limit_vpd);
1713 1713
1714#ifdef CONFIG_PCI_MSI 1714#ifdef CONFIG_PCI_MSI
1715/* Some chipsets do not support MSI. We cannot easily rely on setting 1715/* Some chipsets do not support MSI. We cannot easily rely on setting
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index 1f5f6143f35c..132a78159b60 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -100,7 +100,8 @@ size_t pci_get_rom_size(void __iomem *rom, size_t size)
100 * pci_map_rom - map a PCI ROM to kernel space 100 * pci_map_rom - map a PCI ROM to kernel space
101 * @pdev: pointer to pci device struct 101 * @pdev: pointer to pci device struct
102 * @size: pointer to receive size of pci window over ROM 102 * @size: pointer to receive size of pci window over ROM
103 * @return: kernel virtual pointer to image of ROM 103 *
104 * Return: kernel virtual pointer to image of ROM
104 * 105 *
105 * Map a PCI ROM into kernel space. If ROM is boot video ROM, 106 * Map a PCI ROM into kernel space. If ROM is boot video ROM,
106 * the shadow BIOS copy will be returned instead of the 107 * the shadow BIOS copy will be returned instead of the
@@ -167,7 +168,8 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
167 * pci_map_rom_copy - map a PCI ROM to kernel space, create a copy 168 * pci_map_rom_copy - map a PCI ROM to kernel space, create a copy
168 * @pdev: pointer to pci device struct 169 * @pdev: pointer to pci device struct
169 * @size: pointer to receive size of pci window over ROM 170 * @size: pointer to receive size of pci window over ROM
170 * @return: kernel virtual pointer to image of ROM 171 *
172 * Return: kernel virtual pointer to image of ROM
171 * 173 *
172 * Map a PCI ROM into kernel space. If ROM is boot video ROM, 174 * Map a PCI ROM into kernel space. If ROM is boot video ROM,
173 * the shadow BIOS copy will be returned instead of the 175 * the shadow BIOS copy will be returned instead of the
diff --git a/drivers/pnp/interface.c b/drivers/pnp/interface.c
index 478a4a739c00..c3f1c8e9d254 100644
--- a/drivers/pnp/interface.c
+++ b/drivers/pnp/interface.c
@@ -12,7 +12,6 @@
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/list.h> 13#include <linux/list.h>
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/pnp.h>
16#include <linux/stat.h> 15#include <linux/stat.h>
17#include <linux/ctype.h> 16#include <linux/ctype.h>
18#include <linux/slab.h> 17#include <linux/slab.h>
diff --git a/drivers/ps3/ps3-lpm.c b/drivers/ps3/ps3-lpm.c
index 85edf945ab86..204158cf7a55 100644
--- a/drivers/ps3/ps3-lpm.c
+++ b/drivers/ps3/ps3-lpm.c
@@ -22,6 +22,7 @@
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/uaccess.h> 24#include <linux/uaccess.h>
25#include <asm/smp.h>
25#include <asm/time.h> 26#include <asm/time.h>
26#include <asm/ps3.h> 27#include <asm/ps3.h>
27#include <asm/lv1call.h> 28#include <asm/lv1call.h>
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 4dada6ee1119..39360e2a4540 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -1,6 +1,4 @@
1menu "Voltage and Current regulators" 1menuconfig REGULATOR
2
3config REGULATOR
4 bool "Voltage and Current Regulator Support" 2 bool "Voltage and Current Regulator Support"
5 default n 3 default n
6 help 4 help
@@ -23,21 +21,20 @@ config REGULATOR
23 21
24 If unsure, say no. 22 If unsure, say no.
25 23
24if REGULATOR
25
26config REGULATOR_DEBUG 26config REGULATOR_DEBUG
27 bool "Regulator debug support" 27 bool "Regulator debug support"
28 depends on REGULATOR
29 help 28 help
30 Say yes here to enable debugging support. 29 Say yes here to enable debugging support.
31 30
32config REGULATOR_FIXED_VOLTAGE 31config REGULATOR_FIXED_VOLTAGE
33 tristate 32 tristate
34 default n 33 default n
35 select REGULATOR
36 34
37config REGULATOR_VIRTUAL_CONSUMER 35config REGULATOR_VIRTUAL_CONSUMER
38 tristate "Virtual regulator consumer support" 36 tristate "Virtual regulator consumer support"
39 default n 37 default n
40 select REGULATOR
41 help 38 help
42 This driver provides a virtual consumer for the voltage and 39 This driver provides a virtual consumer for the voltage and
43 current regulator API which provides sysfs controls for 40 current regulator API which provides sysfs controls for
@@ -49,7 +46,6 @@ config REGULATOR_VIRTUAL_CONSUMER
49config REGULATOR_BQ24022 46config REGULATOR_BQ24022
50 tristate "TI bq24022 Dual Input 1-Cell Li-Ion Charger IC" 47 tristate "TI bq24022 Dual Input 1-Cell Li-Ion Charger IC"
51 default n 48 default n
52 select REGULATOR
53 help 49 help
54 This driver controls a TI bq24022 Charger attached via 50 This driver controls a TI bq24022 Charger attached via
55 GPIOs. The provided current regulator can enable/disable 51 GPIOs. The provided current regulator can enable/disable
@@ -59,7 +55,6 @@ config REGULATOR_BQ24022
59config REGULATOR_WM8350 55config REGULATOR_WM8350
 60 tristate "Wolfson Microelectronics WM8350 AudioPlus PMIC" 56 tristate "Wolfson Microelectronics WM8350 AudioPlus PMIC"
61 depends on MFD_WM8350 57 depends on MFD_WM8350
62 select REGULATOR
63 help 58 help
64 This driver provides support for the voltage and current regulators 59 This driver provides support for the voltage and current regulators
65 of the WM8350 AudioPlus PMIC. 60 of the WM8350 AudioPlus PMIC.
@@ -67,7 +62,6 @@ config REGULATOR_WM8350
67config REGULATOR_WM8400 62config REGULATOR_WM8400
 68 tristate "Wolfson Microelectronics WM8400 AudioPlus PMIC" 63 tristate "Wolfson Microelectronics WM8400 AudioPlus PMIC"
69 depends on MFD_WM8400 64 depends on MFD_WM8400
70 select REGULATOR
71 help 65 help
72 This driver provides support for the voltage regulators of the 66 This driver provides support for the voltage regulators of the
73 WM8400 AudioPlus PMIC. 67 WM8400 AudioPlus PMIC.
@@ -75,9 +69,8 @@ config REGULATOR_WM8400
75config REGULATOR_DA903X 69config REGULATOR_DA903X
76 tristate "Support regulators on Dialog Semiconductor DA9030/DA9034 PMIC" 70 tristate "Support regulators on Dialog Semiconductor DA9030/DA9034 PMIC"
77 depends on PMIC_DA903X 71 depends on PMIC_DA903X
78 select REGULATOR
79 help 72 help
80 Say y here to support the BUCKs and LDOs regulators found on 73 Say y here to support the BUCKs and LDOs regulators found on
81 Dialog Semiconductor DA9030/DA9034 PMIC. 74 Dialog Semiconductor DA9030/DA9034 PMIC.
82 75
83endmenu 76endif
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 7af60b98d8a4..a04c1b6b1575 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -271,7 +271,7 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
271 dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year"); 271 dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year");
272 do { 272 do {
273 alarm->time.tm_year++; 273 alarm->time.tm_year++;
274 } while (!rtc_valid_tm(&alarm->time)); 274 } while (rtc_valid_tm(&alarm->time) != 0);
275 break; 275 break;
276 276
277 default: 277 default:
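The one-line change above matters because rtc_valid_tm() returns 0 for a valid time and a negative errno for an invalid one, so the rollover loop has to keep incrementing while the result is non-zero; the old test was inverted and only stopped once the time became invalid. A minimal sketch of the intended semantics (the helper name is hypothetical):

    #include <linux/rtc.h>

    /*
     * Hypothetical helper showing the intended rollover behaviour:
     * rtc_valid_tm() returns 0 for a valid time and a negative errno
     * otherwise, so keep bumping the year while the result is non-zero.
     */
    static void example_alarm_year_rollover(struct rtc_wkalrm *alarm)
    {
    	do {
    		alarm->time.tm_year++;
    	} while (rtc_valid_tm(&alarm->time) != 0);
    }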
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 5549231179a2..6cf8e282338f 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -794,7 +794,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
794 goto cleanup2; 794 goto cleanup2;
795 } 795 }
796 796
797 pr_info("%s: alarms up to one %s%s, %zd bytes nvram, %s irqs\n", 797 pr_info("%s: alarms up to one %s%s, %zd bytes nvram%s\n",
798 cmos_rtc.rtc->dev.bus_id, 798 cmos_rtc.rtc->dev.bus_id,
799 is_valid_irq(rtc_irq) 799 is_valid_irq(rtc_irq)
800 ? (cmos_rtc.mon_alrm 800 ? (cmos_rtc.mon_alrm
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index 2bec9ccc0293..a9a9893a5f95 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -36,7 +36,6 @@
36#include <linux/poll.h> 36#include <linux/poll.h>
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/string.h> 38#include <linux/string.h>
39#include <linux/smp_lock.h>
40#include <linux/genhd.h> 39#include <linux/genhd.h>
41#include <linux/blkdev.h> 40#include <linux/blkdev.h>
42 41
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index 61fb8b6d19af..d5efd6c77904 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -1258,6 +1258,8 @@ static void __devinit atmel_init_port(struct atmel_uart_port *atmel_port,
1258 atmel_port->clk = clk_get(&pdev->dev, "usart"); 1258 atmel_port->clk = clk_get(&pdev->dev, "usart");
1259 clk_enable(atmel_port->clk); 1259 clk_enable(atmel_port->clk);
1260 port->uartclk = clk_get_rate(atmel_port->clk); 1260 port->uartclk = clk_get_rate(atmel_port->clk);
1261 clk_disable(atmel_port->clk);
1262 /* only enable clock when USART is in use */
1261 } 1263 }
1262 1264
1263 atmel_port->use_dma_rx = data->use_dma_rx; 1265 atmel_port->use_dma_rx = data->use_dma_rx;
@@ -1379,6 +1381,8 @@ static int __init atmel_console_setup(struct console *co, char *options)
1379 return -ENODEV; 1381 return -ENODEV;
1380 } 1382 }
1381 1383
1384 clk_enable(atmel_ports[co->index].clk);
1385
1382 UART_PUT_IDR(port, -1); 1386 UART_PUT_IDR(port, -1);
1383 UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); 1387 UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
1384 UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN); 1388 UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);
@@ -1403,7 +1407,7 @@ static struct console atmel_console = {
1403 .data = &atmel_uart, 1407 .data = &atmel_uart,
1404}; 1408};
1405 1409
1406#define ATMEL_CONSOLE_DEVICE &atmel_console 1410#define ATMEL_CONSOLE_DEVICE (&atmel_console)
1407 1411
1408/* 1412/*
1409 * Early console initialization (before VM subsystem initialized). 1413 * Early console initialization (before VM subsystem initialized).
@@ -1534,6 +1538,15 @@ static int __devinit atmel_serial_probe(struct platform_device *pdev)
1534 if (ret) 1538 if (ret)
1535 goto err_add_port; 1539 goto err_add_port;
1536 1540
1541 if (atmel_is_console_port(&port->uart)
1542 && ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) {
1543 /*
1544 * The serial core enabled the clock for us, so undo
1545 * the clk_enable() in atmel_console_setup()
1546 */
1547 clk_disable(port->clk);
1548 }
1549
1537 device_init_wakeup(&pdev->dev, 1); 1550 device_init_wakeup(&pdev->dev, 1);
1538 platform_set_drvdata(pdev, port); 1551 platform_set_drvdata(pdev, port);
1539 1552
@@ -1544,7 +1557,6 @@ err_add_port:
1544 port->rx_ring.buf = NULL; 1557 port->rx_ring.buf = NULL;
1545err_alloc_ring: 1558err_alloc_ring:
1546 if (!atmel_is_console_port(&port->uart)) { 1559 if (!atmel_is_console_port(&port->uart)) {
1547 clk_disable(port->clk);
1548 clk_put(port->clk); 1560 clk_put(port->clk);
1549 port->clk = NULL; 1561 port->clk = NULL;
1550 } 1562 }
@@ -1568,7 +1580,6 @@ static int __devexit atmel_serial_remove(struct platform_device *pdev)
1568 1580
1569 /* "port" is allocated statically, so we shouldn't free it */ 1581 /* "port" is allocated statically, so we shouldn't free it */
1570 1582
1571 clk_disable(atmel_port->clk);
1572 clk_put(atmel_port->clk); 1583 clk_put(atmel_port->clk);
1573 1584
1574 return ret; 1585 return ret;
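The atmel_serial changes above keep the USART clock gated except while the port is actually in use; the console path takes its own clk_enable() in atmel_console_setup(), which probe() drops again once the serial core holds an enable of its own. A rough sketch of the balanced enable/disable pattern, with hypothetical names:

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    /*
     * Hypothetical sketch of the "enable only while in use" pattern:
     * acquire the clock once, keep it gated until the port is opened,
     * and balance every clk_enable() with exactly one clk_disable().
     */
    static int example_port_open(struct device *dev, struct clk **clkp)
    {
    	struct clk *clk = clk_get(dev, "usart");

    	if (IS_ERR(clk))
    		return PTR_ERR(clk);

    	clk_enable(clk);	/* ungate while the port is open */
    	*clkp = clk;
    	return 0;
    }

    static void example_port_close(struct clk *clk)
    {
    	clk_disable(clk);	/* gate again, keeping the count balanced */
    	clk_put(clk);
    }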
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index e1654f59eb70..c95b286a1239 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -21,7 +21,23 @@ menuconfig STAGING
21 21
22 If in doubt, say N here. 22 If in doubt, say N here.
23 23
24if STAGING 24
25config STAGING_EXCLUDE_BUILD
26 bool "Exclude Staging drivers from being built" if STAGING
27 default y
28 ---help---
29 Are you sure you really want to build the staging drivers?
30 They taint your kernel, don't live up to the normal Linux
31 kernel quality standards, are a bit crufty around the edges,
32 and might go off and kick your dog when you aren't paying
33 attention.
34
35 Say N here to be able to select and build the Staging drivers.
36 This option is primarily here to prevent them from being built
 37 when selecting 'make allyesconfig' and 'make allmodconfig' so
38 don't be all that put off, your dog will be just fine.
39
40if !STAGING_EXCLUDE_BUILD
25 41
26source "drivers/staging/et131x/Kconfig" 42source "drivers/staging/et131x/Kconfig"
27 43
@@ -45,4 +61,4 @@ source "drivers/staging/at76_usb/Kconfig"
45 61
46source "drivers/staging/poch/Kconfig" 62source "drivers/staging/poch/Kconfig"
47 63
48endif # STAGING 64endif # !STAGING_EXCLUDE_BUILD
diff --git a/drivers/staging/echo/echo.c b/drivers/staging/echo/echo.c
index b8f2c5e9dee5..fd4007e329e7 100644
--- a/drivers/staging/echo/echo.c
+++ b/drivers/staging/echo/echo.c
@@ -106,7 +106,6 @@
106 106
107#include <linux/kernel.h> /* We're doing kernel work */ 107#include <linux/kernel.h> /* We're doing kernel work */
108#include <linux/module.h> 108#include <linux/module.h>
109#include <linux/kernel.h>
110#include <linux/slab.h> 109#include <linux/slab.h>
111 110
112#include "bit_operations.h" 111#include "bit_operations.h"
diff --git a/drivers/staging/me4000/me4000.c b/drivers/staging/me4000/me4000.c
index cf8b01bcac8d..0394e2709278 100644
--- a/drivers/staging/me4000/me4000.c
+++ b/drivers/staging/me4000/me4000.c
@@ -39,7 +39,6 @@
39#include <asm/uaccess.h> 39#include <asm/uaccess.h>
40#include <asm/io.h> 40#include <asm/io.h>
41#include <asm/system.h> 41#include <asm/system.h>
42#include <asm/uaccess.h>
43 42
44/* Include-File for the Meilhaus ME-4000 I/O board */ 43/* Include-File for the Meilhaus ME-4000 I/O board */
45#include "me4000.h" 44#include "me4000.h"
diff --git a/drivers/staging/usbip/Kconfig b/drivers/staging/usbip/Kconfig
index 7426235ccc44..217fb7e62c2f 100644
--- a/drivers/staging/usbip/Kconfig
+++ b/drivers/staging/usbip/Kconfig
@@ -1,6 +1,6 @@
1config USB_IP_COMMON 1config USB_IP_COMMON
2 tristate "USB IP support (EXPERIMENTAL)" 2 tristate "USB IP support (EXPERIMENTAL)"
3 depends on USB && EXPERIMENTAL 3 depends on USB && NET && EXPERIMENTAL
4 default N 4 default N
5 ---help--- 5 ---help---
6 This enables pushing USB packets over IP to allow remote 6 This enables pushing USB packets over IP to allow remote
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 0f13448c6f79..3f3ce13fef43 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -2083,6 +2083,38 @@ config FB_METRONOME
2083 controller. The pre-release name for this device was 8track 2083 controller. The pre-release name for this device was 8track
2084 and could also have been called by some vendors as PVI-nnnn. 2084 and could also have been called by some vendors as PVI-nnnn.
2085 2085
2086config FB_MB862XX
2087 tristate "Fujitsu MB862xx GDC support"
2088 depends on FB
2089 select FB_CFB_FILLRECT
2090 select FB_CFB_COPYAREA
2091 select FB_CFB_IMAGEBLIT
2092 ---help---
2093 Frame buffer driver for Fujitsu Carmine/Coral-P(A)/Lime controllers.
2094
2095config FB_MB862XX_PCI_GDC
2096 bool "Carmine/Coral-P(A) GDC"
2097 depends on PCI && FB_MB862XX
2098 ---help---
2099 This enables framebuffer support for Fujitsu Carmine/Coral-P(A)
2100 PCI graphics controller devices.
2101
2102config FB_MB862XX_LIME
2103 bool "Lime GDC"
2104 depends on FB_MB862XX
2105 depends on OF && !FB_MB862XX_PCI_GDC
2106 select FB_FOREIGN_ENDIAN
2107 select FB_LITTLE_ENDIAN
2108 ---help---
2109 Framebuffer support for Fujitsu Lime GDC on host CPU bus.
2110
2111config FB_PRE_INIT_FB
2112 bool "Don't reinitialize, use bootloader's GDC/Display configuration"
2113 depends on FB_MB862XX_LIME
2114 ---help---
2115 Select this option if display contents should be inherited as set by
2116 the bootloader.
2117
2086source "drivers/video/omap/Kconfig" 2118source "drivers/video/omap/Kconfig"
2087 2119
2088source "drivers/video/backlight/Kconfig" 2120source "drivers/video/backlight/Kconfig"
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 248bddc8d0b0..e39e33e797da 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -122,6 +122,7 @@ obj-$(CONFIG_FB_SH_MOBILE_LCDC) += sh_mobile_lcdcfb.o
122obj-$(CONFIG_FB_OMAP) += omap/ 122obj-$(CONFIG_FB_OMAP) += omap/
123obj-$(CONFIG_XEN_FBDEV_FRONTEND) += xen-fbfront.o 123obj-$(CONFIG_XEN_FBDEV_FRONTEND) += xen-fbfront.o
124obj-$(CONFIG_FB_CARMINE) += carminefb.o 124obj-$(CONFIG_FB_CARMINE) += carminefb.o
125obj-$(CONFIG_FB_MB862XX) += mb862xx/
125 126
126# Platform or fallback drivers go here 127# Platform or fallback drivers go here
127obj-$(CONFIG_FB_UVESA) += uvesafb.o 128obj-$(CONFIG_FB_UVESA) += uvesafb.o
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 6048b55f2878..1d5ae39cb271 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1002,13 +1002,9 @@ fb_blank(struct fb_info *info, int blank)
1002 return ret; 1002 return ret;
1003} 1003}
1004 1004
1005static long 1005static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
1006fb_ioctl(struct file *file, unsigned int cmd, 1006 unsigned long arg)
1007 unsigned long arg)
1008{ 1007{
1009 struct inode *inode = file->f_path.dentry->d_inode;
1010 int fbidx = iminor(inode);
1011 struct fb_info *info;
1012 struct fb_ops *fb; 1008 struct fb_ops *fb;
1013 struct fb_var_screeninfo var; 1009 struct fb_var_screeninfo var;
1014 struct fb_fix_screeninfo fix; 1010 struct fb_fix_screeninfo fix;
@@ -1018,14 +1014,10 @@ fb_ioctl(struct file *file, unsigned int cmd,
1018 void __user *argp = (void __user *)arg; 1014 void __user *argp = (void __user *)arg;
1019 long ret = 0; 1015 long ret = 0;
1020 1016
1021 info = registered_fb[fbidx];
1022 mutex_lock(&info->lock);
1023 fb = info->fbops; 1017 fb = info->fbops;
1024 1018 if (!fb)
1025 if (!fb) {
1026 mutex_unlock(&info->lock);
1027 return -ENODEV; 1019 return -ENODEV;
1028 } 1020
1029 switch (cmd) { 1021 switch (cmd) {
1030 case FBIOGET_VSCREENINFO: 1022 case FBIOGET_VSCREENINFO:
1031 ret = copy_to_user(argp, &info->var, 1023 ret = copy_to_user(argp, &info->var,
@@ -1126,6 +1118,21 @@ fb_ioctl(struct file *file, unsigned int cmd,
1126 else 1118 else
1127 ret = fb->fb_ioctl(info, cmd, arg); 1119 ret = fb->fb_ioctl(info, cmd, arg);
1128 } 1120 }
1121 return ret;
1122}
1123
1124static long fb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1125__acquires(&info->lock)
1126__releases(&info->lock)
1127{
1128 struct inode *inode = file->f_path.dentry->d_inode;
1129 int fbidx = iminor(inode);
1130 struct fb_info *info;
1131 long ret;
1132
1133 info = registered_fb[fbidx];
1134 mutex_lock(&info->lock);
1135 ret = do_fb_ioctl(info, cmd, arg);
1129 mutex_unlock(&info->lock); 1136 mutex_unlock(&info->lock);
1130 return ret; 1137 return ret;
1131} 1138}
@@ -1157,8 +1164,8 @@ struct fb_cmap32 {
1157 compat_caddr_t transp; 1164 compat_caddr_t transp;
1158}; 1165};
1159 1166
1160static int fb_getput_cmap(struct inode *inode, struct file *file, 1167static int fb_getput_cmap(struct fb_info *info, unsigned int cmd,
1161 unsigned int cmd, unsigned long arg) 1168 unsigned long arg)
1162{ 1169{
1163 struct fb_cmap_user __user *cmap; 1170 struct fb_cmap_user __user *cmap;
1164 struct fb_cmap32 __user *cmap32; 1171 struct fb_cmap32 __user *cmap32;
@@ -1181,7 +1188,7 @@ static int fb_getput_cmap(struct inode *inode, struct file *file,
1181 put_user(compat_ptr(data), &cmap->transp)) 1188 put_user(compat_ptr(data), &cmap->transp))
1182 return -EFAULT; 1189 return -EFAULT;
1183 1190
1184 err = fb_ioctl(file, cmd, (unsigned long) cmap); 1191 err = do_fb_ioctl(info, cmd, (unsigned long) cmap);
1185 1192
1186 if (!err) { 1193 if (!err) {
1187 if (copy_in_user(&cmap32->start, 1194 if (copy_in_user(&cmap32->start,
@@ -1223,8 +1230,8 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
1223 return err; 1230 return err;
1224} 1231}
1225 1232
1226static int fb_get_fscreeninfo(struct inode *inode, struct file *file, 1233static int fb_get_fscreeninfo(struct fb_info *info, unsigned int cmd,
1227 unsigned int cmd, unsigned long arg) 1234 unsigned long arg)
1228{ 1235{
1229 mm_segment_t old_fs; 1236 mm_segment_t old_fs;
1230 struct fb_fix_screeninfo fix; 1237 struct fb_fix_screeninfo fix;
@@ -1235,7 +1242,7 @@ static int fb_get_fscreeninfo(struct inode *inode, struct file *file,
1235 1242
1236 old_fs = get_fs(); 1243 old_fs = get_fs();
1237 set_fs(KERNEL_DS); 1244 set_fs(KERNEL_DS);
1238 err = fb_ioctl(file, cmd, (unsigned long) &fix); 1245 err = do_fb_ioctl(info, cmd, (unsigned long) &fix);
1239 set_fs(old_fs); 1246 set_fs(old_fs);
1240 1247
1241 if (!err) 1248 if (!err)
@@ -1244,8 +1251,10 @@ static int fb_get_fscreeninfo(struct inode *inode, struct file *file,
1244 return err; 1251 return err;
1245} 1252}
1246 1253
1247static long 1254static long fb_compat_ioctl(struct file *file, unsigned int cmd,
1248fb_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 1255 unsigned long arg)
1256__acquires(&info->lock)
1257__releases(&info->lock)
1249{ 1258{
1250 struct inode *inode = file->f_path.dentry->d_inode; 1259 struct inode *inode = file->f_path.dentry->d_inode;
1251 int fbidx = iminor(inode); 1260 int fbidx = iminor(inode);
@@ -1262,16 +1271,16 @@ fb_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1262 case FBIOPUT_CON2FBMAP: 1271 case FBIOPUT_CON2FBMAP:
1263 arg = (unsigned long) compat_ptr(arg); 1272 arg = (unsigned long) compat_ptr(arg);
1264 case FBIOBLANK: 1273 case FBIOBLANK:
1265 mutex_unlock(&info->lock); 1274 ret = do_fb_ioctl(info, cmd, arg);
1266 return fb_ioctl(file, cmd, arg); 1275 break;
1267 1276
1268 case FBIOGET_FSCREENINFO: 1277 case FBIOGET_FSCREENINFO:
1269 ret = fb_get_fscreeninfo(inode, file, cmd, arg); 1278 ret = fb_get_fscreeninfo(info, cmd, arg);
1270 break; 1279 break;
1271 1280
1272 case FBIOGETCMAP: 1281 case FBIOGETCMAP:
1273 case FBIOPUTCMAP: 1282 case FBIOPUTCMAP:
1274 ret = fb_getput_cmap(inode, file, cmd, arg); 1283 ret = fb_getput_cmap(info, cmd, arg);
1275 break; 1284 break;
1276 1285
1277 default: 1286 default:
@@ -1286,6 +1295,8 @@ fb_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1286 1295
1287static int 1296static int
1288fb_mmap(struct file *file, struct vm_area_struct * vma) 1297fb_mmap(struct file *file, struct vm_area_struct * vma)
1298__acquires(&info->lock)
1299__releases(&info->lock)
1289{ 1300{
1290 int fbidx = iminor(file->f_path.dentry->d_inode); 1301 int fbidx = iminor(file->f_path.dentry->d_inode);
1291 struct fb_info *info = registered_fb[fbidx]; 1302 struct fb_info *info = registered_fb[fbidx];
@@ -1339,6 +1350,8 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
1339 1350
1340static int 1351static int
1341fb_open(struct inode *inode, struct file *file) 1352fb_open(struct inode *inode, struct file *file)
1353__acquires(&info->lock)
1354__releases(&info->lock)
1342{ 1355{
1343 int fbidx = iminor(inode); 1356 int fbidx = iminor(inode);
1344 struct fb_info *info; 1357 struct fb_info *info;
@@ -1374,6 +1387,8 @@ out:
1374 1387
1375static int 1388static int
1376fb_release(struct inode *inode, struct file *file) 1389fb_release(struct inode *inode, struct file *file)
1390__acquires(&info->lock)
1391__releases(&info->lock)
1377{ 1392{
1378 struct fb_info * const info = file->private_data; 1393 struct fb_info * const info = file->private_data;
1379 1394
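The fbmem.c rework above splits the ioctl path into do_fb_ioctl(), which expects the caller to hold info->lock, and a thin fb_ioctl() wrapper that resolves the fb_info and takes the lock, letting the compat handlers reuse the worker without double-locking. A generic sketch of that wrapper/worker split (all names and types below are made up for illustration):

    #include <linux/mutex.h>

    /*
     * Generic sketch of the wrapper/worker split, all names hypothetical.
     * The worker assumes the caller already holds the lock; the public
     * entry point is the only place that takes it.  Callers that already
     * hold the lock (here, the compat ioctl paths) call the worker
     * directly instead of re-entering through the locking wrapper.
     */
    struct example_dev {
    	struct mutex lock;
    	unsigned int last_cmd;
    };

    static long do_example_ioctl(struct example_dev *dev, unsigned int cmd)
    {
    	/* caller holds dev->lock */
    	dev->last_cmd = cmd;
    	return 0;
    }

    static long example_ioctl(struct example_dev *dev, unsigned int cmd)
    {
    	long ret;

    	mutex_lock(&dev->lock);
    	ret = do_example_ioctl(dev, cmd);
    	mutex_unlock(&dev->lock);
    	return ret;
    }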
diff --git a/drivers/video/mb862xx/Makefile b/drivers/video/mb862xx/Makefile
new file mode 100644
index 000000000000..07664814bb1d
--- /dev/null
+++ b/drivers/video/mb862xx/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile for the MB862xx framebuffer driver
3#
4
5obj-$(CONFIG_FB_MB862XX) := mb862xxfb.o
diff --git a/drivers/video/mb862xx/mb862xx_reg.h b/drivers/video/mb862xx/mb862xx_reg.h
new file mode 100644
index 000000000000..2ba65e118500
--- /dev/null
+++ b/drivers/video/mb862xx/mb862xx_reg.h
@@ -0,0 +1,138 @@
1/*
2 * Fujitsu MB862xx Graphics Controller Registers/Bits
3 */
4
5#ifndef _MB862XX_REG_H
6#define _MB862XX_REG_H
7
8#ifdef MB862XX_MMIO_BOTTOM
9#define MB862XX_MMIO_BASE 0x03fc0000
10#else
11#define MB862XX_MMIO_BASE 0x01fc0000
12#endif
13#define MB862XX_I2C_BASE 0x0000c000
14#define MB862XX_DISP_BASE 0x00010000
15#define MB862XX_CAP_BASE 0x00018000
16#define MB862XX_DRAW_BASE 0x00030000
17#define MB862XX_GEO_BASE 0x00038000
18#define MB862XX_PIO_BASE 0x00038000
19#define MB862XX_MMIO_SIZE 0x40000
20
21/* Host interface/pio registers */
22#define GC_IST 0x00000020
23#define GC_IMASK 0x00000024
24#define GC_SRST 0x0000002c
25#define GC_CCF 0x00000038
26#define GC_CID 0x000000f0
27#define GC_REVISION 0x00000084
28
29#define GC_CCF_CGE_100 0x00000000
30#define GC_CCF_CGE_133 0x00040000
31#define GC_CCF_CGE_166 0x00080000
32#define GC_CCF_COT_100 0x00000000
33#define GC_CCF_COT_133 0x00010000
34#define GC_CID_CNAME_MSK 0x0000ff00
35#define GC_CID_VERSION_MSK 0x000000ff
36
37/* define enabled interrupts hereby */
38#define GC_INT_EN 0x00000000
39
40/* Memory interface mode register */
41#define GC_MMR 0x0000fffc
42
43/* Display Controller registers */
44#define GC_DCM0 0x00000000
45#define GC_HTP 0x00000004
46#define GC_HDB_HDP 0x00000008
47#define GC_VSW_HSW_HSP 0x0000000c
48#define GC_VTR 0x00000010
49#define GC_VDP_VSP 0x00000014
50#define GC_WY_WX 0x00000018
51#define GC_WH_WW 0x0000001c
52#define GC_L0M 0x00000020
53#define GC_L0OA0 0x00000024
54#define GC_L0DA0 0x00000028
55#define GC_L0DY_L0DX 0x0000002c
56#define GC_DCM1 0x00000100
57#define GC_L0EM 0x00000110
58#define GC_L0WY_L0WX 0x00000114
59#define GC_L0WH_L0WW 0x00000118
60#define GC_DCM2 0x00000104
61#define GC_DCM3 0x00000108
62#define GC_CPM_CUTC 0x000000a0
63#define GC_CUOA0 0x000000a4
64#define GC_CUY0_CUX0 0x000000a8
65#define GC_CUOA1 0x000000ac
66#define GC_CUY1_CUX1 0x000000b0
67#define GC_L0PAL0 0x00000400
68
69#define GC_CPM_CEN0 0x00100000
70#define GC_CPM_CEN1 0x00200000
71
72#define GC_DCM01_ESY 0x00000004
73#define GC_DCM01_SC 0x00003f00
74#define GC_DCM01_RESV 0x00004000
75#define GC_DCM01_CKS 0x00008000
76#define GC_DCM01_L0E 0x00010000
77#define GC_DCM01_DEN 0x80000000
78#define GC_L0M_L0C_8 0x00000000
79#define GC_L0M_L0C_16 0x80000000
80#define GC_L0EM_L0EC_24 0x40000000
81#define GC_L0M_L0W_UNIT 64
82
83#define GC_DISP_REFCLK_400 400
84
85/* Carmine specific */
86#define MB86297_DRAW_BASE 0x00020000
87#define MB86297_DISP0_BASE 0x00100000
88#define MB86297_DISP1_BASE 0x00140000
89#define MB86297_WRBACK_BASE 0x00180000
90#define MB86297_CAP0_BASE 0x00200000
91#define MB86297_CAP1_BASE 0x00280000
92#define MB86297_DRAMCTRL_BASE 0x00300000
93#define MB86297_CTRL_BASE 0x00400000
94#define MB86297_I2C_BASE 0x00500000
95
96#define GC_CTRL_STATUS 0x00000000
97#define GC_CTRL_INT_MASK 0x00000004
98#define GC_CTRL_CLK_ENABLE 0x0000000c
99#define GC_CTRL_SOFT_RST 0x00000010
100
101#define GC_CTRL_CLK_EN_DRAM 0x00000001
102#define GC_CTRL_CLK_EN_2D3D 0x00000002
103#define GC_CTRL_CLK_EN_DISP0 0x00000020
104#define GC_CTRL_CLK_EN_DISP1 0x00000040
105
106#define GC_2D3D_REV 0x000004b4
107#define GC_RE_REVISION 0x24240200
108
109/* define enabled interrupts hereby */
110#define GC_CARMINE_INT_EN 0x00000004
111
112/* DRAM controller */
113#define GC_DCTL_MODE_ADD 0x00000000
114#define GC_DCTL_SETTIME1_EMODE 0x00000004
115#define GC_DCTL_REFRESH_SETTIME2 0x00000008
116#define GC_DCTL_RSV0_STATES 0x0000000C
117#define GC_DCTL_RSV2_RSV1 0x00000010
118#define GC_DCTL_DDRIF2_DDRIF1 0x00000014
119#define GC_DCTL_IOCONT1_IOCONT0 0x00000024
120
121#define GC_DCTL_STATES_MSK 0x0000000f
122#define GC_DCTL_INIT_WAIT_CNT 3000
123#define GC_DCTL_INIT_WAIT_INTERVAL 1
124
125/* DRAM ctrl values for Carmine PCI Eval. board */
126#define GC_EVB_DCTL_MODE_ADD 0x012105c3
127#define GC_EVB_DCTL_MODE_ADD_AFT_RST 0x002105c3
128#define GC_EVB_DCTL_SETTIME1_EMODE 0x47498000
129#define GC_EVB_DCTL_REFRESH_SETTIME2 0x00422a22
130#define GC_EVB_DCTL_RSV0_STATES 0x00200003
131#define GC_EVB_DCTL_RSV0_STATES_AFT_RST 0x00200002
132#define GC_EVB_DCTL_RSV2_RSV1 0x0000000f
133#define GC_EVB_DCTL_DDRIF2_DDRIF1 0x00556646
134#define GC_EVB_DCTL_IOCONT1_IOCONT0 0x05550555
135
136#define GC_DISP_REFCLK_533 533
137
138#endif
diff --git a/drivers/video/mb862xx/mb862xxfb.c b/drivers/video/mb862xx/mb862xxfb.c
new file mode 100644
index 000000000000..38718d95fbb9
--- /dev/null
+++ b/drivers/video/mb862xx/mb862xxfb.c
@@ -0,0 +1,1061 @@
1/*
2 * drivers/mb862xx/mb862xxfb.c
3 *
4 * Fujitsu Carmine/Coral-P(A)/Lime framebuffer driver
5 *
6 * (C) 2008 Anatolij Gustschin <agust@denx.de>
7 * DENX Software Engineering
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 */
14
15#undef DEBUG
16
17#include <linux/fb.h>
18#include <linux/delay.h>
19#include <linux/init.h>
20#include <linux/interrupt.h>
21#include <linux/pci.h>
22#if defined(CONFIG_PPC_OF)
23#include <linux/of_platform.h>
24#endif
25#include "mb862xxfb.h"
26#include "mb862xx_reg.h"
27
28#define NR_PALETTE 256
29#define MB862XX_MEM_SIZE 0x1000000
30#define CORALP_MEM_SIZE 0x4000000
31#define CARMINE_MEM_SIZE 0x8000000
32#define DRV_NAME "mb862xxfb"
33
34#if defined(CONFIG_LWMON5)
35static struct mb862xx_gc_mode lwmon5_gc_mode = {
36 /* Mode for Sharp LQ104V1DG61 TFT LCD Panel */
37 { "640x480", 60, 640, 480, 40000, 48, 16, 32, 11, 96, 2, 0, 0, 0 },
38 /* 16 bits/pixel, 32MB, 100MHz, SDRAM memory mode value */
39 16, 0x2000000, GC_CCF_COT_100, 0x414fb7f2
40};
41#endif
42
43#if defined(CONFIG_SOCRATES)
44static struct mb862xx_gc_mode socrates_gc_mode = {
45 /* Mode for Prime View PM070WL4 TFT LCD Panel */
46 { "800x480", 45, 800, 480, 40000, 86, 42, 33, 10, 128, 2, 0, 0, 0 },
47 /* 16 bits/pixel, 16MB, 133MHz, SDRAM memory mode value */
48 16, 0x1000000, GC_CCF_COT_133, 0x4157ba63
49};
50#endif
51
52/* Helpers */
53static inline int h_total(struct fb_var_screeninfo *var)
54{
55 return var->xres + var->left_margin +
56 var->right_margin + var->hsync_len;
57}
58
59static inline int v_total(struct fb_var_screeninfo *var)
60{
61 return var->yres + var->upper_margin +
62 var->lower_margin + var->vsync_len;
63}
64
65static inline int hsp(struct fb_var_screeninfo *var)
66{
67 return var->xres + var->right_margin - 1;
68}
69
70static inline int vsp(struct fb_var_screeninfo *var)
71{
72 return var->yres + var->lower_margin - 1;
73}
74
75static inline int d_pitch(struct fb_var_screeninfo *var)
76{
77 return var->xres * var->bits_per_pixel / 8;
78}
79
80static inline unsigned int chan_to_field(unsigned int chan,
81 struct fb_bitfield *bf)
82{
83 chan &= 0xffff;
84 chan >>= 16 - bf->length;
85 return chan << bf->offset;
86}
87
88static int mb862xxfb_setcolreg(unsigned regno,
89 unsigned red, unsigned green, unsigned blue,
90 unsigned transp, struct fb_info *info)
91{
92 struct mb862xxfb_par *par = info->par;
93 unsigned int val;
94
95 switch (info->fix.visual) {
96 case FB_VISUAL_TRUECOLOR:
97 if (regno < 16) {
98 val = chan_to_field(red, &info->var.red);
99 val |= chan_to_field(green, &info->var.green);
100 val |= chan_to_field(blue, &info->var.blue);
101 par->pseudo_palette[regno] = val;
102 }
103 break;
104 case FB_VISUAL_PSEUDOCOLOR:
105 if (regno < 256) {
106 val = (red >> 8) << 16;
107 val |= (green >> 8) << 8;
108 val |= blue >> 8;
109 outreg(disp, GC_L0PAL0 + (regno * 4), val);
110 }
111 break;
112 default:
113 return 1; /* unsupported type */
114 }
115 return 0;
116}
117
118static int mb862xxfb_check_var(struct fb_var_screeninfo *var,
119 struct fb_info *fbi)
120{
121 unsigned long tmp;
122
123 if (fbi->dev)
124 dev_dbg(fbi->dev, "%s\n", __func__);
125
126 /* check if these values fit into the registers */
127 if (var->hsync_len > 255 || var->vsync_len > 255)
128 return -EINVAL;
129
130 if ((var->xres + var->right_margin) >= 4096)
131 return -EINVAL;
132
133 if ((var->yres + var->lower_margin) > 4096)
134 return -EINVAL;
135
136 if (h_total(var) > 4096 || v_total(var) > 4096)
137 return -EINVAL;
138
139 if (var->xres_virtual > 4096 || var->yres_virtual > 4096)
140 return -EINVAL;
141
142 if (var->bits_per_pixel <= 8)
143 var->bits_per_pixel = 8;
144 else if (var->bits_per_pixel <= 16)
145 var->bits_per_pixel = 16;
146 else if (var->bits_per_pixel <= 32)
147 var->bits_per_pixel = 32;
148
149 /*
150 * can cope with 8,16 or 24/32bpp if resulting
151 * pitch is divisible by 64 without remainder
152 */
153 if (d_pitch(&fbi->var) % GC_L0M_L0W_UNIT) {
154 int r;
155
156 var->bits_per_pixel = 0;
157 do {
158 var->bits_per_pixel += 8;
159 r = d_pitch(&fbi->var) % GC_L0M_L0W_UNIT;
160 } while (r && var->bits_per_pixel <= 32);
161
162 if (d_pitch(&fbi->var) % GC_L0M_L0W_UNIT)
163 return -EINVAL;
164 }
165
166 /* line length is going to be 128 bit aligned */
167 tmp = (var->xres * var->bits_per_pixel) / 8;
168 if ((tmp & 15) != 0)
169 return -EINVAL;
170
171 /* set r/g/b positions and validate bpp */
172 switch (var->bits_per_pixel) {
173 case 8:
174 var->red.length = var->bits_per_pixel;
175 var->green.length = var->bits_per_pixel;
176 var->blue.length = var->bits_per_pixel;
177 var->red.offset = 0;
178 var->green.offset = 0;
179 var->blue.offset = 0;
180 var->transp.length = 0;
181 break;
182 case 16:
183 var->red.length = 5;
184 var->green.length = 5;
185 var->blue.length = 5;
186 var->red.offset = 10;
187 var->green.offset = 5;
188 var->blue.offset = 0;
189 var->transp.length = 0;
190 break;
191 case 24:
192 case 32:
193 var->transp.length = 8;
194 var->red.length = 8;
195 var->green.length = 8;
196 var->blue.length = 8;
197 var->transp.offset = 24;
198 var->red.offset = 16;
199 var->green.offset = 8;
200 var->blue.offset = 0;
201 break;
202 default:
203 return -EINVAL;
204 }
205 return 0;
206}
207
208/*
209 * set display parameters
210 */
211static int mb862xxfb_set_par(struct fb_info *fbi)
212{
213 struct mb862xxfb_par *par = fbi->par;
214 unsigned long reg, sc;
215
216 dev_dbg(par->dev, "%s\n", __func__);
217
218 if (par->pre_init)
219 return 0;
220
221 /* disp off */
222 reg = inreg(disp, GC_DCM1);
223 reg &= ~GC_DCM01_DEN;
224 outreg(disp, GC_DCM1, reg);
225
226 /* set display reference clock div. */
227 sc = par->refclk / (1000000 / fbi->var.pixclock) - 1;
228 reg = inreg(disp, GC_DCM1);
229 reg &= ~(GC_DCM01_CKS | GC_DCM01_RESV | GC_DCM01_SC);
230 reg |= sc << 8;
231 outreg(disp, GC_DCM1, reg);
232 dev_dbg(par->dev, "SC 0x%lx\n", sc);
233
234 /* disp dimension, format */
235 reg = pack(d_pitch(&fbi->var) / GC_L0M_L0W_UNIT,
236 (fbi->var.yres - 1));
237 if (fbi->var.bits_per_pixel == 16)
238 reg |= GC_L0M_L0C_16;
239 outreg(disp, GC_L0M, reg);
240
241 if (fbi->var.bits_per_pixel == 32) {
242 reg = inreg(disp, GC_L0EM);
243 outreg(disp, GC_L0EM, reg | GC_L0EM_L0EC_24);
244 }
245 outreg(disp, GC_WY_WX, 0);
246 reg = pack(fbi->var.yres - 1, fbi->var.xres);
247 outreg(disp, GC_WH_WW, reg);
248 outreg(disp, GC_L0OA0, 0);
249 outreg(disp, GC_L0DA0, 0);
250 outreg(disp, GC_L0DY_L0DX, 0);
251 outreg(disp, GC_L0WY_L0WX, 0);
252 outreg(disp, GC_L0WH_L0WW, reg);
253
254 /* both HW-cursors off */
255 reg = inreg(disp, GC_CPM_CUTC);
256 reg &= ~(GC_CPM_CEN0 | GC_CPM_CEN1);
257 outreg(disp, GC_CPM_CUTC, reg);
258
259 /* timings */
260 reg = pack(fbi->var.xres - 1, fbi->var.xres - 1);
261 outreg(disp, GC_HDB_HDP, reg);
262 reg = pack((fbi->var.yres - 1), vsp(&fbi->var));
263 outreg(disp, GC_VDP_VSP, reg);
264 reg = ((fbi->var.vsync_len - 1) << 24) |
265 pack((fbi->var.hsync_len - 1), hsp(&fbi->var));
266 outreg(disp, GC_VSW_HSW_HSP, reg);
267 outreg(disp, GC_HTP, pack(h_total(&fbi->var) - 1, 0));
268 outreg(disp, GC_VTR, pack(v_total(&fbi->var) - 1, 0));
269
270 /* display on */
271 reg = inreg(disp, GC_DCM1);
272 reg |= GC_DCM01_DEN | GC_DCM01_L0E;
273 reg &= ~GC_DCM01_ESY;
274 outreg(disp, GC_DCM1, reg);
275 return 0;
276}
277
278static int mb862xxfb_pan(struct fb_var_screeninfo *var,
279 struct fb_info *info)
280{
281 struct mb862xxfb_par *par = info->par;
282 unsigned long reg;
283
284 reg = pack(var->yoffset, var->xoffset);
285 outreg(disp, GC_L0WY_L0WX, reg);
286
287 reg = pack(var->yres_virtual, var->xres_virtual);
288 outreg(disp, GC_L0WH_L0WW, reg);
289 return 0;
290}
291
292static int mb862xxfb_blank(int mode, struct fb_info *fbi)
293{
294 struct mb862xxfb_par *par = fbi->par;
295 unsigned long reg;
296
297 dev_dbg(fbi->dev, "blank mode=%d\n", mode);
298
299 switch (mode) {
300 case FB_BLANK_POWERDOWN:
301 reg = inreg(disp, GC_DCM1);
302 reg &= ~GC_DCM01_DEN;
303 outreg(disp, GC_DCM1, reg);
304 break;
305 case FB_BLANK_UNBLANK:
306 reg = inreg(disp, GC_DCM1);
307 reg |= GC_DCM01_DEN;
308 outreg(disp, GC_DCM1, reg);
309 break;
310 case FB_BLANK_NORMAL:
311 case FB_BLANK_VSYNC_SUSPEND:
312 case FB_BLANK_HSYNC_SUSPEND:
313 default:
314 return 1;
315 }
316 return 0;
317}
318
319/* framebuffer ops */
320static struct fb_ops mb862xxfb_ops = {
321 .owner = THIS_MODULE,
322 .fb_check_var = mb862xxfb_check_var,
323 .fb_set_par = mb862xxfb_set_par,
324 .fb_setcolreg = mb862xxfb_setcolreg,
325 .fb_blank = mb862xxfb_blank,
326 .fb_pan_display = mb862xxfb_pan,
327 .fb_fillrect = cfb_fillrect,
328 .fb_copyarea = cfb_copyarea,
329 .fb_imageblit = cfb_imageblit,
330};
331
332/* initialize fb_info data */
333static int mb862xxfb_init_fbinfo(struct fb_info *fbi)
334{
335 struct mb862xxfb_par *par = fbi->par;
336 struct mb862xx_gc_mode *mode = par->gc_mode;
337 unsigned long reg;
338
339 fbi->fbops = &mb862xxfb_ops;
340 fbi->pseudo_palette = par->pseudo_palette;
341 fbi->screen_base = par->fb_base;
342 fbi->screen_size = par->mapped_vram;
343
344 strcpy(fbi->fix.id, DRV_NAME);
345 fbi->fix.smem_start = (unsigned long)par->fb_base_phys;
346 fbi->fix.smem_len = par->mapped_vram;
347 fbi->fix.mmio_start = (unsigned long)par->mmio_base_phys;
348 fbi->fix.mmio_len = par->mmio_len;
349 fbi->fix.accel = FB_ACCEL_NONE;
350 fbi->fix.type = FB_TYPE_PACKED_PIXELS;
351 fbi->fix.type_aux = 0;
352 fbi->fix.xpanstep = 1;
353 fbi->fix.ypanstep = 1;
354 fbi->fix.ywrapstep = 0;
355
356 reg = inreg(disp, GC_DCM1);
357 if (reg & GC_DCM01_DEN && reg & GC_DCM01_L0E) {
358 /* get the disp mode from active display cfg */
359 unsigned long sc = ((reg & GC_DCM01_SC) >> 8) + 1;
360 unsigned long hsp, vsp, ht, vt;
361
362 dev_dbg(par->dev, "using bootloader's disp. mode\n");
363 fbi->var.pixclock = (sc * 1000000) / par->refclk;
364 fbi->var.xres = (inreg(disp, GC_HDB_HDP) & 0x0fff) + 1;
365 reg = inreg(disp, GC_VDP_VSP);
366 fbi->var.yres = ((reg >> 16) & 0x0fff) + 1;
367 vsp = (reg & 0x0fff) + 1;
368 fbi->var.xres_virtual = fbi->var.xres;
369 fbi->var.yres_virtual = fbi->var.yres;
370 reg = inreg(disp, GC_L0EM);
371 if (reg & GC_L0EM_L0EC_24) {
372 fbi->var.bits_per_pixel = 32;
373 } else {
374 reg = inreg(disp, GC_L0M);
375 if (reg & GC_L0M_L0C_16)
376 fbi->var.bits_per_pixel = 16;
377 else
378 fbi->var.bits_per_pixel = 8;
379 }
380 reg = inreg(disp, GC_VSW_HSW_HSP);
381 fbi->var.hsync_len = ((reg & 0xff0000) >> 16) + 1;
382 fbi->var.vsync_len = ((reg & 0x3f000000) >> 24) + 1;
383 hsp = (reg & 0xffff) + 1;
384 ht = ((inreg(disp, GC_HTP) & 0xfff0000) >> 16) + 1;
385 fbi->var.right_margin = hsp - fbi->var.xres;
386 fbi->var.left_margin = ht - hsp - fbi->var.hsync_len;
387 vt = ((inreg(disp, GC_VTR) & 0xfff0000) >> 16) + 1;
388 fbi->var.lower_margin = vsp - fbi->var.yres;
389 fbi->var.upper_margin = vt - vsp - fbi->var.vsync_len;
390 } else if (mode) {
391 dev_dbg(par->dev, "using supplied mode\n");
392 fb_videomode_to_var(&fbi->var, (struct fb_videomode *)mode);
393 fbi->var.bits_per_pixel = mode->def_bpp ? mode->def_bpp : 8;
394 } else {
395 int ret;
396
397 ret = fb_find_mode(&fbi->var, fbi, "640x480-16@60",
398 NULL, 0, NULL, 16);
399 if (ret == 0 || ret == 4) {
400 dev_err(par->dev,
401 "failed to get initial mode\n");
402 return -EINVAL;
403 }
404 }
405
406 fbi->var.xoffset = 0;
407 fbi->var.yoffset = 0;
408 fbi->var.grayscale = 0;
409 fbi->var.nonstd = 0;
410 fbi->var.height = -1;
411 fbi->var.width = -1;
412 fbi->var.accel_flags = 0;
413 fbi->var.vmode = FB_VMODE_NONINTERLACED;
414 fbi->var.activate = FB_ACTIVATE_NOW;
415 fbi->flags = FBINFO_DEFAULT |
416#ifdef __BIG_ENDIAN
417 FBINFO_FOREIGN_ENDIAN |
418#endif
419 FBINFO_HWACCEL_XPAN |
420 FBINFO_HWACCEL_YPAN;
421
422 /* check and possibly fix bpp */
423 if ((fbi->fbops->fb_check_var)(&fbi->var, fbi))
424 dev_err(par->dev, "check_var() failed on initial setup?\n");
425
426 fbi->fix.visual = fbi->var.bits_per_pixel == 8 ?
427 FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
428 fbi->fix.line_length = (fbi->var.xres_virtual *
429 fbi->var.bits_per_pixel) / 8;
430 return 0;
431}
432
433/*
434 * show some display controller and cursor registers
435 */
436static ssize_t mb862xxfb_show_dispregs(struct device *dev,
437 struct device_attribute *attr, char *buf)
438{
439 struct fb_info *fbi = dev_get_drvdata(dev);
440 struct mb862xxfb_par *par = fbi->par;
441 char *ptr = buf;
442 unsigned int reg;
443
444 for (reg = GC_DCM0; reg <= GC_L0DY_L0DX; reg += 4)
445 ptr += sprintf(ptr, "%08x = %08x\n",
446 reg, inreg(disp, reg));
447
448 for (reg = GC_CPM_CUTC; reg <= GC_CUY1_CUX1; reg += 4)
449 ptr += sprintf(ptr, "%08x = %08x\n",
450 reg, inreg(disp, reg));
451
452 for (reg = GC_DCM1; reg <= GC_L0WH_L0WW; reg += 4)
453 ptr += sprintf(ptr, "%08x = %08x\n",
454 reg, inreg(disp, reg));
455
456 return ptr - buf;
457}
458
459static DEVICE_ATTR(dispregs, 0444, mb862xxfb_show_dispregs, NULL);
460
461irqreturn_t mb862xx_intr(int irq, void *dev_id)
462{
463 struct mb862xxfb_par *par = (struct mb862xxfb_par *) dev_id;
464 unsigned long reg_ist, mask;
465
466 if (!par)
467 return IRQ_NONE;
468
469 if (par->type == BT_CARMINE) {
470 /* Get Interrupt Status */
471 reg_ist = inreg(ctrl, GC_CTRL_STATUS);
472 mask = inreg(ctrl, GC_CTRL_INT_MASK);
473 if (reg_ist == 0)
474 return IRQ_HANDLED;
475
476 reg_ist &= mask;
477 if (reg_ist == 0)
478 return IRQ_HANDLED;
479
480 /* Clear interrupt status */
481 outreg(ctrl, 0x0, reg_ist);
482 } else {
483 /* Get status */
484 reg_ist = inreg(host, GC_IST);
485 mask = inreg(host, GC_IMASK);
486
487 reg_ist &= mask;
488 if (reg_ist == 0)
489 return IRQ_HANDLED;
490
491 /* Clear status */
492 outreg(host, GC_IST, ~reg_ist);
493 }
494 return IRQ_HANDLED;
495}
496
497#if defined(CONFIG_FB_MB862XX_LIME)
498/*
499 * GDC (Lime, Coral(B/Q), Mint, ...) on host bus
500 */
501static int mb862xx_gdc_init(struct mb862xxfb_par *par)
502{
503 unsigned long ccf, mmr;
504 unsigned long ver, rev;
505
506 if (!par)
507 return -ENODEV;
508
509#if defined(CONFIG_FB_PRE_INIT_FB)
510 par->pre_init = 1;
511#endif
512 par->host = par->mmio_base;
513 par->i2c = par->mmio_base + MB862XX_I2C_BASE;
514 par->disp = par->mmio_base + MB862XX_DISP_BASE;
515 par->cap = par->mmio_base + MB862XX_CAP_BASE;
516 par->draw = par->mmio_base + MB862XX_DRAW_BASE;
517 par->geo = par->mmio_base + MB862XX_GEO_BASE;
518 par->pio = par->mmio_base + MB862XX_PIO_BASE;
519
520 par->refclk = GC_DISP_REFCLK_400;
521
522 ver = inreg(host, GC_CID);
523 rev = inreg(pio, GC_REVISION);
524 if ((ver == 0x303) && (rev & 0xffffff00) == 0x20050100) {
525 dev_info(par->dev, "Fujitsu Lime v1.%d found\n",
526 (int)rev & 0xff);
527 par->type = BT_LIME;
528 ccf = par->gc_mode ? par->gc_mode->ccf : GC_CCF_COT_100;
529 mmr = par->gc_mode ? par->gc_mode->mmr : 0x414fb7f2;
530 } else {
531 dev_info(par->dev, "? GDC, CID/Rev.: 0x%lx/0x%lx \n", ver, rev);
532 return -ENODEV;
533 }
534
535 if (!par->pre_init) {
536 outreg(host, GC_CCF, ccf);
537 udelay(200);
538 outreg(host, GC_MMR, mmr);
539 udelay(10);
540 }
541
542 /* interrupt status */
543 outreg(host, GC_IST, 0);
544 outreg(host, GC_IMASK, GC_INT_EN);
545 return 0;
546}
547
548static int __devinit of_platform_mb862xx_probe(struct of_device *ofdev,
549 const struct of_device_id *id)
550{
551 struct device_node *np = ofdev->node;
552 struct device *dev = &ofdev->dev;
553 struct mb862xxfb_par *par;
554 struct fb_info *info;
555 struct resource res;
556 resource_size_t res_size;
557 unsigned long ret = -ENODEV;
558
559 if (of_address_to_resource(np, 0, &res)) {
560 dev_err(dev, "Invalid address\n");
561 return -ENXIO;
562 }
563
564 info = framebuffer_alloc(sizeof(struct mb862xxfb_par), dev);
565 if (info == NULL) {
566 dev_err(dev, "cannot allocate framebuffer\n");
567 return -ENOMEM;
568 }
569
570 par = info->par;
571 par->info = info;
572 par->dev = dev;
573
574 par->irq = irq_of_parse_and_map(np, 0);
575 if (par->irq == NO_IRQ) {
576 dev_err(dev, "failed to map irq\n");
577 ret = -ENODEV;
578 goto fbrel;
579 }
580
581 res_size = 1 + res.end - res.start;
582 par->res = request_mem_region(res.start, res_size, DRV_NAME);
583 if (par->res == NULL) {
584 dev_err(dev, "Cannot claim framebuffer/mmio\n");
585 ret = -ENXIO;
586 goto irqdisp;
587 }
588
589#if defined(CONFIG_LWMON5)
590 par->gc_mode = &lwmon5_gc_mode;
591#endif
592
593#if defined(CONFIG_SOCRATES)
594 par->gc_mode = &socrates_gc_mode;
595#endif
596
597 par->fb_base_phys = res.start;
598 par->mmio_base_phys = res.start + MB862XX_MMIO_BASE;
599 par->mmio_len = MB862XX_MMIO_SIZE;
600 if (par->gc_mode)
601 par->mapped_vram = par->gc_mode->max_vram;
602 else
603 par->mapped_vram = MB862XX_MEM_SIZE;
604
605 par->fb_base = ioremap(par->fb_base_phys, par->mapped_vram);
606 if (par->fb_base == NULL) {
607 dev_err(dev, "Cannot map framebuffer\n");
608 goto rel_reg;
609 }
610
611 par->mmio_base = ioremap(par->mmio_base_phys, par->mmio_len);
612 if (par->mmio_base == NULL) {
613 dev_err(dev, "Cannot map registers\n");
614 goto fb_unmap;
615 }
616
617 dev_dbg(dev, "fb phys 0x%llx 0x%lx\n",
618 (u64)par->fb_base_phys, (ulong)par->mapped_vram);
619 dev_dbg(dev, "mmio phys 0x%llx 0x%lx, (irq = %d)\n",
620 (u64)par->mmio_base_phys, (ulong)par->mmio_len, par->irq);
621
622 if (mb862xx_gdc_init(par))
623 goto io_unmap;
624
625 if (request_irq(par->irq, mb862xx_intr, IRQF_DISABLED,
626 DRV_NAME, (void *)par)) {
627 dev_err(dev, "Cannot request irq\n");
628 goto io_unmap;
629 }
630
631 mb862xxfb_init_fbinfo(info);
632
633 if (fb_alloc_cmap(&info->cmap, NR_PALETTE, 0) < 0) {
634 dev_err(dev, "Could not allocate cmap for fb_info.\n");
635 goto free_irq;
636 }
637
638 if ((info->fbops->fb_set_par)(info))
639 dev_err(dev, "set_var() failed on initial setup?\n");
640
641 if (register_framebuffer(info)) {
642 dev_err(dev, "failed to register framebuffer\n");
643 goto rel_cmap;
644 }
645
646 dev_set_drvdata(dev, info);
647
648 if (device_create_file(dev, &dev_attr_dispregs))
649 dev_err(dev, "Can't create sysfs regdump file\n");
650 return 0;
651
652rel_cmap:
653 fb_dealloc_cmap(&info->cmap);
654free_irq:
655 outreg(host, GC_IMASK, 0);
656 free_irq(par->irq, (void *)par);
657io_unmap:
658 iounmap(par->mmio_base);
659fb_unmap:
660 iounmap(par->fb_base);
661rel_reg:
662 release_mem_region(res.start, res_size);
663irqdisp:
664 irq_dispose_mapping(par->irq);
665fbrel:
666 dev_set_drvdata(dev, NULL);
667 framebuffer_release(info);
668 return ret;
669}
670
671static int __devexit of_platform_mb862xx_remove(struct of_device *ofdev)
672{
673 struct fb_info *fbi = dev_get_drvdata(&ofdev->dev);
674 struct mb862xxfb_par *par = fbi->par;
675 resource_size_t res_size = 1 + par->res->end - par->res->start;
676 unsigned long reg;
677
678 dev_dbg(fbi->dev, "%s release\n", fbi->fix.id);
679
680 /* display off */
681 reg = inreg(disp, GC_DCM1);
682 reg &= ~(GC_DCM01_DEN | GC_DCM01_L0E);
683 outreg(disp, GC_DCM1, reg);
684
685 /* disable interrupts */
686 outreg(host, GC_IMASK, 0);
687
688 free_irq(par->irq, (void *)par);
689 irq_dispose_mapping(par->irq);
690
691 device_remove_file(&ofdev->dev, &dev_attr_dispregs);
692
693 unregister_framebuffer(fbi);
694 fb_dealloc_cmap(&fbi->cmap);
695
696 iounmap(par->mmio_base);
697 iounmap(par->fb_base);
698
699 dev_set_drvdata(&ofdev->dev, NULL);
700 release_mem_region(par->res->start, res_size);
701 framebuffer_release(fbi);
702 return 0;
703}
704
705/*
706 * common types
707 */
708static struct of_device_id __devinitdata of_platform_mb862xx_tbl[] = {
709 { .compatible = "fujitsu,MB86276", },
710 { .compatible = "fujitsu,lime", },
711 { .compatible = "fujitsu,MB86277", },
712 { .compatible = "fujitsu,mint", },
713 { .compatible = "fujitsu,MB86293", },
714 { .compatible = "fujitsu,MB86294", },
715 { .compatible = "fujitsu,coral", },
716 { /* end */ }
717};
718
719static struct of_platform_driver of_platform_mb862xxfb_driver = {
720 .owner = THIS_MODULE,
721 .name = DRV_NAME,
722 .match_table = of_platform_mb862xx_tbl,
723 .probe = of_platform_mb862xx_probe,
724 .remove = __devexit_p(of_platform_mb862xx_remove),
725};
726#endif
727
728#if defined(CONFIG_FB_MB862XX_PCI_GDC)
729static int coralp_init(struct mb862xxfb_par *par)
730{
731 int cn, ver;
732
733 par->host = par->mmio_base;
734 par->i2c = par->mmio_base + MB862XX_I2C_BASE;
735 par->disp = par->mmio_base + MB862XX_DISP_BASE;
736 par->cap = par->mmio_base + MB862XX_CAP_BASE;
737 par->draw = par->mmio_base + MB862XX_DRAW_BASE;
738 par->geo = par->mmio_base + MB862XX_GEO_BASE;
739 par->pio = par->mmio_base + MB862XX_PIO_BASE;
740
741 par->refclk = GC_DISP_REFCLK_400;
742
743 ver = inreg(host, GC_CID);
744 cn = (ver & GC_CID_CNAME_MSK) >> 8;
745 ver = ver & GC_CID_VERSION_MSK;
746 if (cn == 3) {
747 dev_info(par->dev, "Fujitsu Coral-%s GDC Rev.%d found\n",\
748 (ver == 6) ? "P" : (ver == 8) ? "PA" : "?",
749 par->pdev->revision);
750 outreg(host, GC_CCF, GC_CCF_CGE_166 | GC_CCF_COT_133);
751 udelay(200);
752 outreg(host, GC_MMR, GC_MMR_CORALP_EVB_VAL);
753 udelay(10);
754 /* Clear interrupt status */
755 outreg(host, GC_IST, 0);
756 } else {
757 return -ENODEV;
758 }
759 return 0;
760}
761
762static int init_dram_ctrl(struct mb862xxfb_par *par)
763{
764 unsigned long i = 0;
765
766 /*
767 * Set io mode first! Spec. says IC may be destroyed
768 * if not set to SSTL2/LVCMOS before init.
769 */
770 outreg(dram_ctrl, GC_DCTL_IOCONT1_IOCONT0, GC_EVB_DCTL_IOCONT1_IOCONT0);
771
772 /* DRAM init */
773 outreg(dram_ctrl, GC_DCTL_MODE_ADD, GC_EVB_DCTL_MODE_ADD);
774 outreg(dram_ctrl, GC_DCTL_SETTIME1_EMODE, GC_EVB_DCTL_SETTIME1_EMODE);
775 outreg(dram_ctrl, GC_DCTL_REFRESH_SETTIME2,
776 GC_EVB_DCTL_REFRESH_SETTIME2);
777 outreg(dram_ctrl, GC_DCTL_RSV2_RSV1, GC_EVB_DCTL_RSV2_RSV1);
778 outreg(dram_ctrl, GC_DCTL_DDRIF2_DDRIF1, GC_EVB_DCTL_DDRIF2_DDRIF1);
779 outreg(dram_ctrl, GC_DCTL_RSV0_STATES, GC_EVB_DCTL_RSV0_STATES);
780
781 /* DLL reset done? */
782 while ((inreg(dram_ctrl, GC_DCTL_RSV0_STATES) & GC_DCTL_STATES_MSK)) {
783 udelay(GC_DCTL_INIT_WAIT_INTERVAL);
784 if (i++ > GC_DCTL_INIT_WAIT_CNT) {
785 dev_err(par->dev, "VRAM init failed.\n");
786 return -EINVAL;
787 }
788 }
789 outreg(dram_ctrl, GC_DCTL_MODE_ADD, GC_EVB_DCTL_MODE_ADD_AFT_RST);
790 outreg(dram_ctrl, GC_DCTL_RSV0_STATES, GC_EVB_DCTL_RSV0_STATES_AFT_RST);
791 return 0;
792}
793
794static int carmine_init(struct mb862xxfb_par *par)
795{
796 unsigned long reg;
797
798 par->ctrl = par->mmio_base + MB86297_CTRL_BASE;
799 par->i2c = par->mmio_base + MB86297_I2C_BASE;
800 par->disp = par->mmio_base + MB86297_DISP0_BASE;
801 par->disp1 = par->mmio_base + MB86297_DISP1_BASE;
802 par->cap = par->mmio_base + MB86297_CAP0_BASE;
803 par->cap1 = par->mmio_base + MB86297_CAP1_BASE;
804 par->draw = par->mmio_base + MB86297_DRAW_BASE;
805 par->dram_ctrl = par->mmio_base + MB86297_DRAMCTRL_BASE;
806 par->wrback = par->mmio_base + MB86297_WRBACK_BASE;
807
808 par->refclk = GC_DISP_REFCLK_533;
809
810 /* warm up */
811 reg = GC_CTRL_CLK_EN_DRAM | GC_CTRL_CLK_EN_2D3D | GC_CTRL_CLK_EN_DISP0;
812 outreg(ctrl, GC_CTRL_CLK_ENABLE, reg);
813
814 /* check for engine module revision */
815 if (inreg(draw, GC_2D3D_REV) == GC_RE_REVISION)
816 dev_info(par->dev, "Fujitsu Carmine GDC Rev.%d found\n",
817 par->pdev->revision);
818 else
819 goto err_init;
820
821 reg &= ~GC_CTRL_CLK_EN_2D3D;
822 outreg(ctrl, GC_CTRL_CLK_ENABLE, reg);
823
824 /* set up vram */
825 if (init_dram_ctrl(par) < 0)
826 goto err_init;
827
828 outreg(ctrl, GC_CTRL_INT_MASK, 0);
829 return 0;
830
831err_init:
832 outreg(ctrl, GC_CTRL_CLK_ENABLE, 0);
833 return -EINVAL;
834}
835
836static inline int mb862xx_pci_gdc_init(struct mb862xxfb_par *par)
837{
838 switch (par->type) {
839 case BT_CORALP:
840 return coralp_init(par);
841 case BT_CARMINE:
842 return carmine_init(par);
843 default:
844 return -ENODEV;
845 }
846}
847
848#define CHIP_ID(id) \
849 { PCI_DEVICE(PCI_VENDOR_ID_FUJITSU_LIMITED, id) }
850
851static struct pci_device_id mb862xx_pci_tbl[] __devinitdata = {
852 /* MB86295/MB86296 */
853 CHIP_ID(PCI_DEVICE_ID_FUJITSU_CORALP),
854 CHIP_ID(PCI_DEVICE_ID_FUJITSU_CORALPA),
855 /* MB86297 */
856 CHIP_ID(PCI_DEVICE_ID_FUJITSU_CARMINE),
857 { 0, }
858};
859
860MODULE_DEVICE_TABLE(pci, mb862xx_pci_tbl);
861
862static int __devinit mb862xx_pci_probe(struct pci_dev *pdev,
863 const struct pci_device_id *ent)
864{
865 struct mb862xxfb_par *par;
866 struct fb_info *info;
867 struct device *dev = &pdev->dev;
868 int ret;
869
870 ret = pci_enable_device(pdev);
871 if (ret < 0) {
872 dev_err(dev, "Cannot enable PCI device\n");
873 goto out;
874 }
875
876 info = framebuffer_alloc(sizeof(struct mb862xxfb_par), dev);
877 if (!info) {
878 dev_err(dev, "framebuffer alloc failed\n");
879 ret = -ENOMEM;
880 goto dis_dev;
881 }
882
883 par = info->par;
884 par->info = info;
885 par->dev = dev;
886 par->pdev = pdev;
887 par->irq = pdev->irq;
888
889 ret = pci_request_regions(pdev, DRV_NAME);
890 if (ret < 0) {
891 dev_err(dev, "Cannot reserve region(s) for PCI device\n");
892 goto rel_fb;
893 }
894
895 switch (pdev->device) {
896 case PCI_DEVICE_ID_FUJITSU_CORALP:
897 case PCI_DEVICE_ID_FUJITSU_CORALPA:
898 par->fb_base_phys = pci_resource_start(par->pdev, 0);
899 par->mapped_vram = CORALP_MEM_SIZE;
900 par->mmio_base_phys = par->fb_base_phys + MB862XX_MMIO_BASE;
901 par->mmio_len = MB862XX_MMIO_SIZE;
902 par->type = BT_CORALP;
903 break;
904 case PCI_DEVICE_ID_FUJITSU_CARMINE:
905 par->fb_base_phys = pci_resource_start(par->pdev, 2);
906 par->mmio_base_phys = pci_resource_start(par->pdev, 3);
907 par->mmio_len = pci_resource_len(par->pdev, 3);
908 par->mapped_vram = CARMINE_MEM_SIZE;
909 par->type = BT_CARMINE;
910 break;
911 default:
912 /* should never occur */
913 goto rel_reg;
914 }
915
916 par->fb_base = ioremap(par->fb_base_phys, par->mapped_vram);
917 if (par->fb_base == NULL) {
918 dev_err(dev, "Cannot map framebuffer\n");
919 goto rel_reg;
920 }
921
922 par->mmio_base = ioremap(par->mmio_base_phys, par->mmio_len);
923 if (par->mmio_base == NULL) {
924 dev_err(dev, "Cannot map registers\n");
925 ret = -EIO;
926 goto fb_unmap;
927 }
928
929 dev_dbg(dev, "fb phys 0x%llx 0x%lx\n",
930 (u64)par->fb_base_phys, (ulong)par->mapped_vram);
931 dev_dbg(dev, "mmio phys 0x%llx 0x%lx\n",
932 (u64)par->mmio_base_phys, (ulong)par->mmio_len);
933
934 if (mb862xx_pci_gdc_init(par))
935 goto io_unmap;
936
937 if (request_irq(par->irq, mb862xx_intr, IRQF_DISABLED | IRQF_SHARED,
938 DRV_NAME, (void *)par)) {
939 dev_err(dev, "Cannot request irq\n");
940 goto io_unmap;
941 }
942
943 mb862xxfb_init_fbinfo(info);
944
945 if (fb_alloc_cmap(&info->cmap, NR_PALETTE, 0) < 0) {
946 dev_err(dev, "Could not allocate cmap for fb_info.\n");
947 ret = -ENOMEM;
948 goto free_irq;
949 }
950
951 if ((info->fbops->fb_set_par)(info))
952 dev_err(dev, "set_var() failed on initial setup?\n");
953
954 ret = register_framebuffer(info);
955 if (ret < 0) {
956 dev_err(dev, "failed to register framebuffer\n");
957 goto rel_cmap;
958 }
959
960 pci_set_drvdata(pdev, info);
961
962 if (device_create_file(dev, &dev_attr_dispregs))
963 dev_err(dev, "Can't create sysfs regdump file\n");
964
965 if (par->type == BT_CARMINE)
966 outreg(ctrl, GC_CTRL_INT_MASK, GC_CARMINE_INT_EN);
967 else
968 outreg(host, GC_IMASK, GC_INT_EN);
969
970 return 0;
971
972rel_cmap:
973 fb_dealloc_cmap(&info->cmap);
974free_irq:
975 free_irq(par->irq, (void *)par);
976io_unmap:
977 iounmap(par->mmio_base);
978fb_unmap:
979 iounmap(par->fb_base);
980rel_reg:
981 pci_release_regions(pdev);
982rel_fb:
983 framebuffer_release(info);
984dis_dev:
985 pci_disable_device(pdev);
986out:
987 return ret;
988}
989
990static void __devexit mb862xx_pci_remove(struct pci_dev *pdev)
991{
992 struct fb_info *fbi = pci_get_drvdata(pdev);
993 struct mb862xxfb_par *par = fbi->par;
994 unsigned long reg;
995
996 dev_dbg(fbi->dev, "%s release\n", fbi->fix.id);
997
998 /* display off */
999 reg = inreg(disp, GC_DCM1);
1000 reg &= ~(GC_DCM01_DEN | GC_DCM01_L0E);
1001 outreg(disp, GC_DCM1, reg);
1002
1003 if (par->type == BT_CARMINE) {
1004 outreg(ctrl, GC_CTRL_INT_MASK, 0);
1005 outreg(ctrl, GC_CTRL_CLK_ENABLE, 0);
1006 } else {
1007 outreg(host, GC_IMASK, 0);
1008 }
1009
1010 device_remove_file(&pdev->dev, &dev_attr_dispregs);
1011
1012 pci_set_drvdata(pdev, NULL);
1013 unregister_framebuffer(fbi);
1014 fb_dealloc_cmap(&fbi->cmap);
1015
1016 free_irq(par->irq, (void *)par);
1017 iounmap(par->mmio_base);
1018 iounmap(par->fb_base);
1019
1020 pci_release_regions(pdev);
1021 framebuffer_release(fbi);
1022 pci_disable_device(pdev);
1023}
1024
1025static struct pci_driver mb862xxfb_pci_driver = {
1026 .name = DRV_NAME,
1027 .id_table = mb862xx_pci_tbl,
1028 .probe = mb862xx_pci_probe,
1029 .remove = __devexit_p(mb862xx_pci_remove),
1030};
1031#endif
1032
1033static int __devinit mb862xxfb_init(void)
1034{
1035 int ret = -ENODEV;
1036
1037#if defined(CONFIG_FB_MB862XX_LIME)
1038 ret = of_register_platform_driver(&of_platform_mb862xxfb_driver);
1039#endif
1040#if defined(CONFIG_FB_MB862XX_PCI_GDC)
1041 ret = pci_register_driver(&mb862xxfb_pci_driver);
1042#endif
1043 return ret;
1044}
1045
1046static void __exit mb862xxfb_exit(void)
1047{
1048#if defined(CONFIG_FB_MB862XX_LIME)
1049 of_unregister_platform_driver(&of_platform_mb862xxfb_driver);
1050#endif
1051#if defined(CONFIG_FB_MB862XX_PCI_GDC)
1052 pci_unregister_driver(&mb862xxfb_pci_driver);
1053#endif
1054}
1055
1056module_init(mb862xxfb_init);
1057module_exit(mb862xxfb_exit);
1058
1059MODULE_DESCRIPTION("Fujitsu MB862xx Framebuffer driver");
1060MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
1061MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/mb862xx/mb862xxfb.h b/drivers/video/mb862xx/mb862xxfb.h
new file mode 100644
index 000000000000..c4c8f4dd2217
--- /dev/null
+++ b/drivers/video/mb862xx/mb862xxfb.h
@@ -0,0 +1,83 @@
1#ifndef __MB862XX_H__
2#define __MB862XX_H__
3
4#define PCI_VENDOR_ID_FUJITSU_LIMITED 0x10cf
5#define PCI_DEVICE_ID_FUJITSU_CORALP 0x2019
6#define PCI_DEVICE_ID_FUJITSU_CORALPA 0x201e
7#define PCI_DEVICE_ID_FUJITSU_CARMINE 0x202b
8
9#define GC_MMR_CORALP_EVB_VAL 0x11d7fa13
10
11enum gdctype {
12 BT_NONE,
13 BT_LIME,
14 BT_MINT,
15 BT_CORAL,
16 BT_CORALP,
17 BT_CARMINE,
18};
19
20struct mb862xx_gc_mode {
21 struct fb_videomode def_mode; /* mode of connected display */
22 unsigned int def_bpp; /* default depth */
23 unsigned long max_vram; /* connected SDRAM size */
24 unsigned long ccf; /* gdc clk */
25 unsigned long mmr; /* memory mode for SDRAM */
26};
27
28/* private data */
29struct mb862xxfb_par {
30 struct fb_info *info; /* fb info head */
31 struct device *dev;
32 struct pci_dev *pdev;
33 struct resource *res; /* framebuffer/mmio resource */
34
35 resource_size_t fb_base_phys; /* fb base, 36-bit PPC440EPx */
36 resource_size_t mmio_base_phys; /* io base addr */
37 void __iomem *fb_base; /* remapped framebuffer */
38 void __iomem *mmio_base; /* remapped registers */
39 size_t mapped_vram; /* length of remapped vram */
40 size_t mmio_len; /* length of register region */
41
42 void __iomem *host; /* relocatable reg. bases */
43 void __iomem *i2c;
44 void __iomem *disp;
45 void __iomem *disp1;
46 void __iomem *cap;
47 void __iomem *cap1;
48 void __iomem *draw;
49 void __iomem *geo;
50 void __iomem *pio;
51 void __iomem *ctrl;
52 void __iomem *dram_ctrl;
53 void __iomem *wrback;
54
55 unsigned int irq;
56 unsigned int type; /* GDC type */
57 unsigned int refclk; /* disp. reference clock */
58 struct mb862xx_gc_mode *gc_mode; /* GDC mode init data */
59 int pre_init; /* don't init display if 1 */
60
61 u32 pseudo_palette[16];
62};
63
64#if defined(CONFIG_FB_MB862XX_LIME) && defined(CONFIG_FB_MB862XX_PCI_GDC)
65#error "Select Lime GDC or CoralP/Carmine support, but not both together"
66#endif
67#if defined(CONFIG_FB_MB862XX_LIME)
68#define gdc_read __raw_readl
69#define gdc_write __raw_writel
70#else
71#define gdc_read readl
72#define gdc_write writel
73#endif
74
75#define inreg(type, off) \
76 gdc_read((par->type + (off)))
77
78#define outreg(type, off, val) \
79 gdc_write((val), (par->type + (off)))
80
81#define pack(a, b) (((a) << 16) | (b))
82
83#endif
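Note on the register accessors declared in the new header above: inreg()/outreg() expand against a local variable named par (a struct mb862xxfb_par *), so any caller must have that pointer in scope. A minimal usage sketch follows; the 0x0100 offset and the enable bit are made-up values for illustration, not register definitions taken from this patch.

/* Illustrative sketch only: "par" is the driver's private data and
 * 0x0100 stands in for some display-controller register offset. */
static void mb862xx_sketch_toggle(struct mb862xxfb_par *par)
{
	u32 reg;

	reg = inreg(disp, 0x0100);	/* gdc_read(par->disp + 0x0100) */
	reg |= 1U << 31;		/* hypothetical enable bit */
	outreg(disp, 0x0100, reg);	/* gdc_write(reg, par->disp + 0x0100) */
}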
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 1a22fe782a27..4fd3fa5546b1 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -67,11 +67,11 @@ config AT91RM9200_WATCHDOG
67 system when the timeout is reached. 67 system when the timeout is reached.
68 68
69config AT91SAM9X_WATCHDOG 69config AT91SAM9X_WATCHDOG
70 tristate "AT91SAM9X watchdog" 70 tristate "AT91SAM9X / AT91CAP9 watchdog"
71 depends on WATCHDOG && (ARCH_AT91SAM9260 || ARCH_AT91SAM9261) 71 depends on ARCH_AT91 && !ARCH_AT91RM9200
72 help 72 help
73 Watchdog timer embedded into AT91SAM9X chips. This will reboot your 73 Watchdog timer embedded into AT91SAM9X and AT91CAP9 chips. This will
74 system when the timeout is reached. 74 reboot your system when the timeout is reached.
75 75
76config 21285_WATCHDOG 76config 21285_WATCHDOG
77 tristate "DC21285 watchdog" 77 tristate "DC21285 watchdog"
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index b4babfc31586..b1da287f90ec 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -30,7 +30,7 @@
30#include <linux/bitops.h> 30#include <linux/bitops.h>
31#include <linux/uaccess.h> 31#include <linux/uaccess.h>
32 32
33#include <asm/arch/at91_wdt.h> 33#include <mach/at91_wdt.h>
34 34
35#define DRV_NAME "AT91SAM9 Watchdog" 35#define DRV_NAME "AT91SAM9 Watchdog"
36 36
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 8c83abc73400..a0fb5eac407c 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -41,7 +41,6 @@
41#include <linux/pagemap.h> 41#include <linux/pagemap.h>
42#include <linux/highmem.h> 42#include <linux/highmem.h>
43#include <linux/mutex.h> 43#include <linux/mutex.h>
44#include <linux/highmem.h>
45#include <linux/list.h> 44#include <linux/list.h>
46#include <linux/sysdev.h> 45#include <linux/sysdev.h>
47 46
diff --git a/fs/Makefile b/fs/Makefile
index 2168c902d5ca..d9f8afe6f0c4 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -81,8 +81,6 @@ obj-$(CONFIG_HUGETLBFS) += hugetlbfs/
81obj-$(CONFIG_CODA_FS) += coda/ 81obj-$(CONFIG_CODA_FS) += coda/
82obj-$(CONFIG_MINIX_FS) += minix/ 82obj-$(CONFIG_MINIX_FS) += minix/
83obj-$(CONFIG_FAT_FS) += fat/ 83obj-$(CONFIG_FAT_FS) += fat/
84obj-$(CONFIG_MSDOS_FS) += msdos/
85obj-$(CONFIG_VFAT_FS) += vfat/
86obj-$(CONFIG_BFS_FS) += bfs/ 84obj-$(CONFIG_BFS_FS) += bfs/
87obj-$(CONFIG_ISO9660_FS) += isofs/ 85obj-$(CONFIG_ISO9660_FS) += isofs/
88obj-$(CONFIG_HFSPLUS_FS) += hfsplus/ # Before hfs to find wrapped HFS+ 86obj-$(CONFIG_HFSPLUS_FS) += hfsplus/ # Before hfs to find wrapped HFS+
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index 625abf5422e2..33bf8cbfd051 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -128,9 +128,10 @@ static inline void free_dev_ioctl(struct autofs_dev_ioctl *param)
128 */ 128 */
129static int validate_dev_ioctl(int cmd, struct autofs_dev_ioctl *param) 129static int validate_dev_ioctl(int cmd, struct autofs_dev_ioctl *param)
130{ 130{
131 int err = -EINVAL; 131 int err;
132 132
133 if (check_dev_ioctl_version(cmd, param)) { 133 err = check_dev_ioctl_version(cmd, param);
134 if (err) {
134 AUTOFS_WARN("invalid device control module version " 135 AUTOFS_WARN("invalid device control module version "
135 "supplied for cmd(0x%08x)", cmd); 136 "supplied for cmd(0x%08x)", cmd);
136 goto out; 137 goto out;
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index cde2f8e8935a..4b6fb3f628c0 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -56,12 +56,23 @@ static int autofs4_mount_busy(struct vfsmount *mnt, struct dentry *dentry)
56 mntget(mnt); 56 mntget(mnt);
57 dget(dentry); 57 dget(dentry);
58 58
59 if (!autofs4_follow_mount(&mnt, &dentry)) 59 if (!follow_down(&mnt, &dentry))
60 goto done; 60 goto done;
61 61
62 /* This is an autofs submount, we can't expire it */ 62 if (is_autofs4_dentry(dentry)) {
63 if (is_autofs4_dentry(dentry)) 63 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
64 goto done; 64
65 /* This is an autofs submount, we can't expire it */
66 if (sbi->type == AUTOFS_TYPE_INDIRECT)
67 goto done;
68
69 /*
70 * Otherwise it's an offset mount and we need to check
71 * if we can umount its mount, if there is one.
72 */
73 if (!d_mountpoint(dentry))
74 goto done;
75 }
65 76
66 /* Update the expiry counter if fs is busy */ 77 /* Update the expiry counter if fs is busy */
67 if (!may_umount_tree(mnt)) { 78 if (!may_umount_tree(mnt)) {
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 88a776fa0ef6..db831efbdbbd 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -986,7 +986,6 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);
986static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) 986static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
987{ 987{
988 struct gendisk *disk; 988 struct gendisk *disk;
989 struct hd_struct *part = NULL;
990 int ret; 989 int ret;
991 int partno; 990 int partno;
992 int perm = 0; 991 int perm = 0;
@@ -1004,24 +1003,25 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
1004 return ret; 1003 return ret;
1005 } 1004 }
1006 1005
1007 ret = -ENXIO;
1008
1009 lock_kernel(); 1006 lock_kernel();
1010 1007
1008 ret = -ENXIO;
1011 disk = get_gendisk(bdev->bd_dev, &partno); 1009 disk = get_gendisk(bdev->bd_dev, &partno);
1012 if (!disk) 1010 if (!disk)
1013 goto out_unlock_kernel; 1011 goto out_unlock_kernel;
1014 part = disk_get_part(disk, partno);
1015 if (!part)
1016 goto out_unlock_kernel;
1017 1012
1018 mutex_lock_nested(&bdev->bd_mutex, for_part); 1013 mutex_lock_nested(&bdev->bd_mutex, for_part);
1019 if (!bdev->bd_openers) { 1014 if (!bdev->bd_openers) {
1020 bdev->bd_disk = disk; 1015 bdev->bd_disk = disk;
1021 bdev->bd_part = part;
1022 bdev->bd_contains = bdev; 1016 bdev->bd_contains = bdev;
1023 if (!partno) { 1017 if (!partno) {
1024 struct backing_dev_info *bdi; 1018 struct backing_dev_info *bdi;
1019
1020 ret = -ENXIO;
1021 bdev->bd_part = disk_get_part(disk, partno);
1022 if (!bdev->bd_part)
1023 goto out_clear;
1024
1025 if (disk->fops->open) { 1025 if (disk->fops->open) {
1026 ret = disk->fops->open(bdev, mode); 1026 ret = disk->fops->open(bdev, mode);
1027 if (ret) 1027 if (ret)
@@ -1049,18 +1049,17 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
1049 bdev->bd_contains = whole; 1049 bdev->bd_contains = whole;
1050 bdev->bd_inode->i_data.backing_dev_info = 1050 bdev->bd_inode->i_data.backing_dev_info =
1051 whole->bd_inode->i_data.backing_dev_info; 1051 whole->bd_inode->i_data.backing_dev_info;
1052 bdev->bd_part = disk_get_part(disk, partno);
1052 if (!(disk->flags & GENHD_FL_UP) || 1053 if (!(disk->flags & GENHD_FL_UP) ||
1053 !part || !part->nr_sects) { 1054 !bdev->bd_part || !bdev->bd_part->nr_sects) {
1054 ret = -ENXIO; 1055 ret = -ENXIO;
1055 goto out_clear; 1056 goto out_clear;
1056 } 1057 }
1057 bd_set_size(bdev, (loff_t)part->nr_sects << 9); 1058 bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
1058 } 1059 }
1059 } else { 1060 } else {
1060 disk_put_part(part);
1061 put_disk(disk); 1061 put_disk(disk);
1062 module_put(disk->fops->owner); 1062 module_put(disk->fops->owner);
1063 part = NULL;
1064 disk = NULL; 1063 disk = NULL;
1065 if (bdev->bd_contains == bdev) { 1064 if (bdev->bd_contains == bdev) {
1066 if (bdev->bd_disk->fops->open) { 1065 if (bdev->bd_disk->fops->open) {
@@ -1080,6 +1079,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
1080 return 0; 1079 return 0;
1081 1080
1082 out_clear: 1081 out_clear:
1082 disk_put_part(bdev->bd_part);
1083 bdev->bd_disk = NULL; 1083 bdev->bd_disk = NULL;
1084 bdev->bd_part = NULL; 1084 bdev->bd_part = NULL;
1085 bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; 1085 bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
@@ -1091,7 +1091,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
1091 out_unlock_kernel: 1091 out_unlock_kernel:
1092 unlock_kernel(); 1092 unlock_kernel();
1093 1093
1094 disk_put_part(part);
1095 if (disk) 1094 if (disk)
1096 module_put(disk->fops->owner); 1095 module_put(disk->fops->owner);
1097 put_disk(disk); 1096 put_disk(disk);
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 8f528ea24c48..8855331b2fba 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -4,7 +4,11 @@ Various fixes to make delete of open files behavior more predictable
4(when delete of an open file fails we mark the file as "delete-on-close" 4(when delete of an open file fails we mark the file as "delete-on-close"
5in a way that more servers accept, but only if we can first rename the 5in a way that more servers accept, but only if we can first rename the
6file to a temporary name). Add experimental support for more safely 6file to a temporary name). Add experimental support for more safely
7handling fcntl(F_SETLEASE). 7handling fcntl(F_SETLEASE). Convert cifs to using blocking tcp
8sends, and also let tcp autotune the socket send and receive buffers.
9This reduces the number of EAGAIN errors returned by TCP/IP in
10high stress workloads (and the number of retries on socket writes
11when sending large SMBWriteX requests).
8 12
9Version 1.54 13Version 1.54
10------------ 14------------
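The CHANGES entry above summarises the behavioural switch implemented later in this diff (fs/cifs/transport.c and connect.c): unless the new noblocksend mount option is given, smb_send()/smb_send2() drop MSG_DONTWAIT and the connect path sets a bounded send timeout, so large SMBWriteX requests block briefly instead of bouncing with EAGAIN. A condensed sketch of that decision is below; the helper name is invented, but the flags and the 3 * HZ timeout mirror the patch.

/* Sketch of the send-path choice introduced by this change (not a
 * function that exists in the patch). */
static void cifs_sketch_send_mode(struct msghdr *msg, struct sock *sk,
				  bool noblocksnd)
{
	if (noblocksnd) {
		/* legacy behaviour: non-blocking send, retried on EAGAIN */
		msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	} else {
		/* new default: blocking send bounded by a short timeout */
		msg->msg_flags = MSG_NOSIGNAL;
		sk->sk_sndtimeo = 3 * HZ;
	}
}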
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index c791e5b5a914..1cb1189f24e0 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -141,6 +141,8 @@ struct TCP_Server_Info {
141 char versionMajor; 141 char versionMajor;
142 char versionMinor; 142 char versionMinor;
143 bool svlocal:1; /* local server or remote */ 143 bool svlocal:1; /* local server or remote */
 144 bool noblocksnd; /* don't use blocking sendmsg */
145 bool noautotune; /* do not autotune send buf sizes */
144 atomic_t socketUseCount; /* number of open cifs sessions on socket */ 146 atomic_t socketUseCount; /* number of open cifs sessions on socket */
145 atomic_t inFlight; /* number of requests on the wire to server */ 147 atomic_t inFlight; /* number of requests on the wire to server */
146#ifdef CONFIG_CIFS_STATS2 148#ifdef CONFIG_CIFS_STATS2
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 0cff7fe986e8..6f21ecb85ce5 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -36,7 +36,7 @@ extern void cifs_buf_release(void *);
36extern struct smb_hdr *cifs_small_buf_get(void); 36extern struct smb_hdr *cifs_small_buf_get(void);
37extern void cifs_small_buf_release(void *); 37extern void cifs_small_buf_release(void *);
38extern int smb_send(struct socket *, struct smb_hdr *, 38extern int smb_send(struct socket *, struct smb_hdr *,
39 unsigned int /* length */ , struct sockaddr *); 39 unsigned int /* length */ , struct sockaddr *, bool);
40extern unsigned int _GetXid(void); 40extern unsigned int _GetXid(void);
41extern void _FreeXid(unsigned int); 41extern void _FreeXid(unsigned int);
42#define GetXid() (int)_GetXid(); cFYI(1,("CIFS VFS: in %s as Xid: %d with uid: %d",__func__, xid,current->fsuid)); 42#define GetXid() (int)_GetXid(); cFYI(1,("CIFS VFS: in %s as Xid: %d with uid: %d",__func__, xid,current->fsuid));
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 843a85fb8b9a..d5eac48fc415 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1536,7 +1536,7 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
1536 __u32 bytes_sent; 1536 __u32 bytes_sent;
1537 __u16 byte_count; 1537 __u16 byte_count;
1538 1538
1539 /* cFYI(1,("write at %lld %d bytes",offset,count));*/ 1539 /* cFYI(1, ("write at %lld %d bytes", offset, count));*/
1540 if (tcon->ses == NULL) 1540 if (tcon->ses == NULL)
1541 return -ECONNABORTED; 1541 return -ECONNABORTED;
1542 1542
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 71b7661e2260..e9f9248cb3fe 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -92,6 +92,8 @@ struct smb_vol {
92 bool seal:1; /* request transport encryption on share */ 92 bool seal:1; /* request transport encryption on share */
93 bool nodfs:1; /* Do not request DFS, even if available */ 93 bool nodfs:1; /* Do not request DFS, even if available */
94 bool local_lease:1; /* check leases only on local system, not remote */ 94 bool local_lease:1; /* check leases only on local system, not remote */
95 bool noblocksnd:1;
96 bool noautotune:1;
95 unsigned int rsize; 97 unsigned int rsize;
96 unsigned int wsize; 98 unsigned int wsize;
97 unsigned int sockopt; 99 unsigned int sockopt;
@@ -102,9 +104,11 @@ struct smb_vol {
102static int ipv4_connect(struct sockaddr_in *psin_server, 104static int ipv4_connect(struct sockaddr_in *psin_server,
103 struct socket **csocket, 105 struct socket **csocket,
104 char *netb_name, 106 char *netb_name,
105 char *server_netb_name); 107 char *server_netb_name,
108 bool noblocksnd,
109 bool nosndbuf); /* ipv6 never set sndbuf size */
106static int ipv6_connect(struct sockaddr_in6 *psin_server, 110static int ipv6_connect(struct sockaddr_in6 *psin_server,
107 struct socket **csocket); 111 struct socket **csocket, bool noblocksnd);
108 112
109 113
110 /* 114 /*
@@ -191,12 +195,13 @@ cifs_reconnect(struct TCP_Server_Info *server)
191 try_to_freeze(); 195 try_to_freeze();
192 if (server->protocolType == IPV6) { 196 if (server->protocolType == IPV6) {
193 rc = ipv6_connect(&server->addr.sockAddr6, 197 rc = ipv6_connect(&server->addr.sockAddr6,
194 &server->ssocket); 198 &server->ssocket, server->noautotune);
195 } else { 199 } else {
196 rc = ipv4_connect(&server->addr.sockAddr, 200 rc = ipv4_connect(&server->addr.sockAddr,
197 &server->ssocket, 201 &server->ssocket,
198 server->workstation_RFC1001_name, 202 server->workstation_RFC1001_name,
199 server->server_RFC1001_name); 203 server->server_RFC1001_name,
204 server->noblocksnd, server->noautotune);
200 } 205 }
201 if (rc) { 206 if (rc) {
202 cFYI(1, ("reconnect error %d", rc)); 207 cFYI(1, ("reconnect error %d", rc));
@@ -1192,6 +1197,10 @@ cifs_parse_mount_options(char *options, const char *devname,
1192 /* ignore */ 1197 /* ignore */
1193 } else if (strnicmp(data, "rw", 2) == 0) { 1198 } else if (strnicmp(data, "rw", 2) == 0) {
1194 vol->rw = true; 1199 vol->rw = true;
1200 } else if (strnicmp(data, "noblocksend", 11) == 0) {
1201 vol->noblocksnd = 1;
1202 } else if (strnicmp(data, "noautotune", 10) == 0) {
1203 vol->noautotune = 1;
1195 } else if ((strnicmp(data, "suid", 4) == 0) || 1204 } else if ((strnicmp(data, "suid", 4) == 0) ||
1196 (strnicmp(data, "nosuid", 6) == 0) || 1205 (strnicmp(data, "nosuid", 6) == 0) ||
1197 (strnicmp(data, "exec", 4) == 0) || 1206 (strnicmp(data, "exec", 4) == 0) ||
@@ -1518,7 +1527,8 @@ static void rfc1002mangle(char *target, char *source, unsigned int length)
1518 1527
1519static int 1528static int
1520ipv4_connect(struct sockaddr_in *psin_server, struct socket **csocket, 1529ipv4_connect(struct sockaddr_in *psin_server, struct socket **csocket,
1521 char *netbios_name, char *target_name) 1530 char *netbios_name, char *target_name,
1531 bool noblocksnd, bool noautotune)
1522{ 1532{
1523 int rc = 0; 1533 int rc = 0;
1524 int connected = 0; 1534 int connected = 0;
@@ -1590,11 +1600,16 @@ ipv4_connect(struct sockaddr_in *psin_server, struct socket **csocket,
1590 (*csocket)->sk->sk_sndbuf, 1600 (*csocket)->sk->sk_sndbuf,
1591 (*csocket)->sk->sk_rcvbuf, (*csocket)->sk->sk_rcvtimeo)); 1601 (*csocket)->sk->sk_rcvbuf, (*csocket)->sk->sk_rcvtimeo));
1592 (*csocket)->sk->sk_rcvtimeo = 7 * HZ; 1602 (*csocket)->sk->sk_rcvtimeo = 7 * HZ;
1603 if (!noblocksnd)
1604 (*csocket)->sk->sk_sndtimeo = 3 * HZ;
1605
1593 /* make the bufsizes depend on wsize/rsize and max requests */ 1606 /* make the bufsizes depend on wsize/rsize and max requests */
1594 if ((*csocket)->sk->sk_sndbuf < (200 * 1024)) 1607 if (noautotune) {
1595 (*csocket)->sk->sk_sndbuf = 200 * 1024; 1608 if ((*csocket)->sk->sk_sndbuf < (200 * 1024))
1596 if ((*csocket)->sk->sk_rcvbuf < (140 * 1024)) 1609 (*csocket)->sk->sk_sndbuf = 200 * 1024;
1597 (*csocket)->sk->sk_rcvbuf = 140 * 1024; 1610 if ((*csocket)->sk->sk_rcvbuf < (140 * 1024))
1611 (*csocket)->sk->sk_rcvbuf = 140 * 1024;
1612 }
1598 1613
1599 /* send RFC1001 sessinit */ 1614 /* send RFC1001 sessinit */
1600 if (psin_server->sin_port == htons(RFC1001_PORT)) { 1615 if (psin_server->sin_port == htons(RFC1001_PORT)) {
@@ -1631,7 +1646,7 @@ ipv4_connect(struct sockaddr_in *psin_server, struct socket **csocket,
1631 /* sizeof RFC1002_SESSION_REQUEST with no scope */ 1646 /* sizeof RFC1002_SESSION_REQUEST with no scope */
1632 smb_buf->smb_buf_length = 0x81000044; 1647 smb_buf->smb_buf_length = 0x81000044;
1633 rc = smb_send(*csocket, smb_buf, 0x44, 1648 rc = smb_send(*csocket, smb_buf, 0x44,
1634 (struct sockaddr *)psin_server); 1649 (struct sockaddr *)psin_server, noblocksnd);
1635 kfree(ses_init_buf); 1650 kfree(ses_init_buf);
1636 msleep(1); /* RFC1001 layer in at least one server 1651 msleep(1); /* RFC1001 layer in at least one server
1637 requires very short break before negprot 1652 requires very short break before negprot
@@ -1651,7 +1666,8 @@ ipv4_connect(struct sockaddr_in *psin_server, struct socket **csocket,
1651} 1666}
1652 1667
1653static int 1668static int
1654ipv6_connect(struct sockaddr_in6 *psin_server, struct socket **csocket) 1669ipv6_connect(struct sockaddr_in6 *psin_server, struct socket **csocket,
1670 bool noblocksnd)
1655{ 1671{
1656 int rc = 0; 1672 int rc = 0;
1657 int connected = 0; 1673 int connected = 0;
@@ -1720,6 +1736,9 @@ ipv6_connect(struct sockaddr_in6 *psin_server, struct socket **csocket)
1720 the default. sock_setsockopt not used because it expects 1736 the default. sock_setsockopt not used because it expects
1721 user space buffer */ 1737 user space buffer */
1722 (*csocket)->sk->sk_rcvtimeo = 7 * HZ; 1738 (*csocket)->sk->sk_rcvtimeo = 7 * HZ;
1739 if (!noblocksnd)
1740 (*csocket)->sk->sk_sndtimeo = 3 * HZ;
1741
1723 1742
1724 return rc; 1743 return rc;
1725} 1744}
@@ -1983,11 +2002,14 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
1983 cFYI(1, ("attempting ipv6 connect")); 2002 cFYI(1, ("attempting ipv6 connect"));
1984 /* BB should we allow ipv6 on port 139? */ 2003 /* BB should we allow ipv6 on port 139? */
1985 /* other OS never observed in Wild doing 139 with v6 */ 2004 /* other OS never observed in Wild doing 139 with v6 */
1986 rc = ipv6_connect(&sin_server6, &csocket); 2005 rc = ipv6_connect(&sin_server6, &csocket,
2006 volume_info.noblocksnd);
1987 } else 2007 } else
1988 rc = ipv4_connect(&sin_server, &csocket, 2008 rc = ipv4_connect(&sin_server, &csocket,
1989 volume_info.source_rfc1001_name, 2009 volume_info.source_rfc1001_name,
1990 volume_info.target_rfc1001_name); 2010 volume_info.target_rfc1001_name,
2011 volume_info.noblocksnd,
2012 volume_info.noautotune);
1991 if (rc < 0) { 2013 if (rc < 0) {
1992 cERROR(1, ("Error connecting to IPv4 socket. " 2014 cERROR(1, ("Error connecting to IPv4 socket. "
1993 "Aborting operation")); 2015 "Aborting operation"));
@@ -2002,6 +2024,8 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
2002 sock_release(csocket); 2024 sock_release(csocket);
2003 goto out; 2025 goto out;
2004 } else { 2026 } else {
2027 srvTcp->noblocksnd = volume_info.noblocksnd;
2028 srvTcp->noautotune = volume_info.noautotune;
2005 memcpy(&srvTcp->addr.sockAddr, &sin_server, 2029 memcpy(&srvTcp->addr.sockAddr, &sin_server,
2006 sizeof(struct sockaddr_in)); 2030 sizeof(struct sockaddr_in));
2007 atomic_set(&srvTcp->inFlight, 0); 2031 atomic_set(&srvTcp->inFlight, 0);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 62d8bd8f14c0..ead1a3bb0256 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1824,7 +1824,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
1824 pTcon = cifs_sb->tcon; 1824 pTcon = cifs_sb->tcon;
1825 1825
1826 pagevec_init(&lru_pvec, 0); 1826 pagevec_init(&lru_pvec, 0);
1827 cFYI(DBG2, ("rpages: num pages %d", num_pages)); 1827 cFYI(DBG2, ("rpages: num pages %d", num_pages));
1828 for (i = 0; i < num_pages; ) { 1828 for (i = 0; i < num_pages; ) {
1829 unsigned contig_pages; 1829 unsigned contig_pages;
1830 struct page *tmp_page; 1830 struct page *tmp_page;
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index d54fa8aeaea9..ff8c68de4a92 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1361,9 +1361,11 @@ int cifs_rename(struct inode *source_dir, struct dentry *source_dentry,
1361 CIFS_MOUNT_MAP_SPECIAL_CHR); 1361 CIFS_MOUNT_MAP_SPECIAL_CHR);
1362 1362
1363 if (tmprc == 0 && (info_buf_source->UniqueId == 1363 if (tmprc == 0 && (info_buf_source->UniqueId ==
1364 info_buf_target->UniqueId)) 1364 info_buf_target->UniqueId)) {
1365 /* same file, POSIX says that this is a noop */ 1365 /* same file, POSIX says that this is a noop */
1366 rc = 0;
1366 goto cifs_rename_exit; 1367 goto cifs_rename_exit;
1368 }
1367 } /* else ... BB we could add the same check for Windows by 1369 } /* else ... BB we could add the same check for Windows by
1368 checking the UniqueId via FILE_INTERNAL_INFO */ 1370 checking the UniqueId via FILE_INTERNAL_INFO */
1369 1371
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index bf0e6d8e382a..ff8243a8fe3e 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -161,7 +161,7 @@ void DeleteTconOplockQEntries(struct cifsTconInfo *tcon)
161 161
162int 162int
163smb_send(struct socket *ssocket, struct smb_hdr *smb_buffer, 163smb_send(struct socket *ssocket, struct smb_hdr *smb_buffer,
164 unsigned int smb_buf_length, struct sockaddr *sin) 164 unsigned int smb_buf_length, struct sockaddr *sin, bool noblocksnd)
165{ 165{
166 int rc = 0; 166 int rc = 0;
167 int i = 0; 167 int i = 0;
@@ -178,7 +178,10 @@ smb_send(struct socket *ssocket, struct smb_hdr *smb_buffer,
178 smb_msg.msg_namelen = sizeof(struct sockaddr); 178 smb_msg.msg_namelen = sizeof(struct sockaddr);
179 smb_msg.msg_control = NULL; 179 smb_msg.msg_control = NULL;
180 smb_msg.msg_controllen = 0; 180 smb_msg.msg_controllen = 0;
181 smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL; /* BB add more flags?*/ 181 if (noblocksnd)
182 smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
183 else
184 smb_msg.msg_flags = MSG_NOSIGNAL;
182 185
183 /* smb header is converted in header_assemble. bcc and rest of SMB word 186 /* smb header is converted in header_assemble. bcc and rest of SMB word
184 area, and byte area if necessary, is converted to littleendian in 187 area, and byte area if necessary, is converted to littleendian in
@@ -229,8 +232,8 @@ smb_send(struct socket *ssocket, struct smb_hdr *smb_buffer,
229} 232}
230 233
231static int 234static int
232smb_send2(struct socket *ssocket, struct kvec *iov, int n_vec, 235smb_send2(struct TCP_Server_Info *server, struct kvec *iov, int n_vec,
233 struct sockaddr *sin) 236 struct sockaddr *sin, bool noblocksnd)
234{ 237{
235 int rc = 0; 238 int rc = 0;
236 int i = 0; 239 int i = 0;
@@ -240,6 +243,7 @@ smb_send2(struct socket *ssocket, struct kvec *iov, int n_vec,
240 unsigned int total_len; 243 unsigned int total_len;
241 int first_vec = 0; 244 int first_vec = 0;
242 unsigned int smb_buf_length = smb_buffer->smb_buf_length; 245 unsigned int smb_buf_length = smb_buffer->smb_buf_length;
246 struct socket *ssocket = server->ssocket;
243 247
244 if (ssocket == NULL) 248 if (ssocket == NULL)
245 return -ENOTSOCK; /* BB eventually add reconnect code here */ 249 return -ENOTSOCK; /* BB eventually add reconnect code here */
@@ -248,7 +252,10 @@ smb_send2(struct socket *ssocket, struct kvec *iov, int n_vec,
248 smb_msg.msg_namelen = sizeof(struct sockaddr); 252 smb_msg.msg_namelen = sizeof(struct sockaddr);
249 smb_msg.msg_control = NULL; 253 smb_msg.msg_control = NULL;
250 smb_msg.msg_controllen = 0; 254 smb_msg.msg_controllen = 0;
251 smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL; /* BB add more flags?*/ 255 if (noblocksnd)
256 smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
257 else
258 smb_msg.msg_flags = MSG_NOSIGNAL;
252 259
253 /* smb header is converted in header_assemble. bcc and rest of SMB word 260 /* smb header is converted in header_assemble. bcc and rest of SMB word
254 area, and byte area if necessary, is converted to littleendian in 261 area, and byte area if necessary, is converted to littleendian in
@@ -283,8 +290,11 @@ smb_send2(struct socket *ssocket, struct kvec *iov, int n_vec,
283 if (rc < 0) 290 if (rc < 0)
284 break; 291 break;
285 292
286 if (rc >= total_len) { 293 if (rc == total_len) {
287 WARN_ON(rc > total_len); 294 total_len = 0;
295 break;
296 } else if (rc > total_len) {
297 cERROR(1, ("sent %d requested %d", rc, total_len));
288 break; 298 break;
289 } 299 }
290 if (rc == 0) { 300 if (rc == 0) {
@@ -312,6 +322,16 @@ smb_send2(struct socket *ssocket, struct kvec *iov, int n_vec,
312 i = 0; /* in case we get ENOSPC on the next send */ 322 i = 0; /* in case we get ENOSPC on the next send */
313 } 323 }
314 324
325 if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
326 cFYI(1, ("partial send (%d remaining), terminating session",
327 total_len));
328 /* If we have only sent part of an SMB then the next SMB
329 could be taken as the remainder of this one. We need
330 to kill the socket so the server throws away the partial
331 SMB */
332 server->tcpStatus = CifsNeedReconnect;
333 }
334
315 if (rc < 0) { 335 if (rc < 0) {
316 cERROR(1, ("Error %d sending data on socket to server", rc)); 336 cERROR(1, ("Error %d sending data on socket to server", rc));
317 } else 337 } else
@@ -518,8 +538,9 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
518#ifdef CONFIG_CIFS_STATS2 538#ifdef CONFIG_CIFS_STATS2
519 atomic_inc(&ses->server->inSend); 539 atomic_inc(&ses->server->inSend);
520#endif 540#endif
521 rc = smb_send2(ses->server->ssocket, iov, n_vec, 541 rc = smb_send2(ses->server, iov, n_vec,
522 (struct sockaddr *) &(ses->server->addr.sockAddr)); 542 (struct sockaddr *) &(ses->server->addr.sockAddr),
543 ses->server->noblocksnd);
523#ifdef CONFIG_CIFS_STATS2 544#ifdef CONFIG_CIFS_STATS2
524 atomic_dec(&ses->server->inSend); 545 atomic_dec(&ses->server->inSend);
525 midQ->when_sent = jiffies; 546 midQ->when_sent = jiffies;
@@ -711,7 +732,8 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
711 atomic_inc(&ses->server->inSend); 732 atomic_inc(&ses->server->inSend);
712#endif 733#endif
713 rc = smb_send(ses->server->ssocket, in_buf, in_buf->smb_buf_length, 734 rc = smb_send(ses->server->ssocket, in_buf, in_buf->smb_buf_length,
714 (struct sockaddr *) &(ses->server->addr.sockAddr)); 735 (struct sockaddr *) &(ses->server->addr.sockAddr),
736 ses->server->noblocksnd);
715#ifdef CONFIG_CIFS_STATS2 737#ifdef CONFIG_CIFS_STATS2
716 atomic_dec(&ses->server->inSend); 738 atomic_dec(&ses->server->inSend);
717 midQ->when_sent = jiffies; 739 midQ->when_sent = jiffies;
@@ -851,7 +873,8 @@ send_nt_cancel(struct cifsTconInfo *tcon, struct smb_hdr *in_buf,
851 return rc; 873 return rc;
852 } 874 }
853 rc = smb_send(ses->server->ssocket, in_buf, in_buf->smb_buf_length, 875 rc = smb_send(ses->server->ssocket, in_buf, in_buf->smb_buf_length,
854 (struct sockaddr *) &(ses->server->addr.sockAddr)); 876 (struct sockaddr *) &(ses->server->addr.sockAddr),
877 ses->server->noblocksnd);
855 up(&ses->server->tcpSem); 878 up(&ses->server->tcpSem);
856 return rc; 879 return rc;
857} 880}
@@ -941,7 +964,8 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
941 atomic_inc(&ses->server->inSend); 964 atomic_inc(&ses->server->inSend);
942#endif 965#endif
943 rc = smb_send(ses->server->ssocket, in_buf, in_buf->smb_buf_length, 966 rc = smb_send(ses->server->ssocket, in_buf, in_buf->smb_buf_length,
944 (struct sockaddr *) &(ses->server->addr.sockAddr)); 967 (struct sockaddr *) &(ses->server->addr.sockAddr),
968 ses->server->noblocksnd);
945#ifdef CONFIG_CIFS_STATS2 969#ifdef CONFIG_CIFS_STATS2
946 atomic_dec(&ses->server->inSend); 970 atomic_dec(&ses->server->inSend);
947 midQ->when_sent = jiffies; 971 midQ->when_sent = jiffies;
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index e5717a4fae67..5dec6d1356c4 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -2390,13 +2390,12 @@ static void ext3_write_super (struct super_block * sb)
2390 2390
2391static int ext3_sync_fs(struct super_block *sb, int wait) 2391static int ext3_sync_fs(struct super_block *sb, int wait)
2392{ 2392{
2393 tid_t target;
2394
2395 sb->s_dirt = 0; 2393 sb->s_dirt = 0;
2396 if (journal_start_commit(EXT3_SB(sb)->s_journal, &target)) { 2394 if (wait)
2397 if (wait) 2395 ext3_force_commit(sb);
2398 log_wait_commit(EXT3_SB(sb)->s_journal, target); 2396 else
2399 } 2397 journal_start_commit(EXT3_SB(sb)->s_journal, NULL);
2398
2400 return 0; 2399 return 0;
2401} 2400}
2402 2401
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index fe34d74cfb19..2a117e286e54 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -718,6 +718,8 @@ got:
718 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 718 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
719 free = ext4_free_blocks_after_init(sb, group, gdp); 719 free = ext4_free_blocks_after_init(sb, group, gdp);
720 gdp->bg_free_blocks_count = cpu_to_le16(free); 720 gdp->bg_free_blocks_count = cpu_to_le16(free);
721 gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
722 gdp);
721 } 723 }
722 spin_unlock(sb_bgl_lock(sbi, group)); 724 spin_unlock(sb_bgl_lock(sbi, group));
723 725
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 8dbf6953845b..be21a5ae33cb 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2329,6 +2329,8 @@ static int ext4_da_writepage(struct page *page,
2329 unlock_page(page); 2329 unlock_page(page);
2330 return 0; 2330 return 0;
2331 } 2331 }
2332 /* now mark the buffer_heads as dirty and uptodate */
2333 block_commit_write(page, 0, PAGE_CACHE_SIZE);
2332 } 2334 }
2333 2335
2334 if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode)) 2336 if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
@@ -4580,9 +4582,10 @@ static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
4580static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) 4582static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4581{ 4583{
4582 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) 4584 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
4583 return ext4_indirect_trans_blocks(inode, nrblocks, 0); 4585 return ext4_indirect_trans_blocks(inode, nrblocks, chunk);
4584 return ext4_ext_index_trans_blocks(inode, nrblocks, 0); 4586 return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
4585} 4587}
4588
4586/* 4589/*
4587 * Account for index blocks, block groups bitmaps and block group 4590 * Account for index blocks, block groups bitmaps and block group
4588 * descriptor blocks if modify datablocks and index blocks 4591 * descriptor blocks if modify datablocks and index blocks
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index dfe17a134052..444ad998f72e 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -4441,6 +4441,7 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
4441 else if (block >= (entry->start_blk + entry->count)) 4441 else if (block >= (entry->start_blk + entry->count))
4442 n = &(*n)->rb_right; 4442 n = &(*n)->rb_right;
4443 else { 4443 else {
4444 ext4_unlock_group(sb, group);
4444 ext4_error(sb, __func__, 4445 ext4_error(sb, __func__,
4445 "Double free of blocks %d (%d %d)\n", 4446 "Double free of blocks %d (%d %d)\n",
4446 block, entry->start_blk, entry->count); 4447 block, entry->start_blk, entry->count);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 994859df010e..e4a241c65dbe 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1458,9 +1458,8 @@ static int ext4_fill_flex_info(struct super_block *sb)
1458 1458
1459 /* We allocate both existing and potentially added groups */ 1459 /* We allocate both existing and potentially added groups */
1460 flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) + 1460 flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) +
1461 ((sbi->s_es->s_reserved_gdt_blocks +1 ) << 1461 ((le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) + 1) <<
1462 EXT4_DESC_PER_BLOCK_BITS(sb))) / 1462 EXT4_DESC_PER_BLOCK_BITS(sb))) / groups_per_flex;
1463 groups_per_flex;
1464 sbi->s_flex_groups = kzalloc(flex_group_count * 1463 sbi->s_flex_groups = kzalloc(flex_group_count *
1465 sizeof(struct flex_groups), GFP_KERNEL); 1464 sizeof(struct flex_groups), GFP_KERNEL);
1466 if (sbi->s_flex_groups == NULL) { 1465 if (sbi->s_flex_groups == NULL) {
@@ -2885,12 +2884,9 @@ int ext4_force_commit(struct super_block *sb)
2885/* 2884/*
2886 * Ext4 always journals updates to the superblock itself, so we don't 2885 * Ext4 always journals updates to the superblock itself, so we don't
2887 * have to propagate any other updates to the superblock on disk at this 2886 * have to propagate any other updates to the superblock on disk at this
2888 * point. Just start an async writeback to get the buffers on their way 2887 * point. (We can probably nuke this function altogether, and remove
2889 * to the disk. 2888 * any mention to sb->s_dirt in all of fs/ext4; eventual cleanup...)
2890 *
2891 * This implicitly triggers the writebehind on sync().
2892 */ 2889 */
2893
2894static void ext4_write_super(struct super_block *sb) 2890static void ext4_write_super(struct super_block *sb)
2895{ 2891{
2896 if (mutex_trylock(&sb->s_lock) != 0) 2892 if (mutex_trylock(&sb->s_lock) != 0)
@@ -2900,15 +2896,15 @@ static void ext4_write_super(struct super_block *sb)
2900 2896
2901static int ext4_sync_fs(struct super_block *sb, int wait) 2897static int ext4_sync_fs(struct super_block *sb, int wait)
2902{ 2898{
2903 tid_t target; 2899 int ret = 0;
2904 2900
2905 trace_mark(ext4_sync_fs, "dev %s wait %d", sb->s_id, wait); 2901 trace_mark(ext4_sync_fs, "dev %s wait %d", sb->s_id, wait);
2906 sb->s_dirt = 0; 2902 sb->s_dirt = 0;
2907 if (jbd2_journal_start_commit(EXT4_SB(sb)->s_journal, &target)) { 2903 if (wait)
2908 if (wait) 2904 ret = ext4_force_commit(sb);
2909 jbd2_log_wait_commit(EXT4_SB(sb)->s_journal, target); 2905 else
2910 } 2906 jbd2_journal_start_commit(EXT4_SB(sb)->s_journal, NULL);
2911 return 0; 2907 return ret;
2912} 2908}
2913 2909
2914/* 2910/*
diff --git a/fs/fat/Makefile b/fs/fat/Makefile
index bfb5f06cf2c8..e06190322c1c 100644
--- a/fs/fat/Makefile
+++ b/fs/fat/Makefile
@@ -3,5 +3,9 @@
3# 3#
4 4
5obj-$(CONFIG_FAT_FS) += fat.o 5obj-$(CONFIG_FAT_FS) += fat.o
6obj-$(CONFIG_VFAT_FS) += vfat.o
7obj-$(CONFIG_MSDOS_FS) += msdos.o
6 8
7fat-objs := cache.o dir.o fatent.o file.o inode.o misc.o 9fat-y := cache.o dir.o fatent.o file.o inode.o misc.o
10vfat-y := namei_vfat.o
11msdos-y := namei_msdos.o
diff --git a/fs/fat/cache.c b/fs/fat/cache.c
index 3222f51c41cf..b42602298087 100644
--- a/fs/fat/cache.c
+++ b/fs/fat/cache.c
@@ -9,8 +9,8 @@
9 */ 9 */
10 10
11#include <linux/fs.h> 11#include <linux/fs.h>
12#include <linux/msdos_fs.h>
13#include <linux/buffer_head.h> 12#include <linux/buffer_head.h>
13#include "fat.h"
14 14
15/* this must be > 0. */ 15/* this must be > 0. */
16#define FAT_MAX_CACHE 8 16#define FAT_MAX_CACHE 8
@@ -293,10 +293,12 @@ static int fat_bmap_cluster(struct inode *inode, int cluster)
293} 293}
294 294
295int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys, 295int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
296 unsigned long *mapped_blocks) 296 unsigned long *mapped_blocks, int create)
297{ 297{
298 struct super_block *sb = inode->i_sb; 298 struct super_block *sb = inode->i_sb;
299 struct msdos_sb_info *sbi = MSDOS_SB(sb); 299 struct msdos_sb_info *sbi = MSDOS_SB(sb);
300 const unsigned long blocksize = sb->s_blocksize;
301 const unsigned char blocksize_bits = sb->s_blocksize_bits;
300 sector_t last_block; 302 sector_t last_block;
301 int cluster, offset; 303 int cluster, offset;
302 304
@@ -309,10 +311,21 @@ int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
309 } 311 }
310 return 0; 312 return 0;
311 } 313 }
312 last_block = (MSDOS_I(inode)->mmu_private + (sb->s_blocksize - 1)) 314
313 >> sb->s_blocksize_bits; 315 last_block = (i_size_read(inode) + (blocksize - 1)) >> blocksize_bits;
314 if (sector >= last_block) 316 if (sector >= last_block) {
315 return 0; 317 if (!create)
318 return 0;
319
320 /*
 321 * ->mmu_private can be accessed only on the allocation path.
322 * (caller must hold ->i_mutex)
323 */
324 last_block = (MSDOS_I(inode)->mmu_private + (blocksize - 1))
325 >> blocksize_bits;
326 if (sector >= last_block)
327 return 0;
328 }
316 329
317 cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits); 330 cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits);
318 offset = sector & (sbi->sec_per_clus - 1); 331 offset = sector & (sbi->sec_per_clus - 1);
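The new create argument to fat_bmap() above separates its two kinds of callers: read-side users (such as the directory code in the next hunk, which passes 0) are bounded by i_size and need no extra locking, while allocation-side users may fall back to ->mmu_private and must hold ->i_mutex. A hypothetical caller showing both cases, purely for illustration:

/* Illustrative only; the real allocation-path caller (fat_get_block())
 * is outside this excerpt. */
static int fat_sketch_map_block(struct inode *inode, sector_t iblock,
				int for_write, sector_t *phys,
				unsigned long *mapped)
{
	if (for_write)
		/* caller holds ->i_mutex, so ->mmu_private may be consulted */
		return fat_bmap(inode, iblock, phys, mapped, 1);

	/* read path: blocks beyond i_size simply come back as *phys == 0 */
	return fat_bmap(inode, iblock, phys, mapped, 0);
}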
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index bae1c3292522..67e058357098 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -16,11 +16,11 @@
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/time.h> 18#include <linux/time.h>
19#include <linux/msdos_fs.h>
20#include <linux/smp_lock.h> 19#include <linux/smp_lock.h>
21#include <linux/buffer_head.h> 20#include <linux/buffer_head.h>
22#include <linux/compat.h> 21#include <linux/compat.h>
23#include <asm/uaccess.h> 22#include <asm/uaccess.h>
23#include "fat.h"
24 24
25static inline loff_t fat_make_i_pos(struct super_block *sb, 25static inline loff_t fat_make_i_pos(struct super_block *sb,
26 struct buffer_head *bh, 26 struct buffer_head *bh,
@@ -77,7 +77,7 @@ next:
77 77
78 *bh = NULL; 78 *bh = NULL;
79 iblock = *pos >> sb->s_blocksize_bits; 79 iblock = *pos >> sb->s_blocksize_bits;
80 err = fat_bmap(dir, iblock, &phys, &mapped_blocks); 80 err = fat_bmap(dir, iblock, &phys, &mapped_blocks, 0);
81 if (err || !phys) 81 if (err || !phys)
82 return -1; /* beyond EOF or error */ 82 return -1; /* beyond EOF or error */
83 83
@@ -86,7 +86,7 @@ next:
86 *bh = sb_bread(sb, phys); 86 *bh = sb_bread(sb, phys);
87 if (*bh == NULL) { 87 if (*bh == NULL) {
88 printk(KERN_ERR "FAT: Directory bread(block %llu) failed\n", 88 printk(KERN_ERR "FAT: Directory bread(block %llu) failed\n",
89 (unsigned long long)phys); 89 (llu)phys);
90 /* skip this block */ 90 /* skip this block */
91 *pos = (iblock + 1) << sb->s_blocksize_bits; 91 *pos = (iblock + 1) << sb->s_blocksize_bits;
92 goto next; 92 goto next;
@@ -373,9 +373,10 @@ parse_record:
373 if (de->attr == ATTR_EXT) { 373 if (de->attr == ATTR_EXT) {
374 int status = fat_parse_long(inode, &cpos, &bh, &de, 374 int status = fat_parse_long(inode, &cpos, &bh, &de,
375 &unicode, &nr_slots); 375 &unicode, &nr_slots);
376 if (status < 0) 376 if (status < 0) {
377 return status; 377 err = status;
378 else if (status == PARSE_INVALID) 378 goto end_of_dir;
379 } else if (status == PARSE_INVALID)
379 continue; 380 continue;
380 else if (status == PARSE_NOT_LONGNAME) 381 else if (status == PARSE_NOT_LONGNAME)
381 goto parse_record; 382 goto parse_record;
@@ -832,6 +833,7 @@ static long fat_compat_dir_ioctl(struct file *filp, unsigned cmd,
832#endif /* CONFIG_COMPAT */ 833#endif /* CONFIG_COMPAT */
833 834
834const struct file_operations fat_dir_operations = { 835const struct file_operations fat_dir_operations = {
836 .llseek = generic_file_llseek,
835 .read = generic_read_dir, 837 .read = generic_read_dir,
836 .readdir = fat_readdir, 838 .readdir = fat_readdir,
837 .ioctl = fat_dir_ioctl, 839 .ioctl = fat_dir_ioctl,
@@ -1089,6 +1091,7 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec *ts)
1089 struct msdos_dir_entry *de; 1091 struct msdos_dir_entry *de;
1090 sector_t blknr; 1092 sector_t blknr;
1091 __le16 date, time; 1093 __le16 date, time;
1094 u8 time_cs;
1092 int err, cluster; 1095 int err, cluster;
1093 1096
1094 err = fat_alloc_clusters(dir, &cluster, 1); 1097 err = fat_alloc_clusters(dir, &cluster, 1);
@@ -1102,7 +1105,7 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec *ts)
1102 goto error_free; 1105 goto error_free;
1103 } 1106 }
1104 1107
1105 fat_date_unix2dos(ts->tv_sec, &time, &date, sbi->options.tz_utc); 1108 fat_time_unix2fat(sbi, ts, &time, &date, &time_cs);
1106 1109
1107 de = (struct msdos_dir_entry *)bhs[0]->b_data; 1110 de = (struct msdos_dir_entry *)bhs[0]->b_data;
1108 /* filling the new directory slots ("." and ".." entries) */ 1111 /* filling the new directory slots ("." and ".." entries) */
@@ -1112,13 +1115,14 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec *ts)
1112 de[0].lcase = de[1].lcase = 0; 1115 de[0].lcase = de[1].lcase = 0;
1113 de[0].time = de[1].time = time; 1116 de[0].time = de[1].time = time;
1114 de[0].date = de[1].date = date; 1117 de[0].date = de[1].date = date;
1115 de[0].ctime_cs = de[1].ctime_cs = 0;
1116 if (sbi->options.isvfat) { 1118 if (sbi->options.isvfat) {
1117 /* extra timestamps */ 1119 /* extra timestamps */
1118 de[0].ctime = de[1].ctime = time; 1120 de[0].ctime = de[1].ctime = time;
1121 de[0].ctime_cs = de[1].ctime_cs = time_cs;
1119 de[0].adate = de[0].cdate = de[1].adate = de[1].cdate = date; 1122 de[0].adate = de[0].cdate = de[1].adate = de[1].cdate = date;
1120 } else { 1123 } else {
1121 de[0].ctime = de[1].ctime = 0; 1124 de[0].ctime = de[1].ctime = 0;
1125 de[0].ctime_cs = de[1].ctime_cs = 0;
1122 de[0].adate = de[0].cdate = de[1].adate = de[1].cdate = 0; 1126 de[0].adate = de[0].cdate = de[1].adate = de[1].cdate = 0;
1123 } 1127 }
1124 de[0].start = cpu_to_le16(cluster); 1128 de[0].start = cpu_to_le16(cluster);
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
new file mode 100644
index 000000000000..ea440d65819c
--- /dev/null
+++ b/fs/fat/fat.h
@@ -0,0 +1,329 @@
1#ifndef _FAT_H
2#define _FAT_H
3
4#include <linux/buffer_head.h>
5#include <linux/string.h>
6#include <linux/nls.h>
7#include <linux/fs.h>
8#include <linux/mutex.h>
9#include <linux/msdos_fs.h>
10
11/*
12 * vfat shortname flags
13 */
14#define VFAT_SFN_DISPLAY_LOWER 0x0001 /* convert to lowercase for display */
15#define VFAT_SFN_DISPLAY_WIN95 0x0002 /* emulate win95 rule for display */
16#define VFAT_SFN_DISPLAY_WINNT 0x0004 /* emulate winnt rule for display */
17#define VFAT_SFN_CREATE_WIN95 0x0100 /* emulate win95 rule for create */
18#define VFAT_SFN_CREATE_WINNT 0x0200 /* emulate winnt rule for create */
19
20struct fat_mount_options {
21 uid_t fs_uid;
22 gid_t fs_gid;
23 unsigned short fs_fmask;
24 unsigned short fs_dmask;
25 unsigned short codepage; /* Codepage for shortname conversions */
26 char *iocharset; /* Charset used for filename input/display */
27 unsigned short shortname; /* flags for shortname display/create rule */
28 unsigned char name_check; /* r = relaxed, n = normal, s = strict */
29 unsigned short allow_utime;/* permission for setting the [am]time */
30 unsigned quiet:1, /* set = fake successful chmods and chowns */
31 showexec:1, /* set = only set x bit for com/exe/bat */
32 sys_immutable:1, /* set = system files are immutable */
33 dotsOK:1, /* set = hidden and system files are named '.filename' */
34 isvfat:1, /* 0=no vfat long filename support, 1=vfat support */
35 utf8:1, /* Use of UTF-8 character set (Default) */
36 unicode_xlate:1, /* create escape sequences for unhandled Unicode */
37 numtail:1, /* Does first alias have a numeric '~1' type tail? */
38 flush:1, /* write things quickly */
39 nocase:1, /* Does this need case conversion? 0=need case conversion*/
40 usefree:1, /* Use free_clusters for FAT32 */
41 tz_utc:1, /* Filesystem timestamps are in UTC */
42 rodir:1; /* allow ATTR_RO for directory */
43};
44
45#define FAT_HASH_BITS 8
46#define FAT_HASH_SIZE (1UL << FAT_HASH_BITS)
47
48/*
49 * MS-DOS file system in-core superblock data
50 */
51struct msdos_sb_info {
52 unsigned short sec_per_clus; /* sectors/cluster */
53 unsigned short cluster_bits; /* log2(cluster_size) */
54 unsigned int cluster_size; /* cluster size */
55 unsigned char fats,fat_bits; /* number of FATs, FAT bits (12 or 16) */
56 unsigned short fat_start;
57 unsigned long fat_length; /* FAT start & length (sec.) */
58 unsigned long dir_start;
59 unsigned short dir_entries; /* root dir start & entries */
60 unsigned long data_start; /* first data sector */
61 unsigned long max_cluster; /* maximum cluster number */
62 unsigned long root_cluster; /* first cluster of the root directory */
63 unsigned long fsinfo_sector; /* sector number of FAT32 fsinfo */
64 struct mutex fat_lock;
65 unsigned int prev_free; /* previously allocated cluster number */
66 unsigned int free_clusters; /* -1 if undefined */
67 unsigned int free_clus_valid; /* is free_clusters valid? */
68 struct fat_mount_options options;
69 struct nls_table *nls_disk; /* Codepage used on disk */
70 struct nls_table *nls_io; /* Charset used for input and display */
71 const void *dir_ops; /* Opaque; default directory operations */
72 int dir_per_block; /* dir entries per block */
73 int dir_per_block_bits; /* log2(dir_per_block) */
74
75 int fatent_shift;
76 struct fatent_operations *fatent_ops;
77
78 spinlock_t inode_hash_lock;
79 struct hlist_head inode_hashtable[FAT_HASH_SIZE];
80};
81
82#define FAT_CACHE_VALID 0 /* special case for valid cache */
83
84/*
85 * MS-DOS file system inode data in memory
86 */
87struct msdos_inode_info {
88 spinlock_t cache_lru_lock;
89 struct list_head cache_lru;
90 int nr_caches;
91 /* for avoiding the race between fat_free() and fat_get_cluster() */
92 unsigned int cache_valid_id;
93
94 /* NOTE: mmu_private is 64bits, so must hold ->i_mutex to access */
95 loff_t mmu_private; /* physically allocated size */
96
97 int i_start; /* first cluster or 0 */
98 int i_logstart; /* logical first cluster */
99 int i_attrs; /* unused attribute bits */
100 loff_t i_pos; /* on-disk position of directory entry or 0 */
101 struct hlist_node i_fat_hash; /* hash by i_location */
102 struct inode vfs_inode;
103};
104
105struct fat_slot_info {
106 loff_t i_pos; /* on-disk position of directory entry */
107 loff_t slot_off; /* offset for slot or de start */
108 int nr_slots; /* number of slots + 1(de) in filename */
109 struct msdos_dir_entry *de;
110 struct buffer_head *bh;
111};
112
113static inline struct msdos_sb_info *MSDOS_SB(struct super_block *sb)
114{
115 return sb->s_fs_info;
116}
117
118static inline struct msdos_inode_info *MSDOS_I(struct inode *inode)
119{
120 return container_of(inode, struct msdos_inode_info, vfs_inode);
121}
122
123/*
124 * If ->i_mode can't hold S_IWUGO (i.e. ATTR_RO), we use ->i_attrs to
125 * save ATTR_RO instead of ->i_mode.
126 *
 127 * If it's a directory and !sbi->options.rodir, ATTR_RO isn't a read-only
 128 * bit; it's just used as a flag for the application.
129 */
130static inline int fat_mode_can_hold_ro(struct inode *inode)
131{
132 struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
133 mode_t mask;
134
135 if (S_ISDIR(inode->i_mode)) {
136 if (!sbi->options.rodir)
137 return 0;
138 mask = ~sbi->options.fs_dmask;
139 } else
140 mask = ~sbi->options.fs_fmask;
141
142 if (!(mask & S_IWUGO))
143 return 0;
144 return 1;
145}
146
147/* Convert attribute bits and a mask to the UNIX mode. */
148static inline mode_t fat_make_mode(struct msdos_sb_info *sbi,
149 u8 attrs, mode_t mode)
150{
151 if (attrs & ATTR_RO && !((attrs & ATTR_DIR) && !sbi->options.rodir))
152 mode &= ~S_IWUGO;
153
154 if (attrs & ATTR_DIR)
155 return (mode & ~sbi->options.fs_dmask) | S_IFDIR;
156 else
157 return (mode & ~sbi->options.fs_fmask) | S_IFREG;
158}
159
160/* Return the FAT attribute byte for this inode */
161static inline u8 fat_make_attrs(struct inode *inode)
162{
163 u8 attrs = MSDOS_I(inode)->i_attrs;
164 if (S_ISDIR(inode->i_mode))
165 attrs |= ATTR_DIR;
166 if (fat_mode_can_hold_ro(inode) && !(inode->i_mode & S_IWUGO))
167 attrs |= ATTR_RO;
168 return attrs;
169}
170
171static inline void fat_save_attrs(struct inode *inode, u8 attrs)
172{
173 if (fat_mode_can_hold_ro(inode))
174 MSDOS_I(inode)->i_attrs = attrs & ATTR_UNUSED;
175 else
176 MSDOS_I(inode)->i_attrs = attrs & (ATTR_UNUSED | ATTR_RO);
177}
178
179static inline unsigned char fat_checksum(const __u8 *name)
180{
181 unsigned char s = name[0];
182 s = (s<<7) + (s>>1) + name[1]; s = (s<<7) + (s>>1) + name[2];
183 s = (s<<7) + (s>>1) + name[3]; s = (s<<7) + (s>>1) + name[4];
184 s = (s<<7) + (s>>1) + name[5]; s = (s<<7) + (s>>1) + name[6];
185 s = (s<<7) + (s>>1) + name[7]; s = (s<<7) + (s>>1) + name[8];
186 s = (s<<7) + (s>>1) + name[9]; s = (s<<7) + (s>>1) + name[10];
187 return s;
188}
189
190static inline sector_t fat_clus_to_blknr(struct msdos_sb_info *sbi, int clus)
191{
192 return ((sector_t)clus - FAT_START_ENT) * sbi->sec_per_clus
193 + sbi->data_start;
194}
195
196static inline void fat16_towchar(wchar_t *dst, const __u8 *src, size_t len)
197{
198#ifdef __BIG_ENDIAN
199 while (len--) {
200 *dst++ = src[0] | (src[1] << 8);
201 src += 2;
202 }
203#else
204 memcpy(dst, src, len * 2);
205#endif
206}
207
208static inline void fatwchar_to16(__u8 *dst, const wchar_t *src, size_t len)
209{
210#ifdef __BIG_ENDIAN
211 while (len--) {
212 dst[0] = *src & 0x00FF;
213 dst[1] = (*src & 0xFF00) >> 8;
214 dst += 2;
215 src++;
216 }
217#else
218 memcpy(dst, src, len * 2);
219#endif
220}
221
222/* fat/cache.c */
223extern void fat_cache_inval_inode(struct inode *inode);
224extern int fat_get_cluster(struct inode *inode, int cluster,
225 int *fclus, int *dclus);
226extern int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
227 unsigned long *mapped_blocks, int create);
228
229/* fat/dir.c */
230extern const struct file_operations fat_dir_operations;
231extern int fat_search_long(struct inode *inode, const unsigned char *name,
232 int name_len, struct fat_slot_info *sinfo);
233extern int fat_dir_empty(struct inode *dir);
234extern int fat_subdirs(struct inode *dir);
235extern int fat_scan(struct inode *dir, const unsigned char *name,
236 struct fat_slot_info *sinfo);
237extern int fat_get_dotdot_entry(struct inode *dir, struct buffer_head **bh,
238 struct msdos_dir_entry **de, loff_t *i_pos);
239extern int fat_alloc_new_dir(struct inode *dir, struct timespec *ts);
240extern int fat_add_entries(struct inode *dir, void *slots, int nr_slots,
241 struct fat_slot_info *sinfo);
242extern int fat_remove_entries(struct inode *dir, struct fat_slot_info *sinfo);
243
244/* fat/fatent.c */
245struct fat_entry {
246 int entry;
247 union {
248 u8 *ent12_p[2];
249 __le16 *ent16_p;
250 __le32 *ent32_p;
251 } u;
252 int nr_bhs;
253 struct buffer_head *bhs[2];
254};
255
256static inline void fatent_init(struct fat_entry *fatent)
257{
258 fatent->nr_bhs = 0;
259 fatent->entry = 0;
260 fatent->u.ent32_p = NULL;
261 fatent->bhs[0] = fatent->bhs[1] = NULL;
262}
263
264static inline void fatent_set_entry(struct fat_entry *fatent, int entry)
265{
266 fatent->entry = entry;
267 fatent->u.ent32_p = NULL;
268}
269
270static inline void fatent_brelse(struct fat_entry *fatent)
271{
272 int i;
273 fatent->u.ent32_p = NULL;
274 for (i = 0; i < fatent->nr_bhs; i++)
275 brelse(fatent->bhs[i]);
276 fatent->nr_bhs = 0;
277 fatent->bhs[0] = fatent->bhs[1] = NULL;
278}
279
280extern void fat_ent_access_init(struct super_block *sb);
281extern int fat_ent_read(struct inode *inode, struct fat_entry *fatent,
282 int entry);
283extern int fat_ent_write(struct inode *inode, struct fat_entry *fatent,
284 int new, int wait);
285extern int fat_alloc_clusters(struct inode *inode, int *cluster,
286 int nr_cluster);
287extern int fat_free_clusters(struct inode *inode, int cluster);
288extern int fat_count_free_clusters(struct super_block *sb);
289
290/* fat/file.c */
291extern int fat_generic_ioctl(struct inode *inode, struct file *filp,
292 unsigned int cmd, unsigned long arg);
293extern const struct file_operations fat_file_operations;
294extern const struct inode_operations fat_file_inode_operations;
295extern int fat_setattr(struct dentry * dentry, struct iattr * attr);
296extern void fat_truncate(struct inode *inode);
297extern int fat_getattr(struct vfsmount *mnt, struct dentry *dentry,
298 struct kstat *stat);
299
300/* fat/inode.c */
301extern void fat_attach(struct inode *inode, loff_t i_pos);
302extern void fat_detach(struct inode *inode);
303extern struct inode *fat_iget(struct super_block *sb, loff_t i_pos);
304extern struct inode *fat_build_inode(struct super_block *sb,
305 struct msdos_dir_entry *de, loff_t i_pos);
306extern int fat_sync_inode(struct inode *inode);
307extern int fat_fill_super(struct super_block *sb, void *data, int silent,
308 const struct inode_operations *fs_dir_inode_ops, int isvfat);
309
310extern int fat_flush_inodes(struct super_block *sb, struct inode *i1,
311 struct inode *i2);
312/* fat/misc.c */
313extern void fat_fs_panic(struct super_block *s, const char *fmt, ...)
314 __attribute__ ((format (printf, 2, 3))) __cold;
315extern void fat_clusters_flush(struct super_block *sb);
316extern int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster);
317extern void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec *ts,
318 __le16 __time, __le16 __date, u8 time_cs);
319extern void fat_time_unix2fat(struct msdos_sb_info *sbi, struct timespec *ts,
320 __le16 *time, __le16 *date, u8 *time_cs);
321extern int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs);
322
323int fat_cache_init(void);
324void fat_cache_destroy(void);
325
326/* helper for printk */
327typedef unsigned long long llu;
328
329#endif /* !_FAT_H */
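The attribute helpers in the new fat.h above are designed as a round trip: fat_make_mode() turns on-disk attribute bits into a POSIX mode at iget time, and fat_make_attrs()/fat_save_attrs() recover the bits when the inode is written back, with fat_mode_can_hold_ro() deciding whether ATTR_RO can be carried in ->i_mode at all. A contrived walk-through for a read-only regular file, assuming a typical fs_fmask that leaves the owner write bit available (none of this code is in the patch):

/* Contrived illustration of the attrs <-> mode round trip. */
static void fat_sketch_attr_roundtrip(struct inode *inode)
{
	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
	u8 attrs = ATTR_ARCH | ATTR_RO;

	/* ATTR_RO strips S_IWUGO, then fs_fmask is applied on top */
	inode->i_mode = fat_make_mode(sbi, attrs, S_IRWXUGO);

	/* only the ATTR_UNUSED bits are kept in ->i_attrs ... */
	fat_save_attrs(inode, attrs);

	/* ... yet fat_make_attrs() reports ATTR_RO again, because
	 * fat_mode_can_hold_ro() sees the missing write bits in ->i_mode */
	WARN_ON(!(fat_make_attrs(inode) & ATTR_RO));
}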
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index fb98b3d847ed..da6eea47872f 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -7,6 +7,7 @@
7#include <linux/fs.h> 7#include <linux/fs.h>
8#include <linux/msdos_fs.h> 8#include <linux/msdos_fs.h>
9#include <linux/blkdev.h> 9#include <linux/blkdev.h>
10#include "fat.h"
10 11
11struct fatent_operations { 12struct fatent_operations {
12 void (*ent_blocknr)(struct super_block *, int, int *, sector_t *); 13 void (*ent_blocknr)(struct super_block *, int, int *, sector_t *);
@@ -92,8 +93,7 @@ static int fat12_ent_bread(struct super_block *sb, struct fat_entry *fatent,
92err_brelse: 93err_brelse:
93 brelse(bhs[0]); 94 brelse(bhs[0]);
94err: 95err:
95 printk(KERN_ERR "FAT: FAT read failed (blocknr %llu)\n", 96 printk(KERN_ERR "FAT: FAT read failed (blocknr %llu)\n", (llu)blocknr);
96 (unsigned long long)blocknr);
97 return -EIO; 97 return -EIO;
98} 98}
99 99
@@ -106,7 +106,7 @@ static int fat_ent_bread(struct super_block *sb, struct fat_entry *fatent,
106 fatent->bhs[0] = sb_bread(sb, blocknr); 106 fatent->bhs[0] = sb_bread(sb, blocknr);
107 if (!fatent->bhs[0]) { 107 if (!fatent->bhs[0]) {
108 printk(KERN_ERR "FAT: FAT read failed (blocknr %llu)\n", 108 printk(KERN_ERR "FAT: FAT read failed (blocknr %llu)\n",
109 (unsigned long long)blocknr); 109 (llu)blocknr);
110 return -EIO; 110 return -EIO;
111 } 111 }
112 fatent->nr_bhs = 1; 112 fatent->nr_bhs = 1;
@@ -316,10 +316,20 @@ static inline int fat_ent_update_ptr(struct super_block *sb,
316 /* Is this fatent's blocks including this entry? */ 316 /* Is this fatent's blocks including this entry? */
317 if (!fatent->nr_bhs || bhs[0]->b_blocknr != blocknr) 317 if (!fatent->nr_bhs || bhs[0]->b_blocknr != blocknr)
318 return 0; 318 return 0;
319 /* Does this entry need the next block? */ 319 if (sbi->fat_bits == 12) {
320 if (sbi->fat_bits == 12 && (offset + 1) >= sb->s_blocksize) { 320 if ((offset + 1) < sb->s_blocksize) {
321 if (fatent->nr_bhs != 2 || bhs[1]->b_blocknr != (blocknr + 1)) 321 /* This entry is on bhs[0]. */
322 return 0; 322 if (fatent->nr_bhs == 2) {
323 brelse(bhs[1]);
324 fatent->nr_bhs = 1;
325 }
326 } else {
327 /* This entry needs the next block. */
328 if (fatent->nr_bhs != 2)
329 return 0;
330 if (bhs[1]->b_blocknr != (blocknr + 1))
331 return 0;
332 }
323 } 333 }
324 ops->ent_set_ptr(fatent, offset); 334 ops->ent_set_ptr(fatent, offset);
325 return 1; 335 return 1;
diff --git a/fs/fat/file.c b/fs/fat/file.c
index ddde37025ca6..f06a4e525ece 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -10,13 +10,13 @@
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/mount.h> 11#include <linux/mount.h>
12#include <linux/time.h> 12#include <linux/time.h>
13#include <linux/msdos_fs.h>
14#include <linux/buffer_head.h> 13#include <linux/buffer_head.h>
15#include <linux/writeback.h> 14#include <linux/writeback.h>
16#include <linux/backing-dev.h> 15#include <linux/backing-dev.h>
17#include <linux/blkdev.h> 16#include <linux/blkdev.h>
18#include <linux/fsnotify.h> 17#include <linux/fsnotify.h>
19#include <linux/security.h> 18#include <linux/security.h>
19#include "fat.h"
20 20
21int fat_generic_ioctl(struct inode *inode, struct file *filp, 21int fat_generic_ioctl(struct inode *inode, struct file *filp,
22 unsigned int cmd, unsigned long arg) 22 unsigned int cmd, unsigned long arg)
@@ -29,10 +29,9 @@ int fat_generic_ioctl(struct inode *inode, struct file *filp,
29 { 29 {
30 u32 attr; 30 u32 attr;
31 31
32 if (inode->i_ino == MSDOS_ROOT_INO) 32 mutex_lock(&inode->i_mutex);
33 attr = ATTR_DIR; 33 attr = fat_make_attrs(inode);
34 else 34 mutex_unlock(&inode->i_mutex);
35 attr = fat_attr(inode);
36 35
37 return put_user(attr, user_attr); 36 return put_user(attr, user_attr);
38 } 37 }
@@ -62,20 +61,16 @@ int fat_generic_ioctl(struct inode *inode, struct file *filp,
62 /* Merge in ATTR_VOLUME and ATTR_DIR */ 61 /* Merge in ATTR_VOLUME and ATTR_DIR */
63 attr |= (MSDOS_I(inode)->i_attrs & ATTR_VOLUME) | 62 attr |= (MSDOS_I(inode)->i_attrs & ATTR_VOLUME) |
64 (is_dir ? ATTR_DIR : 0); 63 (is_dir ? ATTR_DIR : 0);
65 oldattr = fat_attr(inode); 64 oldattr = fat_make_attrs(inode);
66 65
67 /* Equivalent to a chmod() */ 66 /* Equivalent to a chmod() */
68 ia.ia_valid = ATTR_MODE | ATTR_CTIME; 67 ia.ia_valid = ATTR_MODE | ATTR_CTIME;
69 ia.ia_ctime = current_fs_time(inode->i_sb); 68 ia.ia_ctime = current_fs_time(inode->i_sb);
70 if (is_dir) { 69 if (is_dir)
71 ia.ia_mode = MSDOS_MKMODE(attr, 70 ia.ia_mode = fat_make_mode(sbi, attr, S_IRWXUGO);
72 S_IRWXUGO & ~sbi->options.fs_dmask) 71 else {
73 | S_IFDIR; 72 ia.ia_mode = fat_make_mode(sbi, attr,
74 } else { 73 S_IRUGO | S_IWUGO | (inode->i_mode & S_IXUGO));
75 ia.ia_mode = MSDOS_MKMODE(attr,
76 (S_IRUGO | S_IWUGO | (inode->i_mode & S_IXUGO))
77 & ~sbi->options.fs_fmask)
78 | S_IFREG;
79 } 74 }
80 75
81 /* The root directory has no attributes */ 76 /* The root directory has no attributes */
@@ -115,7 +110,7 @@ int fat_generic_ioctl(struct inode *inode, struct file *filp,
115 inode->i_flags &= S_IMMUTABLE; 110 inode->i_flags &= S_IMMUTABLE;
116 } 111 }
117 112
118 MSDOS_I(inode)->i_attrs = attr & ATTR_UNUSED; 113 fat_save_attrs(inode, attr);
119 mark_inode_dirty(inode); 114 mark_inode_dirty(inode);
120up: 115up:
121 mnt_drop_write(filp->f_path.mnt); 116 mnt_drop_write(filp->f_path.mnt);
@@ -274,7 +269,7 @@ static int fat_sanitize_mode(const struct msdos_sb_info *sbi,
274 269
275 /* 270 /*
276 * Note, the basic check is already done by a caller of 271 * Note, the basic check is already done by a caller of
277 * (attr->ia_mode & ~MSDOS_VALID_MODE) 272 * (attr->ia_mode & ~FAT_VALID_MODE)
278 */ 273 */
279 274
280 if (S_ISREG(inode->i_mode)) 275 if (S_ISREG(inode->i_mode))
@@ -287,11 +282,18 @@ static int fat_sanitize_mode(const struct msdos_sb_info *sbi,
287 /* 282 /*
288 * Of the r and x bits, all (subject to umask) must be present. Of the 283 * Of the r and x bits, all (subject to umask) must be present. Of the
289 * w bits, either all (subject to umask) or none must be present. 284 * w bits, either all (subject to umask) or none must be present.
285 *
286 * If fat_mode_can_hold_ro(inode) is false, can't change w bits.
290 */ 287 */
291 if ((perm & (S_IRUGO | S_IXUGO)) != (inode->i_mode & (S_IRUGO|S_IXUGO))) 288 if ((perm & (S_IRUGO | S_IXUGO)) != (inode->i_mode & (S_IRUGO|S_IXUGO)))
292 return -EPERM; 289 return -EPERM;
293 if ((perm & S_IWUGO) && ((perm & S_IWUGO) != (S_IWUGO & ~mask))) 290 if (fat_mode_can_hold_ro(inode)) {
294 return -EPERM; 291 if ((perm & S_IWUGO) && ((perm & S_IWUGO) != (S_IWUGO & ~mask)))
292 return -EPERM;
293 } else {
294 if ((perm & S_IWUGO) != (S_IWUGO & ~mask))
295 return -EPERM;
296 }
295 297
296 *mode_ptr &= S_IFMT | perm; 298 *mode_ptr &= S_IFMT | perm;
297 299
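
The rule described in the comment above is easier to see in isolation: the
read and execute bits are frozen, and the write bits act as one read-only
switch filtered through the umask, because FAT stores only a single read-only
attribute per directory entry. A stand-alone sketch under those assumptions
(plain user-space C; the helper name and example modes are invented for
illustration, this is not the kernel function itself):

#include <stdio.h>
#include <sys/stat.h>

#define S_IRWXUGO (S_IRWXU | S_IRWXG | S_IRWXO)
#define S_IRUGO   (S_IRUSR | S_IRGRP | S_IROTH)
#define S_IWUGO   (S_IWUSR | S_IWGRP | S_IWOTH)
#define S_IXUGO   (S_IXUSR | S_IXGRP | S_IXOTH)

static int sanitize_mode(mode_t cur, mode_t req, mode_t mask,
			 int can_hold_ro, mode_t *out)
{
	mode_t perm = req & S_IRWXUGO;

	/* The r and x bits must stay exactly as they are. */
	if ((perm & (S_IRUGO | S_IXUGO)) != (cur & (S_IRUGO | S_IXUGO)))
		return -1;

	if (can_hold_ro) {
		/* w bits: either none, or all that the umask allows. */
		if ((perm & S_IWUGO) && (perm & S_IWUGO) != (S_IWUGO & ~mask))
			return -1;
	} else {
		/* The read-only bit cannot be stored: w bits cannot change. */
		if ((perm & S_IWUGO) != (S_IWUGO & ~mask))
			return -1;
	}

	*out = (req & S_IFMT) | perm;
	return 0;
}

int main(void)
{
	mode_t out;

	/* 0644 -> 0444 with umask 022: every write bit is dropped, so this
	 * is accepted; it corresponds to setting the read-only attribute. */
	printf("0644 -> 0444: %s\n",
	       sanitize_mode(S_IFREG | 0644, S_IFREG | 0444, 022, 1, &out) ?
	       "rejected" : "accepted");

	/* 0644 -> 0664: only the group write bit would change, and FAT
	 * cannot represent that distinction, so it is rejected. */
	printf("0644 -> 0664: %s\n",
	       sanitize_mode(S_IFREG | 0644, S_IFREG | 0664, 022, 1, &out) ?
	       "rejected" : "accepted");
	return 0;
}
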
@@ -314,13 +316,15 @@ static int fat_allow_set_time(struct msdos_sb_info *sbi, struct inode *inode)
314} 316}
315 317
316#define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET) 318#define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
319/* valid file mode bits */
320#define FAT_VALID_MODE (S_IFREG | S_IFDIR | S_IRWXUGO)
317 321
318int fat_setattr(struct dentry *dentry, struct iattr *attr) 322int fat_setattr(struct dentry *dentry, struct iattr *attr)
319{ 323{
320 struct msdos_sb_info *sbi = MSDOS_SB(dentry->d_sb); 324 struct msdos_sb_info *sbi = MSDOS_SB(dentry->d_sb);
321 struct inode *inode = dentry->d_inode; 325 struct inode *inode = dentry->d_inode;
322 int error = 0;
323 unsigned int ia_valid; 326 unsigned int ia_valid;
327 int error;
324 328
325 /* 329 /*
326 * Expand the file. Since inode_setattr() updates ->i_size 330 * Expand the file. Since inode_setattr() updates ->i_size
@@ -356,7 +360,7 @@ int fat_setattr(struct dentry *dentry, struct iattr *attr)
356 ((attr->ia_valid & ATTR_GID) && 360 ((attr->ia_valid & ATTR_GID) &&
357 (attr->ia_gid != sbi->options.fs_gid)) || 361 (attr->ia_gid != sbi->options.fs_gid)) ||
358 ((attr->ia_valid & ATTR_MODE) && 362 ((attr->ia_valid & ATTR_MODE) &&
359 (attr->ia_mode & ~MSDOS_VALID_MODE))) 363 (attr->ia_mode & ~FAT_VALID_MODE)))
360 error = -EPERM; 364 error = -EPERM;
361 365
362 if (error) { 366 if (error) {
@@ -374,7 +378,8 @@ int fat_setattr(struct dentry *dentry, struct iattr *attr)
374 attr->ia_valid &= ~ATTR_MODE; 378 attr->ia_valid &= ~ATTR_MODE;
375 } 379 }
376 380
377 error = inode_setattr(inode, attr); 381 if (attr->ia_valid)
382 error = inode_setattr(inode, attr);
378out: 383out:
379 return error; 384 return error;
380} 385}
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 2b2eec1283bf..bdd8fb7be2ca 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -16,7 +16,6 @@
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/smp_lock.h> 17#include <linux/smp_lock.h>
18#include <linux/seq_file.h> 18#include <linux/seq_file.h>
19#include <linux/msdos_fs.h>
20#include <linux/pagemap.h> 19#include <linux/pagemap.h>
21#include <linux/mpage.h> 20#include <linux/mpage.h>
22#include <linux/buffer_head.h> 21#include <linux/buffer_head.h>
@@ -27,7 +26,9 @@
27#include <linux/uio.h> 26#include <linux/uio.h>
28#include <linux/writeback.h> 27#include <linux/writeback.h>
29#include <linux/log2.h> 28#include <linux/log2.h>
29#include <linux/hash.h>
30#include <asm/unaligned.h> 30#include <asm/unaligned.h>
31#include "fat.h"
31 32
32#ifndef CONFIG_FAT_DEFAULT_IOCHARSET 33#ifndef CONFIG_FAT_DEFAULT_IOCHARSET
33/* if user don't select VFAT, this is undefined. */ 34/* if user don't select VFAT, this is undefined. */
@@ -63,7 +64,7 @@ static inline int __fat_get_block(struct inode *inode, sector_t iblock,
63 sector_t phys; 64 sector_t phys;
64 int err, offset; 65 int err, offset;
65 66
66 err = fat_bmap(inode, iblock, &phys, &mapped_blocks); 67 err = fat_bmap(inode, iblock, &phys, &mapped_blocks, create);
67 if (err) 68 if (err)
68 return err; 69 return err;
69 if (phys) { 70 if (phys) {
@@ -93,7 +94,7 @@ static inline int __fat_get_block(struct inode *inode, sector_t iblock,
93 *max_blocks = min(mapped_blocks, *max_blocks); 94 *max_blocks = min(mapped_blocks, *max_blocks);
94 MSDOS_I(inode)->mmu_private += *max_blocks << sb->s_blocksize_bits; 95 MSDOS_I(inode)->mmu_private += *max_blocks << sb->s_blocksize_bits;
95 96
96 err = fat_bmap(inode, iblock, &phys, &mapped_blocks); 97 err = fat_bmap(inode, iblock, &phys, &mapped_blocks, create);
97 if (err) 98 if (err)
98 return err; 99 return err;
99 100
@@ -198,7 +199,14 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
198 199
199static sector_t _fat_bmap(struct address_space *mapping, sector_t block) 200static sector_t _fat_bmap(struct address_space *mapping, sector_t block)
200{ 201{
201 return generic_block_bmap(mapping, block, fat_get_block); 202 sector_t blocknr;
203
204 /* fat_get_cluster() assumes the requested blocknr isn't truncated. */
205 mutex_lock(&mapping->host->i_mutex);
206 blocknr = generic_block_bmap(mapping, block, fat_get_block);
207 mutex_unlock(&mapping->host->i_mutex);
208
209 return blocknr;
202} 210}
203 211
204static const struct address_space_operations fat_aops = { 212static const struct address_space_operations fat_aops = {
@@ -247,25 +255,21 @@ static void fat_hash_init(struct super_block *sb)
247 INIT_HLIST_HEAD(&sbi->inode_hashtable[i]); 255 INIT_HLIST_HEAD(&sbi->inode_hashtable[i]);
248} 256}
249 257
250static inline unsigned long fat_hash(struct super_block *sb, loff_t i_pos) 258static inline unsigned long fat_hash(loff_t i_pos)
251{ 259{
252 unsigned long tmp = (unsigned long)i_pos | (unsigned long) sb; 260 return hash_32(i_pos, FAT_HASH_BITS);
253 tmp = tmp + (tmp >> FAT_HASH_BITS) + (tmp >> FAT_HASH_BITS * 2);
254 return tmp & FAT_HASH_MASK;
255} 261}
256 262
257void fat_attach(struct inode *inode, loff_t i_pos) 263void fat_attach(struct inode *inode, loff_t i_pos)
258{ 264{
259 struct super_block *sb = inode->i_sb; 265 struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
260 struct msdos_sb_info *sbi = MSDOS_SB(sb); 266 struct hlist_head *head = sbi->inode_hashtable + fat_hash(i_pos);
261 267
262 spin_lock(&sbi->inode_hash_lock); 268 spin_lock(&sbi->inode_hash_lock);
263 MSDOS_I(inode)->i_pos = i_pos; 269 MSDOS_I(inode)->i_pos = i_pos;
264 hlist_add_head(&MSDOS_I(inode)->i_fat_hash, 270 hlist_add_head(&MSDOS_I(inode)->i_fat_hash, head);
265 sbi->inode_hashtable + fat_hash(sb, i_pos));
266 spin_unlock(&sbi->inode_hash_lock); 271 spin_unlock(&sbi->inode_hash_lock);
267} 272}
268
269EXPORT_SYMBOL_GPL(fat_attach); 273EXPORT_SYMBOL_GPL(fat_attach);
270 274
271void fat_detach(struct inode *inode) 275void fat_detach(struct inode *inode)
@@ -276,13 +280,12 @@ void fat_detach(struct inode *inode)
276 hlist_del_init(&MSDOS_I(inode)->i_fat_hash); 280 hlist_del_init(&MSDOS_I(inode)->i_fat_hash);
277 spin_unlock(&sbi->inode_hash_lock); 281 spin_unlock(&sbi->inode_hash_lock);
278} 282}
279
280EXPORT_SYMBOL_GPL(fat_detach); 283EXPORT_SYMBOL_GPL(fat_detach);
281 284
282struct inode *fat_iget(struct super_block *sb, loff_t i_pos) 285struct inode *fat_iget(struct super_block *sb, loff_t i_pos)
283{ 286{
284 struct msdos_sb_info *sbi = MSDOS_SB(sb); 287 struct msdos_sb_info *sbi = MSDOS_SB(sb);
285 struct hlist_head *head = sbi->inode_hashtable + fat_hash(sb, i_pos); 288 struct hlist_head *head = sbi->inode_hashtable + fat_hash(i_pos);
286 struct hlist_node *_p; 289 struct hlist_node *_p;
287 struct msdos_inode_info *i; 290 struct msdos_inode_info *i;
288 struct inode *inode = NULL; 291 struct inode *inode = NULL;
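
The new fat_hash() above delegates to the kernel's hash_32(), a multiplicative
hash that multiplies by a golden-ratio-derived constant and keeps the top bits
of the product. The sketch below is a user-space rendering of that idea; the
constant and the FAT_HASH_BITS value of 8 are what I believe this kernel
generation uses, but treat both as illustrative assumptions rather than quotes
from the patch:

#include <stdio.h>
#include <stdint.h>

#define FAT_HASH_BITS		8
#define GOLDEN_RATIO_PRIME_32	0x9e370001UL

static unsigned int hash_32(uint32_t val, unsigned int bits)
{
	/* Multiply, then keep the high-order "bits" bits of the product. */
	uint32_t hash = val * GOLDEN_RATIO_PRIME_32;

	return hash >> (32 - bits);
}

int main(void)
{
	/* i_pos is the on-disk position of the inode's directory entry. */
	uint64_t i_pos = 0x12345678;

	printf("bucket %u of %u\n",
	       hash_32((uint32_t)i_pos, FAT_HASH_BITS), 1 << FAT_HASH_BITS);
	return 0;
}
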
@@ -341,8 +344,7 @@ static int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
341 344
342 if ((de->attr & ATTR_DIR) && !IS_FREE(de->name)) { 345 if ((de->attr & ATTR_DIR) && !IS_FREE(de->name)) {
343 inode->i_generation &= ~1; 346 inode->i_generation &= ~1;
344 inode->i_mode = MSDOS_MKMODE(de->attr, 347 inode->i_mode = fat_make_mode(sbi, de->attr, S_IRWXUGO);
345 S_IRWXUGO & ~sbi->options.fs_dmask) | S_IFDIR;
346 inode->i_op = sbi->dir_ops; 348 inode->i_op = sbi->dir_ops;
347 inode->i_fop = &fat_dir_operations; 349 inode->i_fop = &fat_dir_operations;
348 350
@@ -359,10 +361,9 @@ static int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
359 inode->i_nlink = fat_subdirs(inode); 361 inode->i_nlink = fat_subdirs(inode);
360 } else { /* not a directory */ 362 } else { /* not a directory */
361 inode->i_generation |= 1; 363 inode->i_generation |= 1;
362 inode->i_mode = MSDOS_MKMODE(de->attr, 364 inode->i_mode = fat_make_mode(sbi, de->attr,
363 ((sbi->options.showexec && !is_exec(de->name + 8)) 365 ((sbi->options.showexec && !is_exec(de->name + 8))
364 ? S_IRUGO|S_IWUGO : S_IRWXUGO) 366 ? S_IRUGO|S_IWUGO : S_IRWXUGO));
365 & ~sbi->options.fs_fmask) | S_IFREG;
366 MSDOS_I(inode)->i_start = le16_to_cpu(de->start); 367 MSDOS_I(inode)->i_start = le16_to_cpu(de->start);
367 if (sbi->fat_bits == 32) 368 if (sbi->fat_bits == 32)
368 MSDOS_I(inode)->i_start |= (le16_to_cpu(de->starthi) << 16); 369 MSDOS_I(inode)->i_start |= (le16_to_cpu(de->starthi) << 16);
@@ -378,25 +379,16 @@ static int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
378 if (sbi->options.sys_immutable) 379 if (sbi->options.sys_immutable)
379 inode->i_flags |= S_IMMUTABLE; 380 inode->i_flags |= S_IMMUTABLE;
380 } 381 }
381 MSDOS_I(inode)->i_attrs = de->attr & ATTR_UNUSED; 382 fat_save_attrs(inode, de->attr);
383
382 inode->i_blocks = ((inode->i_size + (sbi->cluster_size - 1)) 384 inode->i_blocks = ((inode->i_size + (sbi->cluster_size - 1))
383 & ~((loff_t)sbi->cluster_size - 1)) >> 9; 385 & ~((loff_t)sbi->cluster_size - 1)) >> 9;
384 inode->i_mtime.tv_sec = 386
385 date_dos2unix(le16_to_cpu(de->time), le16_to_cpu(de->date), 387 fat_time_fat2unix(sbi, &inode->i_mtime, de->time, de->date, 0);
386 sbi->options.tz_utc);
387 inode->i_mtime.tv_nsec = 0;
388 if (sbi->options.isvfat) { 388 if (sbi->options.isvfat) {
389 int secs = de->ctime_cs / 100; 389 fat_time_fat2unix(sbi, &inode->i_ctime, de->ctime,
390 int csecs = de->ctime_cs % 100; 390 de->cdate, de->ctime_cs);
391 inode->i_ctime.tv_sec = 391 fat_time_fat2unix(sbi, &inode->i_atime, 0, de->adate, 0);
392 date_dos2unix(le16_to_cpu(de->ctime),
393 le16_to_cpu(de->cdate),
394 sbi->options.tz_utc) + secs;
395 inode->i_ctime.tv_nsec = csecs * 10000000;
396 inode->i_atime.tv_sec =
397 date_dos2unix(0, le16_to_cpu(de->adate),
398 sbi->options.tz_utc);
399 inode->i_atime.tv_nsec = 0;
400 } else 392 } else
401 inode->i_ctime = inode->i_atime = inode->i_mtime; 393 inode->i_ctime = inode->i_atime = inode->i_mtime;
402 394
@@ -443,13 +435,8 @@ static void fat_delete_inode(struct inode *inode)
443 435
444static void fat_clear_inode(struct inode *inode) 436static void fat_clear_inode(struct inode *inode)
445{ 437{
446 struct super_block *sb = inode->i_sb;
447 struct msdos_sb_info *sbi = MSDOS_SB(sb);
448
449 spin_lock(&sbi->inode_hash_lock);
450 fat_cache_inval_inode(inode); 438 fat_cache_inval_inode(inode);
451 hlist_del_init(&MSDOS_I(inode)->i_fat_hash); 439 fat_detach(inode);
452 spin_unlock(&sbi->inode_hash_lock);
453} 440}
454 441
455static void fat_write_super(struct super_block *sb) 442static void fat_write_super(struct super_block *sb)
@@ -555,6 +542,20 @@ static int fat_statfs(struct dentry *dentry, struct kstatfs *buf)
555 return 0; 542 return 0;
556} 543}
557 544
545static inline loff_t fat_i_pos_read(struct msdos_sb_info *sbi,
546 struct inode *inode)
547{
548 loff_t i_pos;
549#if BITS_PER_LONG == 32
550 spin_lock(&sbi->inode_hash_lock);
551#endif
552 i_pos = MSDOS_I(inode)->i_pos;
553#if BITS_PER_LONG == 32
554 spin_unlock(&sbi->inode_hash_lock);
555#endif
556 return i_pos;
557}
558
558static int fat_write_inode(struct inode *inode, int wait) 559static int fat_write_inode(struct inode *inode, int wait)
559{ 560{
560 struct super_block *sb = inode->i_sb; 561 struct super_block *sb = inode->i_sb;
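
fat_i_pos_read() above takes the hash lock only when BITS_PER_LONG is 32
because i_pos is a 64-bit loff_t: on a 32-bit machine a plain load can observe
half of an update made under the lock by fat_attach() or fat_detach(). A
user-space sketch of the same pattern, with a pthread mutex standing in for
the spinlock (the kernel relies on an aligned 64-bit load being atomic on
64-bit; strictly portable C would use C11 atomics instead):

#include <stdint.h>
#include <stdio.h>
#include <limits.h>
#include <pthread.h>

static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;
static int64_t i_pos;				/* written only under hash_lock */

static int64_t i_pos_read(void)
{
	int64_t v;

#if ULONG_MAX == 0xffffffffUL			/* 32-bit long: load may tear */
	pthread_mutex_lock(&hash_lock);
	v = i_pos;
	pthread_mutex_unlock(&hash_lock);
#else						/* 64-bit long: single load */
	v = i_pos;
#endif
	return v;
}

int main(void)
{
	pthread_mutex_lock(&hash_lock);		/* the fat_attach() side */
	i_pos = (int64_t)1 << 40;
	pthread_mutex_unlock(&hash_lock);

	printf("i_pos = %lld\n", (long long)i_pos_read());
	return 0;
}
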
@@ -564,9 +565,12 @@ static int fat_write_inode(struct inode *inode, int wait)
564 loff_t i_pos; 565 loff_t i_pos;
565 int err; 566 int err;
566 567
568 if (inode->i_ino == MSDOS_ROOT_INO)
569 return 0;
570
567retry: 571retry:
568 i_pos = MSDOS_I(inode)->i_pos; 572 i_pos = fat_i_pos_read(sbi, inode);
569 if (inode->i_ino == MSDOS_ROOT_INO || !i_pos) 573 if (!i_pos)
570 return 0; 574 return 0;
571 575
572 bh = sb_bread(sb, i_pos >> sbi->dir_per_block_bits); 576 bh = sb_bread(sb, i_pos >> sbi->dir_per_block_bits);
@@ -588,19 +592,17 @@ retry:
588 raw_entry->size = 0; 592 raw_entry->size = 0;
589 else 593 else
590 raw_entry->size = cpu_to_le32(inode->i_size); 594 raw_entry->size = cpu_to_le32(inode->i_size);
591 raw_entry->attr = fat_attr(inode); 595 raw_entry->attr = fat_make_attrs(inode);
592 raw_entry->start = cpu_to_le16(MSDOS_I(inode)->i_logstart); 596 raw_entry->start = cpu_to_le16(MSDOS_I(inode)->i_logstart);
593 raw_entry->starthi = cpu_to_le16(MSDOS_I(inode)->i_logstart >> 16); 597 raw_entry->starthi = cpu_to_le16(MSDOS_I(inode)->i_logstart >> 16);
594 fat_date_unix2dos(inode->i_mtime.tv_sec, &raw_entry->time, 598 fat_time_unix2fat(sbi, &inode->i_mtime, &raw_entry->time,
595 &raw_entry->date, sbi->options.tz_utc); 599 &raw_entry->date, NULL);
596 if (sbi->options.isvfat) { 600 if (sbi->options.isvfat) {
597 __le16 atime; 601 __le16 atime;
598 fat_date_unix2dos(inode->i_ctime.tv_sec, &raw_entry->ctime, 602 fat_time_unix2fat(sbi, &inode->i_ctime, &raw_entry->ctime,
599 &raw_entry->cdate, sbi->options.tz_utc); 603 &raw_entry->cdate, &raw_entry->ctime_cs);
600 fat_date_unix2dos(inode->i_atime.tv_sec, &atime, 604 fat_time_unix2fat(sbi, &inode->i_atime, &atime,
601 &raw_entry->adate, sbi->options.tz_utc); 605 &raw_entry->adate, NULL);
602 raw_entry->ctime_cs = (inode->i_ctime.tv_sec & 1) * 100 +
603 inode->i_ctime.tv_nsec / 10000000;
604 } 606 }
605 spin_unlock(&sbi->inode_hash_lock); 607 spin_unlock(&sbi->inode_hash_lock);
606 mark_buffer_dirty(bh); 608 mark_buffer_dirty(bh);
@@ -819,8 +821,10 @@ static int fat_show_options(struct seq_file *m, struct vfsmount *mnt)
819 seq_puts(m, ",uni_xlate"); 821 seq_puts(m, ",uni_xlate");
820 if (!opts->numtail) 822 if (!opts->numtail)
821 seq_puts(m, ",nonumtail"); 823 seq_puts(m, ",nonumtail");
824 if (opts->rodir)
825 seq_puts(m, ",rodir");
822 } 826 }
823 if (sbi->options.flush) 827 if (opts->flush)
824 seq_puts(m, ",flush"); 828 seq_puts(m, ",flush");
825 if (opts->tz_utc) 829 if (opts->tz_utc)
826 seq_puts(m, ",tz=UTC"); 830 seq_puts(m, ",tz=UTC");
@@ -836,7 +840,7 @@ enum {
836 Opt_charset, Opt_shortname_lower, Opt_shortname_win95, 840 Opt_charset, Opt_shortname_lower, Opt_shortname_win95,
837 Opt_shortname_winnt, Opt_shortname_mixed, Opt_utf8_no, Opt_utf8_yes, 841 Opt_shortname_winnt, Opt_shortname_mixed, Opt_utf8_no, Opt_utf8_yes,
838 Opt_uni_xl_no, Opt_uni_xl_yes, Opt_nonumtail_no, Opt_nonumtail_yes, 842 Opt_uni_xl_no, Opt_uni_xl_yes, Opt_nonumtail_no, Opt_nonumtail_yes,
839 Opt_obsolate, Opt_flush, Opt_tz_utc, Opt_err, 843 Opt_obsolate, Opt_flush, Opt_tz_utc, Opt_rodir, Opt_err,
840}; 844};
841 845
842static const match_table_t fat_tokens = { 846static const match_table_t fat_tokens = {
@@ -908,6 +912,7 @@ static const match_table_t vfat_tokens = {
908 {Opt_nonumtail_yes, "nonumtail=yes"}, 912 {Opt_nonumtail_yes, "nonumtail=yes"},
909 {Opt_nonumtail_yes, "nonumtail=true"}, 913 {Opt_nonumtail_yes, "nonumtail=true"},
910 {Opt_nonumtail_yes, "nonumtail"}, 914 {Opt_nonumtail_yes, "nonumtail"},
915 {Opt_rodir, "rodir"},
911 {Opt_err, NULL} 916 {Opt_err, NULL}
912}; 917};
913 918
@@ -927,10 +932,13 @@ static int parse_options(char *options, int is_vfat, int silent, int *debug,
927 opts->allow_utime = -1; 932 opts->allow_utime = -1;
928 opts->codepage = fat_default_codepage; 933 opts->codepage = fat_default_codepage;
929 opts->iocharset = fat_default_iocharset; 934 opts->iocharset = fat_default_iocharset;
930 if (is_vfat) 935 if (is_vfat) {
931 opts->shortname = VFAT_SFN_DISPLAY_LOWER|VFAT_SFN_CREATE_WIN95; 936 opts->shortname = VFAT_SFN_DISPLAY_LOWER|VFAT_SFN_CREATE_WIN95;
932 else 937 opts->rodir = 0;
938 } else {
933 opts->shortname = 0; 939 opts->shortname = 0;
940 opts->rodir = 1;
941 }
934 opts->name_check = 'n'; 942 opts->name_check = 'n';
935 opts->quiet = opts->showexec = opts->sys_immutable = opts->dotsOK = 0; 943 opts->quiet = opts->showexec = opts->sys_immutable = opts->dotsOK = 0;
936 opts->utf8 = opts->unicode_xlate = 0; 944 opts->utf8 = opts->unicode_xlate = 0;
@@ -1081,6 +1089,9 @@ static int parse_options(char *options, int is_vfat, int silent, int *debug,
1081 case Opt_nonumtail_yes: /* empty or 1 or yes or true */ 1089 case Opt_nonumtail_yes: /* empty or 1 or yes or true */
1082 opts->numtail = 0; /* negated option */ 1090 opts->numtail = 0; /* negated option */
1083 break; 1091 break;
1092 case Opt_rodir:
1093 opts->rodir = 1;
1094 break;
1084 1095
1085 /* obsolete mount options */ 1096 /* obsolete mount options */
1086 case Opt_obsolate: 1097 case Opt_obsolate:
@@ -1126,7 +1137,7 @@ static int fat_read_root(struct inode *inode)
1126 inode->i_gid = sbi->options.fs_gid; 1137 inode->i_gid = sbi->options.fs_gid;
1127 inode->i_version++; 1138 inode->i_version++;
1128 inode->i_generation = 0; 1139 inode->i_generation = 0;
1129 inode->i_mode = (S_IRWXUGO & ~sbi->options.fs_dmask) | S_IFDIR; 1140 inode->i_mode = fat_make_mode(sbi, ATTR_DIR, S_IRWXUGO);
1130 inode->i_op = sbi->dir_ops; 1141 inode->i_op = sbi->dir_ops;
1131 inode->i_fop = &fat_dir_operations; 1142 inode->i_fop = &fat_dir_operations;
1132 if (sbi->fat_bits == 32) { 1143 if (sbi->fat_bits == 32) {
@@ -1143,7 +1154,7 @@ static int fat_read_root(struct inode *inode)
1143 MSDOS_I(inode)->i_logstart = 0; 1154 MSDOS_I(inode)->i_logstart = 0;
1144 MSDOS_I(inode)->mmu_private = inode->i_size; 1155 MSDOS_I(inode)->mmu_private = inode->i_size;
1145 1156
1146 MSDOS_I(inode)->i_attrs = ATTR_NONE; 1157 fat_save_attrs(inode, ATTR_DIR);
1147 inode->i_mtime.tv_sec = inode->i_atime.tv_sec = inode->i_ctime.tv_sec = 0; 1158 inode->i_mtime.tv_sec = inode->i_atime.tv_sec = inode->i_ctime.tv_sec = 0;
1148 inode->i_mtime.tv_nsec = inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = 0; 1159 inode->i_mtime.tv_nsec = inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = 0;
1149 inode->i_nlink = fat_subdirs(inode)+2; 1160 inode->i_nlink = fat_subdirs(inode)+2;
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index 79fb98ad36d4..ac39ebcc1496 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -8,8 +8,8 @@
8 8
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/fs.h> 10#include <linux/fs.h>
11#include <linux/msdos_fs.h>
12#include <linux/buffer_head.h> 11#include <linux/buffer_head.h>
12#include "fat.h"
13 13
14/* 14/*
15 * fat_fs_panic reports a severe file system problem and sets the file system 15 * fat_fs_panic reports a severe file system problem and sets the file system
@@ -124,8 +124,9 @@ int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster)
124 mark_inode_dirty(inode); 124 mark_inode_dirty(inode);
125 } 125 }
126 if (new_fclus != (inode->i_blocks >> (sbi->cluster_bits - 9))) { 126 if (new_fclus != (inode->i_blocks >> (sbi->cluster_bits - 9))) {
127 fat_fs_panic(sb, "clusters badly computed (%d != %lu)", 127 fat_fs_panic(sb, "clusters badly computed (%d != %llu)",
128 new_fclus, inode->i_blocks >> (sbi->cluster_bits - 9)); 128 new_fclus,
129 (llu)(inode->i_blocks >> (sbi->cluster_bits - 9)));
129 fat_cache_inval_inode(inode); 130 fat_cache_inval_inode(inode);
130 } 131 }
131 inode->i_blocks += nr_cluster << (sbi->cluster_bits - 9); 132 inode->i_blocks += nr_cluster << (sbi->cluster_bits - 9);
@@ -135,65 +136,131 @@ int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster)
135 136
136extern struct timezone sys_tz; 137extern struct timezone sys_tz;
137 138
139/*
140 * The epoch of FAT timestamp is 1980.
141 * : bits : value
142 * date: 0 - 4: day (1 - 31)
143 * date: 5 - 8: month (1 - 12)
144 * date: 9 - 15: year (0 - 127) from 1980
145 * time: 0 - 4: sec (0 - 29) 2sec counts
146 * time: 5 - 10: min (0 - 59)
147 * time: 11 - 15: hour (0 - 23)
148 */
149#define SECS_PER_MIN 60
150#define SECS_PER_HOUR (60 * 60)
151#define SECS_PER_DAY (SECS_PER_HOUR * 24)
152#define UNIX_SECS_1980 315532800L
153#if BITS_PER_LONG == 64
154#define UNIX_SECS_2108 4354819200L
155#endif
156/* days between 1.1.70 and 1.1.80 (2 leap days) */
157#define DAYS_DELTA (365 * 10 + 2)
158/* 120 (2100 - 1980) isn't leap year */
159#define YEAR_2100 120
160#define IS_LEAP_YEAR(y) (!((y) & 3) && (y) != YEAR_2100)
161
138/* Linear day numbers of the respective 1sts in non-leap years. */ 162/* Linear day numbers of the respective 1sts in non-leap years. */
139static int day_n[] = { 163static time_t days_in_year[] = {
140 /* Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec */ 164 /* Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec */
141 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 0, 0, 0, 0 165 0, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 0, 0, 0,
142}; 166};
143 167
144/* Convert a MS-DOS time/date pair to a UNIX date (seconds since 1 1 70). */ 168/* Convert a FAT time/date pair to a UNIX date (seconds since 1 1 70). */
145int date_dos2unix(unsigned short time, unsigned short date, int tz_utc) 169void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec *ts,
170 __le16 __time, __le16 __date, u8 time_cs)
146{ 171{
147 int month, year, secs; 172 u16 time = le16_to_cpu(__time), date = le16_to_cpu(__date);
173 time_t second, day, leap_day, month, year;
148 174
149 /* 175 year = date >> 9;
150 * first subtract and mask after that... Otherwise, if 176 month = max(1, (date >> 5) & 0xf);
151 * date == 0, bad things happen 177 day = max(1, date & 0x1f) - 1;
152 */ 178
153 month = ((date >> 5) - 1) & 15; 179 leap_day = (year + 3) / 4;
154 year = date >> 9; 180 if (year > YEAR_2100) /* 2100 isn't leap year */
155 secs = (time & 31)*2+60*((time >> 5) & 63)+(time >> 11)*3600+86400* 181 leap_day--;
156 ((date & 31)-1+day_n[month]+(year/4)+year*365-((year & 3) == 0 && 182 if (IS_LEAP_YEAR(year) && month > 2)
157 month < 2 ? 1 : 0)+3653); 183 leap_day++;
158 /* days since 1.1.70 plus 80's leap day */ 184
159 if (!tz_utc) 185 second = (time & 0x1f) << 1;
160 secs += sys_tz.tz_minuteswest*60; 186 second += ((time >> 5) & 0x3f) * SECS_PER_MIN;
161 return secs; 187 second += (time >> 11) * SECS_PER_HOUR;
188 second += (year * 365 + leap_day
189 + days_in_year[month] + day
190 + DAYS_DELTA) * SECS_PER_DAY;
191
192 if (!sbi->options.tz_utc)
193 second += sys_tz.tz_minuteswest * SECS_PER_MIN;
194
195 if (time_cs) {
196 ts->tv_sec = second + (time_cs / 100);
197 ts->tv_nsec = (time_cs % 100) * 10000000;
198 } else {
199 ts->tv_sec = second;
200 ts->tv_nsec = 0;
201 }
162} 202}
163 203
164/* Convert linear UNIX date to a MS-DOS time/date pair. */ 204/* Convert linear UNIX date to a FAT time/date pair. */
165void fat_date_unix2dos(int unix_date, __le16 *time, __le16 *date, int tz_utc) 205void fat_time_unix2fat(struct msdos_sb_info *sbi, struct timespec *ts,
206 __le16 *time, __le16 *date, u8 *time_cs)
166{ 207{
167 int day, year, nl_day, month; 208 time_t second = ts->tv_sec;
209 time_t day, leap_day, month, year;
168 210
169 if (!tz_utc) 211 if (!sbi->options.tz_utc)
170 unix_date -= sys_tz.tz_minuteswest*60; 212 second -= sys_tz.tz_minuteswest * SECS_PER_MIN;
171 213
172 /* Jan 1 GMT 00:00:00 1980. But what about another time zone? */ 214 /* Jan 1 GMT 00:00:00 1980. But what about another time zone? */
173 if (unix_date < 315532800) 215 if (second < UNIX_SECS_1980) {
174 unix_date = 315532800; 216 *time = 0;
175 217 *date = cpu_to_le16((0 << 9) | (1 << 5) | 1);
176 *time = cpu_to_le16((unix_date % 60)/2+(((unix_date/60) % 60) << 5)+ 218 if (time_cs)
177 (((unix_date/3600) % 24) << 11)); 219 *time_cs = 0;
178 day = unix_date/86400-3652; 220 return;
179 year = day/365; 221 }
180 if ((year+3)/4+365*year > day) 222#if BITS_PER_LONG == 64
223 if (second >= UNIX_SECS_2108) {
224 *time = cpu_to_le16((23 << 11) | (59 << 5) | 29);
225 *date = cpu_to_le16((127 << 9) | (12 << 5) | 31);
226 if (time_cs)
227 *time_cs = 199;
228 return;
229 }
230#endif
231
232 day = second / SECS_PER_DAY - DAYS_DELTA;
233 year = day / 365;
234 leap_day = (year + 3) / 4;
235 if (year > YEAR_2100) /* 2100 isn't leap year */
236 leap_day--;
237 if (year * 365 + leap_day > day)
181 year--; 238 year--;
182 day -= (year+3)/4+365*year; 239 leap_day = (year + 3) / 4;
183 if (day == 59 && !(year & 3)) { 240 if (year > YEAR_2100) /* 2100 isn't leap year */
184 nl_day = day; 241 leap_day--;
242 day -= year * 365 + leap_day;
243
244 if (IS_LEAP_YEAR(year) && day == days_in_year[3]) {
185 month = 2; 245 month = 2;
186 } else { 246 } else {
187 nl_day = (year & 3) || day <= 59 ? day : day-1; 247 if (IS_LEAP_YEAR(year) && day > days_in_year[3])
188 for (month = 0; month < 12; month++) { 248 day--;
189 if (day_n[month] > nl_day) 249 for (month = 1; month < 12; month++) {
250 if (days_in_year[month + 1] > day)
190 break; 251 break;
191 } 252 }
192 } 253 }
193 *date = cpu_to_le16(nl_day-day_n[month-1]+1+(month << 5)+(year << 9)); 254 day -= days_in_year[month];
194}
195 255
196EXPORT_SYMBOL_GPL(fat_date_unix2dos); 256 *time = cpu_to_le16(((second / SECS_PER_HOUR) % 24) << 11
257 | ((second / SECS_PER_MIN) % 60) << 5
258 | (second % SECS_PER_MIN) >> 1);
259 *date = cpu_to_le16((year << 9) | (month << 5) | (day + 1));
260 if (time_cs)
261 *time_cs = (ts->tv_sec & 1) * 100 + ts->tv_nsec / 10000000;
262}
263EXPORT_SYMBOL_GPL(fat_time_unix2fat);
197 264
198int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs) 265int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs)
199{ 266{
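
The new helpers above convert between Unix time and the on-disk format that
the block comment at the top of the hunk documents: two 16-bit words, the date
counted from 1980 and the time held in 2-second units, which is why ctime_cs
is needed to recover the odd second and the centiseconds. The sketch below
shows only the bit layout, not the epoch conversion, and is ordinary
user-space C rather than the kernel functions:

#include <stdio.h>
#include <stdint.h>

static uint16_t fat_pack_date(unsigned year, unsigned month, unsigned day)
{
	return ((year - 1980) << 9) | (month << 5) | day;
}

static uint16_t fat_pack_time(unsigned hour, unsigned min, unsigned sec)
{
	return (hour << 11) | (min << 5) | (sec >> 1);	/* 2-second units */
}

static void fat_unpack(uint16_t date, uint16_t time)
{
	printf("%04d-%02d-%02d %02d:%02d:%02d\n",
	       (date >> 9) + 1980, (date >> 5) & 0xf, date & 0x1f,
	       time >> 11, (time >> 5) & 0x3f, (time & 0x1f) << 1);
}

int main(void)
{
	uint16_t date = fat_pack_date(2008, 11, 12);
	uint16_t time = fat_pack_time(23, 59, 37);

	/* Prints 2008-11-12 23:59:36: the odd second is lost on disk. */
	fat_unpack(date, time);
	return 0;
}
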
diff --git a/fs/msdos/namei.c b/fs/fat/namei_msdos.c
index e844b9809d27..7ba03a4acbe0 100644
--- a/fs/msdos/namei.c
+++ b/fs/fat/namei_msdos.c
@@ -9,8 +9,8 @@
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/time.h> 10#include <linux/time.h>
11#include <linux/buffer_head.h> 11#include <linux/buffer_head.h>
12#include <linux/msdos_fs.h>
13#include <linux/smp_lock.h> 12#include <linux/smp_lock.h>
13#include "fat.h"
14 14
15/* Characters that are undesirable in an MS-DOS file name */ 15/* Characters that are undesirable in an MS-DOS file name */
16static unsigned char bad_chars[] = "*?<>|\""; 16static unsigned char bad_chars[] = "*?<>|\"";
@@ -203,33 +203,37 @@ static struct dentry *msdos_lookup(struct inode *dir, struct dentry *dentry,
203{ 203{
204 struct super_block *sb = dir->i_sb; 204 struct super_block *sb = dir->i_sb;
205 struct fat_slot_info sinfo; 205 struct fat_slot_info sinfo;
206 struct inode *inode = NULL; 206 struct inode *inode;
207 int res; 207 int err;
208
209 dentry->d_op = &msdos_dentry_operations;
210 208
211 lock_super(sb); 209 lock_super(sb);
212 res = msdos_find(dir, dentry->d_name.name, dentry->d_name.len, &sinfo); 210
213 if (res == -ENOENT) 211 err = msdos_find(dir, dentry->d_name.name, dentry->d_name.len, &sinfo);
214 goto add; 212 if (err) {
215 if (res < 0) 213 if (err == -ENOENT) {
216 goto out; 214 inode = NULL;
215 goto out;
216 }
217 goto error;
218 }
219
217 inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos); 220 inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
218 brelse(sinfo.bh); 221 brelse(sinfo.bh);
219 if (IS_ERR(inode)) { 222 if (IS_ERR(inode)) {
220 res = PTR_ERR(inode); 223 err = PTR_ERR(inode);
221 goto out; 224 goto error;
222 } 225 }
223add: 226out:
224 res = 0; 227 unlock_super(sb);
228 dentry->d_op = &msdos_dentry_operations;
225 dentry = d_splice_alias(inode, dentry); 229 dentry = d_splice_alias(inode, dentry);
226 if (dentry) 230 if (dentry)
227 dentry->d_op = &msdos_dentry_operations; 231 dentry->d_op = &msdos_dentry_operations;
228out: 232 return dentry;
233
234error:
229 unlock_super(sb); 235 unlock_super(sb);
230 if (!res) 236 return ERR_PTR(err);
231 return dentry;
232 return ERR_PTR(res);
233} 237}
234 238
235/***** Creates a directory entry (name is already formatted). */ 239/***** Creates a directory entry (name is already formatted). */
@@ -247,7 +251,7 @@ static int msdos_add_entry(struct inode *dir, const unsigned char *name,
247 if (is_hid) 251 if (is_hid)
248 de.attr |= ATTR_HIDDEN; 252 de.attr |= ATTR_HIDDEN;
249 de.lcase = 0; 253 de.lcase = 0;
250 fat_date_unix2dos(ts->tv_sec, &time, &date, sbi->options.tz_utc); 254 fat_time_unix2fat(sbi, ts, &time, &date, NULL);
251 de.cdate = de.adate = 0; 255 de.cdate = de.adate = 0;
252 de.ctime = 0; 256 de.ctime = 0;
253 de.ctime_cs = 0; 257 de.ctime_cs = 0;
diff --git a/fs/vfat/namei.c b/fs/fat/namei_vfat.c
index 155c10b4adbd..bf326d4356a3 100644
--- a/fs/vfat/namei.c
+++ b/fs/fat/namei_vfat.c
@@ -16,36 +16,75 @@
16 */ 16 */
17 17
18#include <linux/module.h> 18#include <linux/module.h>
19
20#include <linux/jiffies.h> 19#include <linux/jiffies.h>
21#include <linux/msdos_fs.h>
22#include <linux/ctype.h> 20#include <linux/ctype.h>
23#include <linux/slab.h> 21#include <linux/slab.h>
24#include <linux/smp_lock.h> 22#include <linux/smp_lock.h>
25#include <linux/buffer_head.h> 23#include <linux/buffer_head.h>
26#include <linux/namei.h> 24#include <linux/namei.h>
25#include "fat.h"
27 26
28static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd) 27/*
28 * If a new entry was created in the parent, it may also have created
29 * the 8.3 alias (the short name of the long name). The parent may
30 * therefore hold a negative dentry which matches that 8.3 alias.
31 *
32 * If that happened, the negative dentry isn't actually negative
33 * anymore, so drop it.
34 */
35static int vfat_revalidate_shortname(struct dentry *dentry)
29{ 36{
30 int ret = 1; 37 int ret = 1;
31 38 spin_lock(&dentry->d_lock);
32 if (!dentry->d_inode && 39 if (dentry->d_time != dentry->d_parent->d_inode->i_version)
33 nd && !(nd->flags & LOOKUP_CONTINUE) && (nd->flags & LOOKUP_CREATE))
34 /*
35 * negative dentry is dropped, in order to make sure
36 * to use the name which a user desires if this is
37 * create path.
38 */
39 ret = 0; 40 ret = 0;
40 else { 41 spin_unlock(&dentry->d_lock);
41 spin_lock(&dentry->d_lock);
42 if (dentry->d_time != dentry->d_parent->d_inode->i_version)
43 ret = 0;
44 spin_unlock(&dentry->d_lock);
45 }
46 return ret; 42 return ret;
47} 43}
48 44
45static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd)
46{
47	/* This is not a negative dentry, so it is always valid. */
48 if (dentry->d_inode)
49 return 1;
50 return vfat_revalidate_shortname(dentry);
51}
52
53static int vfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd)
54{
55 /*
56	 * This is not a negative dentry, so it is always valid.
57	 *
58	 * Note that rename() onto an existing directory entry will have
59	 * ->d_inode and will keep the existing name rather than the name
60	 * specified by the user.
61	 *
62	 * We could drop this positive dentry here, but dropping a positive
63	 * dentry is a bad idea, so rename("filename", "FILENAME") stays unsupported for now.
64 */
65 if (dentry->d_inode)
66 return 1;
67
68 /*
69 * This may be nfsd (or something), anyway, we can't see the
70 * intent of this. So, since this can be for creation, drop it.
71 */
72 if (!nd)
73 return 0;
74
75 /*
76 * Drop the negative dentry, in order to make sure to use the
77 * case sensitive name which is specified by user if this is
78 * for creation.
79 */
80 if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT))) {
81 if (nd->flags & LOOKUP_CREATE)
82 return 0;
83 }
84
85 return vfat_revalidate_shortname(dentry);
86}
87
49/* returns the length of a struct qstr, ignoring trailing dots */ 88/* returns the length of a struct qstr, ignoring trailing dots */
50static unsigned int vfat_striptail_len(struct qstr *qstr) 89static unsigned int vfat_striptail_len(struct qstr *qstr)
51{ 90{
@@ -127,25 +166,16 @@ static int vfat_cmp(struct dentry *dentry, struct qstr *a, struct qstr *b)
127 return 1; 166 return 1;
128} 167}
129 168
130static struct dentry_operations vfat_dentry_ops[4] = { 169static struct dentry_operations vfat_ci_dentry_ops = {
131 { 170 .d_revalidate = vfat_revalidate_ci,
132 .d_hash = vfat_hashi, 171 .d_hash = vfat_hashi,
133 .d_compare = vfat_cmpi, 172 .d_compare = vfat_cmpi,
134 }, 173};
135 { 174
136 .d_revalidate = vfat_revalidate, 175static struct dentry_operations vfat_dentry_ops = {
137 .d_hash = vfat_hashi, 176 .d_revalidate = vfat_revalidate,
138 .d_compare = vfat_cmpi, 177 .d_hash = vfat_hash,
139 }, 178 .d_compare = vfat_cmp,
140 {
141 .d_hash = vfat_hash,
142 .d_compare = vfat_cmp,
143 },
144 {
145 .d_revalidate = vfat_revalidate,
146 .d_hash = vfat_hash,
147 .d_compare = vfat_cmp,
148 }
149}; 179};
150 180
151/* Characters that are undesirable in an MS-DOS file name */ 181/* Characters that are undesirable in an MS-DOS file name */
@@ -569,6 +599,7 @@ static int vfat_build_slots(struct inode *dir, const unsigned char *name,
569 unsigned char msdos_name[MSDOS_NAME]; 599 unsigned char msdos_name[MSDOS_NAME];
570 wchar_t *uname; 600 wchar_t *uname;
571 __le16 time, date; 601 __le16 time, date;
602 u8 time_cs;
572 int err, ulen, usize, i; 603 int err, ulen, usize, i;
573 loff_t offset; 604 loff_t offset;
574 605
@@ -621,10 +652,10 @@ shortname:
621 memcpy(de->name, msdos_name, MSDOS_NAME); 652 memcpy(de->name, msdos_name, MSDOS_NAME);
622 de->attr = is_dir ? ATTR_DIR : ATTR_ARCH; 653 de->attr = is_dir ? ATTR_DIR : ATTR_ARCH;
623 de->lcase = lcase; 654 de->lcase = lcase;
624 fat_date_unix2dos(ts->tv_sec, &time, &date, sbi->options.tz_utc); 655 fat_time_unix2fat(sbi, ts, &time, &date, &time_cs);
625 de->time = de->ctime = time; 656 de->time = de->ctime = time;
626 de->date = de->cdate = de->adate = date; 657 de->date = de->cdate = de->adate = date;
627 de->ctime_cs = 0; 658 de->ctime_cs = time_cs;
628 de->start = cpu_to_le16(cluster); 659 de->start = cpu_to_le16(cluster);
629 de->starthi = cpu_to_le16(cluster >> 16); 660 de->starthi = cpu_to_le16(cluster >> 16);
630 de->size = 0; 661 de->size = 0;
@@ -683,46 +714,58 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
683{ 714{
684 struct super_block *sb = dir->i_sb; 715 struct super_block *sb = dir->i_sb;
685 struct fat_slot_info sinfo; 716 struct fat_slot_info sinfo;
686 struct inode *inode = NULL; 717 struct inode *inode;
687 struct dentry *alias; 718 struct dentry *alias;
688 int err, table; 719 int err;
689 720
690 lock_super(sb); 721 lock_super(sb);
691 table = (MSDOS_SB(sb)->options.name_check == 's') ? 2 : 0;
692 dentry->d_op = &vfat_dentry_ops[table];
693 722
694 err = vfat_find(dir, &dentry->d_name, &sinfo); 723 err = vfat_find(dir, &dentry->d_name, &sinfo);
695 if (err) { 724 if (err) {
696 table++; 725 if (err == -ENOENT) {
726 inode = NULL;
727 goto out;
728 }
697 goto error; 729 goto error;
698 } 730 }
731
699 inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos); 732 inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
700 brelse(sinfo.bh); 733 brelse(sinfo.bh);
701 if (IS_ERR(inode)) { 734 if (IS_ERR(inode)) {
702 unlock_super(sb); 735 err = PTR_ERR(inode);
703 return ERR_CAST(inode); 736 goto error;
704 } 737 }
705 alias = d_find_alias(inode);
706 if (alias) {
707 if (d_invalidate(alias) == 0)
708 dput(alias);
709 else {
710 iput(inode);
711 unlock_super(sb);
712 return alias;
713 }
714 738
739 alias = d_find_alias(inode);
740 if (alias && !(alias->d_flags & DCACHE_DISCONNECTED)) {
741 /*
742 * This inode has non DCACHE_DISCONNECTED dentry. This
743 * means, the user did ->lookup() by an another name
744 * (longname vs 8.3 alias of it) in past.
745 *
746 * Switch to new one for reason of locality if possible.
747 */
748 BUG_ON(d_unhashed(alias));
749 if (!S_ISDIR(inode->i_mode))
750 d_move(alias, dentry);
751 iput(inode);
752 unlock_super(sb);
753 return alias;
715 } 754 }
716error: 755out:
717 unlock_super(sb); 756 unlock_super(sb);
718 dentry->d_op = &vfat_dentry_ops[table]; 757 dentry->d_op = sb->s_root->d_op;
719 dentry->d_time = dentry->d_parent->d_inode->i_version; 758 dentry->d_time = dentry->d_parent->d_inode->i_version;
720 dentry = d_splice_alias(inode, dentry); 759 dentry = d_splice_alias(inode, dentry);
721 if (dentry) { 760 if (dentry) {
722 dentry->d_op = &vfat_dentry_ops[table]; 761 dentry->d_op = sb->s_root->d_op;
723 dentry->d_time = dentry->d_parent->d_inode->i_version; 762 dentry->d_time = dentry->d_parent->d_inode->i_version;
724 } 763 }
725 return dentry; 764 return dentry;
765
766error:
767 unlock_super(sb);
768 return ERR_PTR(err);
726} 769}
727 770
728static int vfat_create(struct inode *dir, struct dentry *dentry, int mode, 771static int vfat_create(struct inode *dir, struct dentry *dentry, int mode,
@@ -1014,9 +1057,9 @@ static int vfat_fill_super(struct super_block *sb, void *data, int silent)
1014 return res; 1057 return res;
1015 1058
1016 if (MSDOS_SB(sb)->options.name_check != 's') 1059 if (MSDOS_SB(sb)->options.name_check != 's')
1017 sb->s_root->d_op = &vfat_dentry_ops[0]; 1060 sb->s_root->d_op = &vfat_ci_dentry_ops;
1018 else 1061 else
1019 sb->s_root->d_op = &vfat_dentry_ops[2]; 1062 sb->s_root->d_op = &vfat_dentry_ops;
1020 1063
1021 return 0; 1064 return 0;
1022} 1065}
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index 1bd8d4acc6f2..61f32f3868cd 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -115,7 +115,7 @@ static int __try_to_free_cp_buf(struct journal_head *jh)
115 */ 115 */
116void __log_wait_for_space(journal_t *journal) 116void __log_wait_for_space(journal_t *journal)
117{ 117{
118 int nblocks; 118 int nblocks, space_left;
119 assert_spin_locked(&journal->j_state_lock); 119 assert_spin_locked(&journal->j_state_lock);
120 120
121 nblocks = jbd_space_needed(journal); 121 nblocks = jbd_space_needed(journal);
@@ -128,25 +128,42 @@ void __log_wait_for_space(journal_t *journal)
128 /* 128 /*
129 * Test again, another process may have checkpointed while we 129 * Test again, another process may have checkpointed while we
130 * were waiting for the checkpoint lock. If there are no 130 * were waiting for the checkpoint lock. If there are no
131 * outstanding transactions there is nothing to checkpoint and 131 * transactions ready to be checkpointed, try to recover
132 * we can't make progress. Abort the journal in this case. 132 * journal space by calling cleanup_journal_tail(), and if
133 * that doesn't work, by waiting for the currently committing
134 * transaction to complete. If there is absolutely no way
135 * to make progress, this is either a BUG or corrupted
136 * filesystem, so abort the journal and leave a stack
137 * trace for forensic evidence.
133 */ 138 */
134 spin_lock(&journal->j_state_lock); 139 spin_lock(&journal->j_state_lock);
135 spin_lock(&journal->j_list_lock); 140 spin_lock(&journal->j_list_lock);
136 nblocks = jbd_space_needed(journal); 141 nblocks = jbd_space_needed(journal);
137 if (__log_space_left(journal) < nblocks) { 142 space_left = __log_space_left(journal);
143 if (space_left < nblocks) {
138 int chkpt = journal->j_checkpoint_transactions != NULL; 144 int chkpt = journal->j_checkpoint_transactions != NULL;
145 tid_t tid = 0;
139 146
147 if (journal->j_committing_transaction)
148 tid = journal->j_committing_transaction->t_tid;
140 spin_unlock(&journal->j_list_lock); 149 spin_unlock(&journal->j_list_lock);
141 spin_unlock(&journal->j_state_lock); 150 spin_unlock(&journal->j_state_lock);
142 if (chkpt) { 151 if (chkpt) {
143 log_do_checkpoint(journal); 152 log_do_checkpoint(journal);
153 } else if (cleanup_journal_tail(journal) == 0) {
154 /* We were able to recover space; yay! */
155 ;
156 } else if (tid) {
157 log_wait_commit(journal, tid);
144 } else { 158 } else {
145 printk(KERN_ERR "%s: no transactions\n", 159 printk(KERN_ERR "%s: needed %d blocks and "
146 __func__); 160 "only had %d space available\n",
161 __func__, nblocks, space_left);
162 printk(KERN_ERR "%s: no way to get more "
163 "journal space\n", __func__);
164 WARN_ON(1);
147 journal_abort(journal, 0); 165 journal_abort(journal, 0);
148 } 166 }
149
150 spin_lock(&journal->j_state_lock); 167 spin_lock(&journal->j_state_lock);
151 } else { 168 } else {
152 spin_unlock(&journal->j_list_lock); 169 spin_unlock(&journal->j_list_lock);
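
The hunk above replaces a single "no transactions, abort" path with a ladder
of fallbacks. A stubbed, stand-alone sketch of that decision order (the
booleans and the helper name are invented for illustration; the real code
inspects journal state under its locks rather than taking flags):

#include <stdio.h>
#include <stdbool.h>

static const char *pick_action(bool have_checkpointable_transactions,
			       bool cleanup_tail_freed_space,
			       bool have_committing_transaction)
{
	if (have_checkpointable_transactions)
		return "log_do_checkpoint()";
	if (cleanup_tail_freed_space)
		return "space recovered by cleanup_journal_tail()";
	if (have_committing_transaction)
		return "log_wait_commit() on the committing transaction";
	return "journal_abort(): no way to get more journal space";
}

int main(void)
{
	/* Nothing to checkpoint and the tail cannot move, but a commit is
	 * in flight: wait for it instead of declaring the journal dead. */
	printf("%s\n", pick_action(false, false, true));
	return 0;
}
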
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 9203c3332f17..9497718fe920 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -116,7 +116,7 @@ static int __try_to_free_cp_buf(struct journal_head *jh)
116 */ 116 */
117void __jbd2_log_wait_for_space(journal_t *journal) 117void __jbd2_log_wait_for_space(journal_t *journal)
118{ 118{
119 int nblocks; 119 int nblocks, space_left;
120 assert_spin_locked(&journal->j_state_lock); 120 assert_spin_locked(&journal->j_state_lock);
121 121
122 nblocks = jbd_space_needed(journal); 122 nblocks = jbd_space_needed(journal);
@@ -129,25 +129,43 @@ void __jbd2_log_wait_for_space(journal_t *journal)
129 /* 129 /*
130 * Test again, another process may have checkpointed while we 130 * Test again, another process may have checkpointed while we
131 * were waiting for the checkpoint lock. If there are no 131 * were waiting for the checkpoint lock. If there are no
132 * outstanding transactions there is nothing to checkpoint and 132 * transactions ready to be checkpointed, try to recover
133 * we can't make progress. Abort the journal in this case. 133 * journal space by calling cleanup_journal_tail(), and if
134 * that doesn't work, by waiting for the currently committing
135 * transaction to complete. If there is absolutely no way
136 * to make progress, this is either a BUG or corrupted
137 * filesystem, so abort the journal and leave a stack
138 * trace for forensic evidence.
134 */ 139 */
135 spin_lock(&journal->j_state_lock); 140 spin_lock(&journal->j_state_lock);
136 spin_lock(&journal->j_list_lock); 141 spin_lock(&journal->j_list_lock);
137 nblocks = jbd_space_needed(journal); 142 nblocks = jbd_space_needed(journal);
138 if (__jbd2_log_space_left(journal) < nblocks) { 143 space_left = __jbd2_log_space_left(journal);
144 if (space_left < nblocks) {
139 int chkpt = journal->j_checkpoint_transactions != NULL; 145 int chkpt = journal->j_checkpoint_transactions != NULL;
146 tid_t tid = 0;
140 147
148 if (journal->j_committing_transaction)
149 tid = journal->j_committing_transaction->t_tid;
141 spin_unlock(&journal->j_list_lock); 150 spin_unlock(&journal->j_list_lock);
142 spin_unlock(&journal->j_state_lock); 151 spin_unlock(&journal->j_state_lock);
143 if (chkpt) { 152 if (chkpt) {
144 jbd2_log_do_checkpoint(journal); 153 jbd2_log_do_checkpoint(journal);
154 } else if (jbd2_cleanup_journal_tail(journal) == 0) {
155 /* We were able to recover space; yay! */
156 ;
157 } else if (tid) {
158 jbd2_log_wait_commit(journal, tid);
145 } else { 159 } else {
146 printk(KERN_ERR "%s: no transactions\n", 160 printk(KERN_ERR "%s: needed %d blocks and "
147 __func__); 161 "only had %d space available\n",
162 __func__, nblocks, space_left);
163 printk(KERN_ERR "%s: no way to get more "
164 "journal space in %s\n", __func__,
165 journal->j_devname);
166 WARN_ON(1);
148 jbd2_journal_abort(journal, 0); 167 jbd2_journal_abort(journal, 0);
149 } 168 }
150
151 spin_lock(&journal->j_state_lock); 169 spin_lock(&journal->j_state_lock);
152 } else { 170 } else {
153 spin_unlock(&journal->j_list_lock); 171 spin_unlock(&journal->j_list_lock);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 783de118de92..e70d657a19f8 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1089,6 +1089,7 @@ journal_t * jbd2_journal_init_inode (struct inode *inode)
1089 if (!journal->j_wbuf) { 1089 if (!journal->j_wbuf) {
1090 printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n", 1090 printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
1091 __func__); 1091 __func__);
1092 jbd2_stats_proc_exit(journal);
1092 kfree(journal); 1093 kfree(journal);
1093 return NULL; 1094 return NULL;
1094 } 1095 }
@@ -1098,6 +1099,7 @@ journal_t * jbd2_journal_init_inode (struct inode *inode)
1098 if (err) { 1099 if (err) {
1099 printk(KERN_ERR "%s: Cannnot locate journal superblock\n", 1100 printk(KERN_ERR "%s: Cannnot locate journal superblock\n",
1100 __func__); 1101 __func__);
1102 jbd2_stats_proc_exit(journal);
1101 kfree(journal); 1103 kfree(journal);
1102 return NULL; 1104 return NULL;
1103 } 1105 }
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index 8adebd3e43c6..3cceef4ad2b7 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -85,15 +85,15 @@ static int jffs2_garbage_collect_thread(void *_c)
85 for (;;) { 85 for (;;) {
86 allow_signal(SIGHUP); 86 allow_signal(SIGHUP);
87 again: 87 again:
88 spin_lock(&c->erase_completion_lock);
88 if (!jffs2_thread_should_wake(c)) { 89 if (!jffs2_thread_should_wake(c)) {
89 set_current_state (TASK_INTERRUPTIBLE); 90 set_current_state (TASK_INTERRUPTIBLE);
91 spin_unlock(&c->erase_completion_lock);
90 D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n")); 92 D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n"));
91 /* Yes, there's a race here; we checked jffs2_thread_should_wake()
92 before setting current->state to TASK_INTERRUPTIBLE. But it doesn't
93 matter - We don't care if we miss a wakeup, because the GC thread
94 is only an optimisation anyway. */
95 schedule(); 93 schedule();
96 } 94 } else
95 spin_unlock(&c->erase_completion_lock);
96
97 97
98 /* This thread is purely an optimisation. But if it runs when 98 /* This thread is purely an optimisation. But if it runs when
99 other things could be running, it actually makes things a 99 other things could be running, it actually makes things a
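
The comment removed above used to argue that the check-then-sleep race did not
matter; the fix instead tests jffs2_thread_should_wake() under
erase_completion_lock and marks the task sleeping before the lock is dropped,
so a wakeup can no longer slip in between. A user-space analogy with a pthread
condition variable (an illustration of the lost-wakeup pattern, not the kernel
code, which uses task states and a spinlock instead):

#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool should_wake;

static void *gc_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!should_wake)		/* predicate tested under the lock */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	printf("gc thread woken\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, gc_thread, NULL);

	pthread_mutex_lock(&lock);	/* the waker takes the same lock */
	should_wake = true;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}
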
diff --git a/fs/jffs2/compr_lzo.c b/fs/jffs2/compr_lzo.c
index 47b045797e42..90cb60d09787 100644
--- a/fs/jffs2/compr_lzo.c
+++ b/fs/jffs2/compr_lzo.c
@@ -19,7 +19,7 @@
19 19
20static void *lzo_mem; 20static void *lzo_mem;
21static void *lzo_compress_buf; 21static void *lzo_compress_buf;
22static DEFINE_MUTEX(deflate_mutex); 22static DEFINE_MUTEX(deflate_mutex); /* for lzo_mem and lzo_compress_buf */
23 23
24static void free_workspace(void) 24static void free_workspace(void)
25{ 25{
@@ -49,18 +49,21 @@ static int jffs2_lzo_compress(unsigned char *data_in, unsigned char *cpage_out,
49 49
50 mutex_lock(&deflate_mutex); 50 mutex_lock(&deflate_mutex);
51 ret = lzo1x_1_compress(data_in, *sourcelen, lzo_compress_buf, &compress_size, lzo_mem); 51 ret = lzo1x_1_compress(data_in, *sourcelen, lzo_compress_buf, &compress_size, lzo_mem);
52 mutex_unlock(&deflate_mutex);
53
54 if (ret != LZO_E_OK) 52 if (ret != LZO_E_OK)
55 return -1; 53 goto fail;
56 54
57 if (compress_size > *dstlen) 55 if (compress_size > *dstlen)
58 return -1; 56 goto fail;
59 57
60 memcpy(cpage_out, lzo_compress_buf, compress_size); 58 memcpy(cpage_out, lzo_compress_buf, compress_size);
61 *dstlen = compress_size; 59 mutex_unlock(&deflate_mutex);
62 60
61 *dstlen = compress_size;
63 return 0; 62 return 0;
63
64 fail:
65 mutex_unlock(&deflate_mutex);
66 return -1;
64} 67}
65 68
66static int jffs2_lzo_decompress(unsigned char *data_in, unsigned char *cpage_out, 69static int jffs2_lzo_decompress(unsigned char *data_in, unsigned char *cpage_out,
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
index 0875b60b4bf7..21a052915aa9 100644
--- a/fs/jffs2/nodemgmt.c
+++ b/fs/jffs2/nodemgmt.c
@@ -261,9 +261,11 @@ static int jffs2_find_nextblock(struct jffs2_sb_info *c)
261 261
262 jffs2_sum_reset_collected(c->summary); /* reset collected summary */ 262 jffs2_sum_reset_collected(c->summary); /* reset collected summary */
263 263
264#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
264 /* adjust write buffer offset, else we get a non contiguous write bug */ 265 /* adjust write buffer offset, else we get a non contiguous write bug */
265 if (!(c->wbuf_ofs % c->sector_size) && !c->wbuf_len) 266 if (!(c->wbuf_ofs % c->sector_size) && !c->wbuf_len)
266 c->wbuf_ofs = 0xffffffff; 267 c->wbuf_ofs = 0xffffffff;
268#endif
267 269
268 D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset)); 270 D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset));
269 271
diff --git a/fs/msdos/Makefile b/fs/msdos/Makefile
deleted file mode 100644
index ea67646fcb95..000000000000
--- a/fs/msdos/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
1#
2# Makefile for the Linux msdos filesystem routines.
3#
4
5obj-$(CONFIG_MSDOS_FS) += msdos.o
6
7msdos-y := namei.o
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 848a03e83a42..4433c8f00163 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1875,11 +1875,11 @@ static int nfsd_buffered_readdir(struct file *file, filldir_t func,
1875 return -ENOMEM; 1875 return -ENOMEM;
1876 1876
1877 offset = *offsetp; 1877 offset = *offsetp;
1878 cdp->err = nfserr_eof; /* will be cleared on successful read */
1879 1878
1880 while (1) { 1879 while (1) {
1881 unsigned int reclen; 1880 unsigned int reclen;
1882 1881
1882 cdp->err = nfserr_eof; /* will be cleared on successful read */
1883 buf.used = 0; 1883 buf.used = 0;
1884 buf.full = 0; 1884 buf.full = 0;
1885 1885
@@ -1912,9 +1912,6 @@ static int nfsd_buffered_readdir(struct file *file, filldir_t func,
1912 de = (struct buffered_dirent *)((char *)de + reclen); 1912 de = (struct buffered_dirent *)((char *)de + reclen);
1913 } 1913 }
1914 offset = vfs_llseek(file, 0, SEEK_CUR); 1914 offset = vfs_llseek(file, 0, SEEK_CUR);
1915 cdp->err = nfserr_eof;
1916 if (!buf.full)
1917 break;
1918 } 1915 }
1919 1916
1920 done: 1917 done:
diff --git a/fs/vfat/Makefile b/fs/vfat/Makefile
deleted file mode 100644
index 40f2798a4f08..000000000000
--- a/fs/vfat/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
1#
2# Makefile for the linux vfat-filesystem routines.
3#
4
5obj-$(CONFIG_VFAT_FS) += vfat.o
6
7vfat-y := namei.o
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index ae060c62aff1..18546d8eb78e 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -34,7 +34,7 @@
34 34
35#define __pfn_to_page(pfn) \ 35#define __pfn_to_page(pfn) \
36({ unsigned long __pfn = (pfn); \ 36({ unsigned long __pfn = (pfn); \
37 unsigned long __nid = arch_pfn_to_nid(pfn); \ 37 unsigned long __nid = arch_pfn_to_nid(__pfn); \
38 NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\ 38 NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\
39}) 39})
40 40
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 1c91a176b9ae..6a642098e5c3 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -236,12 +236,16 @@ static inline void *bio_data(struct bio *bio)
236#define __BVEC_END(bio) bio_iovec_idx((bio), (bio)->bi_vcnt - 1) 236#define __BVEC_END(bio) bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
237#define __BVEC_START(bio) bio_iovec_idx((bio), (bio)->bi_idx) 237#define __BVEC_START(bio) bio_iovec_idx((bio), (bio)->bi_idx)
238 238
239/* Default implementation of BIOVEC_PHYS_MERGEABLE */
240#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
241 ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
242
239/* 243/*
240 * allow arch override, for eg virtualized architectures (put in asm/io.h) 244 * allow arch override, for eg virtualized architectures (put in asm/io.h)
241 */ 245 */
242#ifndef BIOVEC_PHYS_MERGEABLE 246#ifndef BIOVEC_PHYS_MERGEABLE
243#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ 247#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
244 ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) 248 __BIOVEC_PHYS_MERGEABLE(vec1, vec2)
245#endif 249#endif
246 250
247#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \ 251#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
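
The hunk above only factors the default test out into __BIOVEC_PHYS_MERGEABLE
so that architectures which override BIOVEC_PHYS_MERGEABLE (for example
virtualized architectures, as the existing comment says) can still fall back
to it. The test itself is simply whether the first segment ends exactly where
the second one starts in physical memory; a stand-alone sketch with a
simplified segment struct (not the kernel's bio_vec):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct seg {
	uint64_t phys;		/* physical start address */
	uint32_t len;		/* length in bytes */
};

static bool phys_mergeable(const struct seg *a, const struct seg *b)
{
	return a->phys + a->len == b->phys;
}

int main(void)
{
	struct seg a = { 0x1000, 0x200 };
	struct seg b = { 0x1200, 0x100 };	/* begins where a ends */
	struct seg c = { 0x2000, 0x100 };	/* gap after a */

	printf("a+b: %s\n", phys_mergeable(&a, &b) ? "mergeable" : "not mergeable");
	printf("a+c: %s\n", phys_mergeable(&a, &c) ? "mergeable" : "not mergeable");
	return 0;
}
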
diff --git a/include/linux/cnt32_to_63.h b/include/linux/cnt32_to_63.h
index 8c0f9505b48c..7605fdd1eb65 100644
--- a/include/linux/cnt32_to_63.h
+++ b/include/linux/cnt32_to_63.h
@@ -16,6 +16,7 @@
16#include <linux/compiler.h> 16#include <linux/compiler.h>
17#include <linux/types.h> 17#include <linux/types.h>
18#include <asm/byteorder.h> 18#include <asm/byteorder.h>
19#include <asm/system.h>
19 20
20/* this is used only to give gcc a clue about good code generation */ 21/* this is used only to give gcc a clue about good code generation */
21union cnt32_to_63 { 22union cnt32_to_63 {
@@ -53,11 +54,19 @@ union cnt32_to_63 {
53 * needed increment. And any race in updating the value in memory is harmless 54 * needed increment. And any race in updating the value in memory is harmless
54 * as the same value would simply be stored more than once. 55 * as the same value would simply be stored more than once.
55 * 56 *
56 * The only restriction for the algorithm to work properly is that this 57 * The restrictions for the algorithm to work properly are:
57 * code must be executed at least once per each half period of the 32-bit 58 *
58 * counter to properly update the state bit in memory. This is usually not a 59 * 1) this code must be called at least once per each half period of the
59 * problem in practice, but if it is then a kernel timer could be scheduled 60 * 32-bit counter;
60 * to manage for this code to be executed often enough. 61 *
62 * 2) this code must not be preempted for a duration longer than the
63 * 32-bit counter half period minus the longest period between two
64 * calls to this code.
65 *
66 * Those requirements ensure proper update to the state bit in memory.
67 * This is usually not a problem in practice, but if it is then a kernel
68 * timer should be scheduled to manage for this code to be executed often
69 * enough.
61 * 70 *
62 * Note that the top bit (bit 63) in the returned value should be considered 71 * Note that the top bit (bit 63) in the returned value should be considered
63 * as garbage. It is not cleared here because callers are likely to use a 72 * as garbage. It is not cleared here because callers are likely to use a
@@ -68,9 +77,10 @@ union cnt32_to_63 {
68 */ 77 */
69#define cnt32_to_63(cnt_lo) \ 78#define cnt32_to_63(cnt_lo) \
70({ \ 79({ \
71 static volatile u32 __m_cnt_hi; \ 80 static u32 __m_cnt_hi; \
72 union cnt32_to_63 __x; \ 81 union cnt32_to_63 __x; \
73 __x.hi = __m_cnt_hi; \ 82 __x.hi = __m_cnt_hi; \
83 smp_rmb(); \
74 __x.lo = (cnt_lo); \ 84 __x.lo = (cnt_lo); \
75 if (unlikely((s32)(__x.hi ^ __x.lo) < 0)) \ 85 if (unlikely((s32)(__x.hi ^ __x.lo) < 0)) \
76 __m_cnt_hi = __x.hi = (__x.hi ^ 0x80000000) + (__x.hi >> 31); \ 86 __m_cnt_hi = __x.hi = (__x.hi ^ 0x80000000) + (__x.hi >> 31); \
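
The algorithm in this header is worth seeing outside the macro: the saved high
word mirrors the expected top bit of the 32-bit counter, and whenever the two
disagree the high half is advanced, so the combined value stays monotonic as
long as the code runs at least once per half period, which is exactly the
requirement the reworked comment above spells out. A single-threaded
user-space model (the kernel macro additionally needs the smp_rmb() added
above, and bit 63 of the result is garbage that callers must mask):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

static uint32_t m_cnt_hi;	/* persistent state, one per counter */

static uint64_t cnt32_to_63(uint32_t cnt_lo)
{
	uint32_t hi = m_cnt_hi;

	/* In the kernel, smp_rmb() here orders the read of hi before lo. */
	if ((int32_t)(hi ^ cnt_lo) < 0) {
		/* Top bits disagree: flip hi's top bit and, when the counter
		 * has just wrapped past zero, carry into the upper half. */
		hi = (hi ^ 0x80000000u) + (hi >> 31);
		m_cnt_hi = hi;
	}
	return ((uint64_t)hi << 32) | cnt_lo;
}

int main(void)
{
	/* Samples taken often enough, i.e. at least once per half period. */
	uint32_t samples[] = { 0x10000000, 0x90000000, 0xf0000000,
			       0x00000010, 0x70000000 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		uint64_t v = cnt32_to_63(samples[i]) & ~(1ULL << 63);

		printf("lo=0x%08" PRIx32 " -> 0x%016" PRIx64 "\n",
		       samples[i], v);
	}
	return 0;
}
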
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index d3219d73f8e6..21e1dd43e52a 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -5,6 +5,9 @@
5 * Cpumasks provide a bitmap suitable for representing the 5 * Cpumasks provide a bitmap suitable for representing the
6 * set of CPU's in a system, one bit position per CPU number. 6 * set of CPU's in a system, one bit position per CPU number.
7 * 7 *
8 * The new cpumask_ ops take a "struct cpumask *"; the old ones
9 * use cpumask_t.
10 *
8 * See detailed comments in the file linux/bitmap.h describing the 11 * See detailed comments in the file linux/bitmap.h describing the
9 * data type on which these cpumasks are based. 12 * data type on which these cpumasks are based.
10 * 13 *
@@ -31,7 +34,7 @@
31 * will span the entire range of NR_CPUS. 34 * will span the entire range of NR_CPUS.
32 * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 35 * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
33 * 36 *
34 * The available cpumask operations are: 37 * The obsolescent cpumask operations are:
35 * 38 *
36 * void cpu_set(cpu, mask) turn on bit 'cpu' in mask 39 * void cpu_set(cpu, mask) turn on bit 'cpu' in mask
37 * void cpu_clear(cpu, mask) turn off bit 'cpu' in mask 40 * void cpu_clear(cpu, mask) turn off bit 'cpu' in mask
@@ -138,7 +141,7 @@
138#include <linux/threads.h> 141#include <linux/threads.h>
139#include <linux/bitmap.h> 142#include <linux/bitmap.h>
140 143
141typedef struct { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; 144typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
142extern cpumask_t _unused_cpumask_arg_; 145extern cpumask_t _unused_cpumask_arg_;
143 146
144#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst)) 147#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
@@ -527,4 +530,556 @@ extern cpumask_t cpu_active_map;
527#define for_each_online_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_online_map) 530#define for_each_online_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_online_map)
528#define for_each_present_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_present_map) 531#define for_each_present_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_present_map)
529 532
533/* These are the new versions of the cpumask operators: passed by pointer.
534 * The older versions will be implemented in terms of these, then deleted. */
535#define cpumask_bits(maskp) ((maskp)->bits)
536
537#if NR_CPUS <= BITS_PER_LONG
538#define CPU_BITS_ALL \
539{ \
540 [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
541}
542
543/* This produces more efficient code. */
544#define nr_cpumask_bits NR_CPUS
545
546#else /* NR_CPUS > BITS_PER_LONG */
547
548#define CPU_BITS_ALL \
549{ \
550 [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
551 [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
552}
553
554#define nr_cpumask_bits nr_cpu_ids
555#endif /* NR_CPUS > BITS_PER_LONG */
556
557/* verify cpu argument to cpumask_* operators */
558static inline unsigned int cpumask_check(unsigned int cpu)
559{
560#ifdef CONFIG_DEBUG_PER_CPU_MAPS
561 WARN_ON_ONCE(cpu >= nr_cpumask_bits);
562#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
563 return cpu;
564}
565
566#if NR_CPUS == 1
567/* Uniprocessor. Assume all masks are "1". */
568static inline unsigned int cpumask_first(const struct cpumask *srcp)
569{
570 return 0;
571}
572
573/* Valid inputs for n are -1 and 0. */
574static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
575{
576 return n+1;
577}
578
579static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
580{
581 return n+1;
582}
583
584static inline unsigned int cpumask_next_and(int n,
585 const struct cpumask *srcp,
586 const struct cpumask *andp)
587{
588 return n+1;
589}
590
591/* cpu must be a valid cpu, ie 0, so there's no other choice. */
592static inline unsigned int cpumask_any_but(const struct cpumask *mask,
593 unsigned int cpu)
594{
595 return 1;
596}
597
598#define for_each_cpu(cpu, mask) \
599 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
600#define for_each_cpu_and(cpu, mask, and) \
601 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
602#else
603/**
604 * cpumask_first - get the first cpu in a cpumask
605 * @srcp: the cpumask pointer
606 *
607 * Returns >= nr_cpu_ids if no cpus set.
608 */
609static inline unsigned int cpumask_first(const struct cpumask *srcp)
610{
611 return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
612}
613
614/**
615 * cpumask_next - get the next cpu in a cpumask
616 * @n: the cpu prior to the place to search (ie. return will be > @n)
617 * @srcp: the cpumask pointer
618 *
619 * Returns >= nr_cpu_ids if no further cpus set.
620 */
621static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
622{
623 /* -1 is a legal arg here. */
624 if (n != -1)
625 cpumask_check(n);
626 return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
627}
628
629/**
630 * cpumask_next_zero - get the next unset cpu in a cpumask
631 * @n: the cpu prior to the place to search (ie. return will be > @n)
632 * @srcp: the cpumask pointer
633 *
634 * Returns >= nr_cpu_ids if no further cpus unset.
635 */
636static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
637{
638 /* -1 is a legal arg here. */
639 if (n != -1)
640 cpumask_check(n);
641 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
642}
643
644int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
645int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
646
647/**
648 * for_each_cpu - iterate over every cpu in a mask
649 * @cpu: the (optionally unsigned) integer iterator
650 * @mask: the cpumask pointer
651 *
652 * After the loop, cpu is >= nr_cpu_ids.
653 */
654#define for_each_cpu(cpu, mask) \
655 for ((cpu) = -1; \
656 (cpu) = cpumask_next((cpu), (mask)), \
657 (cpu) < nr_cpu_ids;)
658
659/**
660 * for_each_cpu_and - iterate over every cpu in both masks
661 * @cpu: the (optionally unsigned) integer iterator
662 * @mask: the first cpumask pointer
663 * @and: the second cpumask pointer
664 *
665 * This saves a temporary CPU mask in many places. It is equivalent to:
666 * struct cpumask tmp;
667 * cpumask_and(&tmp, &mask, &and);
668 * for_each_cpu(cpu, &tmp)
669 * ...
670 *
671 * After the loop, cpu is >= nr_cpu_ids.
672 */
673#define for_each_cpu_and(cpu, mask, and) \
674 for ((cpu) = -1; \
675 (cpu) = cpumask_next_and((cpu), (mask), (and)), \
676 (cpu) < nr_cpu_ids;)
677#endif /* SMP */
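for_each_cpu_and() walks the intersection of two masks without materializing the temporary cpumask spelled out in the comment above. A sketch of typical use, with the helper name invented for illustration and cpu_online_mask as defined further down in this header:

/* Sketch only: count the online CPUs inside an arbitrary affinity mask,
 * without allocating a temporary cpumask for the intersection. */
static unsigned int count_online_in(const struct cpumask *affinity)
{
	unsigned int cpu, n = 0;

	for_each_cpu_and(cpu, affinity, cpu_online_mask)
		n++;

	return n;
}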
678
679#define CPU_BITS_NONE \
680{ \
681 [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
682}
683
684#define CPU_BITS_CPU0 \
685{ \
686 [0] = 1UL \
687}
688
689/**
690 * cpumask_set_cpu - set a cpu in a cpumask
691 * @cpu: cpu number (< nr_cpu_ids)
692 * @dstp: the cpumask pointer
693 */
694static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
695{
696 set_bit(cpumask_check(cpu), cpumask_bits(dstp));
697}
698
699/**
700 * cpumask_clear_cpu - clear a cpu in a cpumask
701 * @cpu: cpu number (< nr_cpu_ids)
702 * @dstp: the cpumask pointer
703 */
704static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
705{
706 clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
707}
708
709/**
710 * cpumask_test_cpu - test for a cpu in a cpumask
711 * @cpu: cpu number (< nr_cpu_ids)
712 * @cpumask: the cpumask pointer
713 *
714 * No static inline type checking - see Subtlety (1) above.
715 */
716#define cpumask_test_cpu(cpu, cpumask) \
717 test_bit(cpumask_check(cpu), (cpumask)->bits)
718
719/**
720 * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
721 * @cpu: cpu number (< nr_cpu_ids)
722 * @cpumask: the cpumask pointer
723 *
724 * test_and_set_bit wrapper for cpumasks.
725 */
726static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
727{
728 return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
729}
730
731/**
732 * cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask
733 * @dstp: the cpumask pointer
734 */
735static inline void cpumask_setall(struct cpumask *dstp)
736{
737 bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits);
738}
739
740/**
741 * cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask
742 * @dstp: the cpumask pointer
743 */
744static inline void cpumask_clear(struct cpumask *dstp)
745{
746 bitmap_zero(cpumask_bits(dstp), nr_cpumask_bits);
747}
748
749/**
750 * cpumask_and - *dstp = *src1p & *src2p
751 * @dstp: the cpumask result
752 * @src1p: the first input
753 * @src2p: the second input
754 */
755static inline void cpumask_and(struct cpumask *dstp,
756 const struct cpumask *src1p,
757 const struct cpumask *src2p)
758{
759 bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
760 cpumask_bits(src2p), nr_cpumask_bits);
761}
762
763/**
764 * cpumask_or - *dstp = *src1p | *src2p
765 * @dstp: the cpumask result
766 * @src1p: the first input
767 * @src2p: the second input
768 */
769static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
770 const struct cpumask *src2p)
771{
772 bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
773 cpumask_bits(src2p), nr_cpumask_bits);
774}
775
776/**
777 * cpumask_xor - *dstp = *src1p ^ *src2p
778 * @dstp: the cpumask result
779 * @src1p: the first input
780 * @src2p: the second input
781 */
782static inline void cpumask_xor(struct cpumask *dstp,
783 const struct cpumask *src1p,
784 const struct cpumask *src2p)
785{
786 bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
787 cpumask_bits(src2p), nr_cpumask_bits);
788}
789
790/**
791 * cpumask_andnot - *dstp = *src1p & ~*src2p
792 * @dstp: the cpumask result
793 * @src1p: the first input
794 * @src2p: the second input
795 */
796static inline void cpumask_andnot(struct cpumask *dstp,
797 const struct cpumask *src1p,
798 const struct cpumask *src2p)
799{
800 bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
801 cpumask_bits(src2p), nr_cpumask_bits);
802}
803
804/**
805 * cpumask_complement - *dstp = ~*srcp
806 * @dstp: the cpumask result
807 * @srcp: the input to invert
808 */
809static inline void cpumask_complement(struct cpumask *dstp,
810 const struct cpumask *srcp)
811{
812 bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp),
813 nr_cpumask_bits);
814}
815
816/**
817 * cpumask_equal - *src1p == *src2p
818 * @src1p: the first input
819 * @src2p: the second input
820 */
821static inline bool cpumask_equal(const struct cpumask *src1p,
822 const struct cpumask *src2p)
823{
824 return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
825 nr_cpumask_bits);
826}
827
828/**
829 * cpumask_intersects - (*src1p & *src2p) != 0
830 * @src1p: the first input
831 * @src2p: the second input
832 */
833static inline bool cpumask_intersects(const struct cpumask *src1p,
834 const struct cpumask *src2p)
835{
836 return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
837 nr_cpumask_bits);
838}
839
840/**
841 * cpumask_subset - (*src1p & ~*src2p) == 0
842 * @src1p: the first input
843 * @src2p: the second input
844 */
845static inline int cpumask_subset(const struct cpumask *src1p,
846 const struct cpumask *src2p)
847{
848 return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
849 nr_cpumask_bits);
850}
851
852/**
853 * cpumask_empty - *srcp == 0
854 * @srcp: the cpumask to check that all cpus < nr_cpu_ids are clear.
855 */
856static inline bool cpumask_empty(const struct cpumask *srcp)
857{
858 return bitmap_empty(cpumask_bits(srcp), nr_cpumask_bits);
859}
860
861/**
862 * cpumask_full - *srcp == 0xFFFFFFFF...
863 * @srcp: the cpumask to check that all cpus < nr_cpu_ids are set.
864 */
865static inline bool cpumask_full(const struct cpumask *srcp)
866{
867 return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits);
868}
869
870/**
871 * cpumask_weight - Count of bits in *srcp
872 * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
873 */
874static inline unsigned int cpumask_weight(const struct cpumask *srcp)
875{
876 return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
877}
878
879/**
880 * cpumask_shift_right - *dstp = *srcp >> n
881 * @dstp: the cpumask result
882 * @srcp: the input to shift
883 * @n: the number of bits to shift by
884 */
885static inline void cpumask_shift_right(struct cpumask *dstp,
886 const struct cpumask *srcp, int n)
887{
888 bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
889 nr_cpumask_bits);
890}
891
892/**
893 * cpumask_shift_left - *dstp = *srcp << n
894 * @dstp: the cpumask result
895 * @srcp: the input to shift
896 * @n: the number of bits to shift by
897 */
898static inline void cpumask_shift_left(struct cpumask *dstp,
899 const struct cpumask *srcp, int n)
900{
901 bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n,
902 nr_cpumask_bits);
903}
904
905/**
906 * cpumask_copy - *dstp = *srcp
907 * @dstp: the result
908 * @srcp: the input cpumask
909 */
910static inline void cpumask_copy(struct cpumask *dstp,
911 const struct cpumask *srcp)
912{
913 bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits);
914}
915
916/**
917 * cpumask_any - pick a "random" cpu from *srcp
918 * @srcp: the input cpumask
919 *
920 * Returns >= nr_cpu_ids if no cpus set.
921 */
922#define cpumask_any(srcp) cpumask_first(srcp)
923
924/**
925 * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
926 * @src1p: the first input
927 * @src2p: the second input
928 *
929 * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
930 */
931#define cpumask_first_and(src1p, src2p) cpumask_next_and(-1, (src1p), (src2p))
932
933/**
934 * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
935 * @mask1: the first input cpumask
936 * @mask2: the second input cpumask
937 *
938 * Returns >= nr_cpu_ids if no cpus set.
939 */
940#define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2))
941
942/**
943 * cpumask_of - the cpumask containing just a given cpu
944 * @cpu: the cpu (<= nr_cpu_ids)
945 */
946#define cpumask_of(cpu) (get_cpu_mask(cpu))
947
948/**
949 * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
950 * @bitmap: the bitmap
951 *
952 * There are a few places where cpumask_var_t isn't appropriate and
953 * static cpumasks must be used (eg. very early boot), yet we don't
954 * expose the definition of 'struct cpumask'.
955 *
956 * This does the conversion, and can be used as a constant initializer.
957 */
958#define to_cpumask(bitmap) \
959 ((struct cpumask *)(1 ? (bitmap) \
960 : (void *)sizeof(__check_is_bitmap(bitmap))))
961
962static inline int __check_is_bitmap(const unsigned long *bitmap)
963{
964 return 1;
965}
966
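to_cpumask() is intended for the few static-bitmap cases; this header itself uses it for cpu_all_mask below. A sketch of the pattern, with the names boot_cpus_bits and booted_on() invented for illustration:

/* Sketch of the to_cpumask() pattern: a static NR_CPUS bitmap, initialized
 * with one of the CPU_BITS_* constants, then viewed as a struct cpumask *. */
static DECLARE_BITMAP(boot_cpus_bits, NR_CPUS) = CPU_BITS_CPU0;
#define boot_cpus_mask	to_cpumask(boot_cpus_bits)

static bool booted_on(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, boot_cpus_mask);
}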
967/**
968 * cpumask_size - size to allocate for a 'struct cpumask' in bytes
969 *
970 * This will eventually be a runtime variable, depending on nr_cpu_ids.
971 */
972static inline size_t cpumask_size(void)
973{
974 /* FIXME: Once all cpumask assignments are eliminated, this
975 * can be nr_cpumask_bits */
976 return BITS_TO_LONGS(NR_CPUS) * sizeof(long);
977}
978
979/*
980 * cpumask_var_t: struct cpumask for stack usage.
981 *
982 * Oh, the wicked games we play! In order to make kernel coding a
983 * little more difficult, we typedef cpumask_var_t to an array or a
984 * pointer: doing &mask on an array is a noop, so it still works.
985 *
986 * ie.
987 * cpumask_var_t tmpmask;
988 * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
989 * return -ENOMEM;
990 *
991 * ... use 'tmpmask' like a normal struct cpumask * ...
992 *
993 * free_cpumask_var(tmpmask);
994 */
995#ifdef CONFIG_CPUMASK_OFFSTACK
996typedef struct cpumask *cpumask_var_t;
997
998bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
999void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
1000void free_cpumask_var(cpumask_var_t mask);
1001void free_bootmem_cpumask_var(cpumask_var_t mask);
1002
1003#else
1004typedef struct cpumask cpumask_var_t[1];
1005
1006static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
1007{
1008 return true;
1009}
1010
1011static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
1012{
1013}
1014
1015static inline void free_cpumask_var(cpumask_var_t mask)
1016{
1017}
1018
1019static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
1020{
1021}
1022#endif /* CONFIG_CPUMASK_OFFSTACK */
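Putting the allocation helpers together with the pointer-based operators above, a hypothetical caller could look like the sketch below (the function name and the particular mask arithmetic are illustrative only):

/* Sketch: count CPUs that are present but not yet online, using a
 * cpumask_var_t scratch mask so the stack stays small when NR_CPUS is
 * large and CONFIG_CPUMASK_OFFSTACK is set. */
static int count_offline_present_cpus(void)
{
	cpumask_var_t tmp;
	int ret;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	cpumask_andnot(tmp, cpu_present_mask, cpu_online_mask);
	ret = cpumask_weight(tmp);

	free_cpumask_var(tmp);
	return ret;
}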
1023
1024/* The pointer versions of the maps, these will become the primary versions. */
1025#define cpu_possible_mask ((const struct cpumask *)&cpu_possible_map)
1026#define cpu_online_mask ((const struct cpumask *)&cpu_online_map)
1027#define cpu_present_mask ((const struct cpumask *)&cpu_present_map)
1028#define cpu_active_mask ((const struct cpumask *)&cpu_active_map)
1029
1030/* It's common to want to use cpu_all_mask in struct member initializers,
1031 * so it has to refer to an address rather than a pointer. */
1032extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
1033#define cpu_all_mask to_cpumask(cpu_all_bits)
1034
1035/* First bits of cpu_bit_bitmap are in fact unset. */
1036#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])
1037
1038/* Wrappers for arch boot code to manipulate normally-constant masks */
1039static inline void set_cpu_possible(unsigned int cpu, bool possible)
1040{
1041 if (possible)
1042 cpumask_set_cpu(cpu, &cpu_possible_map);
1043 else
1044 cpumask_clear_cpu(cpu, &cpu_possible_map);
1045}
1046
1047static inline void set_cpu_present(unsigned int cpu, bool present)
1048{
1049 if (present)
1050 cpumask_set_cpu(cpu, &cpu_present_map);
1051 else
1052 cpumask_clear_cpu(cpu, &cpu_present_map);
1053}
1054
1055static inline void set_cpu_online(unsigned int cpu, bool online)
1056{
1057 if (online)
1058 cpumask_set_cpu(cpu, &cpu_online_map);
1059 else
1060 cpumask_clear_cpu(cpu, &cpu_online_map);
1061}
1062
1063static inline void set_cpu_active(unsigned int cpu, bool active)
1064{
1065 if (active)
1066 cpumask_set_cpu(cpu, &cpu_active_map);
1067 else
1068 cpumask_clear_cpu(cpu, &cpu_active_map);
1069}
1070
1071static inline void init_cpu_present(const struct cpumask *src)
1072{
1073 cpumask_copy(&cpu_present_map, src);
1074}
1075
1076static inline void init_cpu_possible(const struct cpumask *src)
1077{
1078 cpumask_copy(&cpu_possible_map, src);
1079}
1080
1081static inline void init_cpu_online(const struct cpumask *src)
1082{
1083 cpumask_copy(&cpu_online_map, src);
1084}
530#endif /* __LINUX_CPUMASK_H */ 1085#endif /* __LINUX_CPUMASK_H */
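The set_cpu_*() and init_cpu_*() wrappers above give arch boot code a typed way to seed the normally-constant maps. A sketch of how early setup code might use them (firmware_cpu_count is a hypothetical value read from firmware tables; error handling is omitted):

/* Sketch: mark the CPUs reported by firmware as possible and present,
 * and the boot CPU as online. Illustrative only. */
static void __init register_boot_cpus(unsigned int firmware_cpu_count)
{
	unsigned int cpu;

	for (cpu = 0; cpu < firmware_cpu_count && cpu < (unsigned int)nr_cpu_ids; cpu++) {
		set_cpu_possible(cpu, true);
		set_cpu_present(cpu, true);
	}
	set_cpu_online(0, true);	/* the boot CPU is already running */
}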
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 2b3645b1acf4..07e510a3b00a 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -239,7 +239,7 @@ static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
239 timer->_softexpires = ktime_add_safe(timer->_softexpires, time); 239 timer->_softexpires = ktime_add_safe(timer->_softexpires, time);
240} 240}
241 241
242static inline void hrtimer_add_expires_ns(struct hrtimer *timer, unsigned long ns) 242static inline void hrtimer_add_expires_ns(struct hrtimer *timer, u64 ns)
243{ 243{
244 timer->_expires = ktime_add_ns(timer->_expires, ns); 244 timer->_expires = ktime_add_ns(timer->_expires, ns);
245 timer->_softexpires = ktime_add_ns(timer->_softexpires, ns); 245 timer->_softexpires = ktime_add_ns(timer->_softexpires, ns);
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 9e7b49b8062d..a5cb0c3f6dcf 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -114,6 +114,8 @@ extern u16 vlan_dev_vlan_id(const struct net_device *dev);
114 114
115extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, 115extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
116 u16 vlan_tci, int polling); 116 u16 vlan_tci, int polling);
117extern int vlan_hwaccel_do_receive(struct sk_buff *skb);
118
117#else 119#else
118static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev) 120static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
119{ 121{
@@ -133,6 +135,11 @@ static inline int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
133 BUG(); 135 BUG();
134 return NET_XMIT_SUCCESS; 136 return NET_XMIT_SUCCESS;
135} 137}
138
139static inline int vlan_hwaccel_do_receive(struct sk_buff *skb)
140{
141 return 0;
142}
136#endif 143#endif
137 144
138/** 145/**
diff --git a/include/linux/libata.h b/include/linux/libata.h
index f5441edee55f..59b0f1c807b5 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -373,6 +373,8 @@ enum {
373 ATA_HORKAGE_IVB = (1 << 8), /* cbl det validity bit bugs */ 373 ATA_HORKAGE_IVB = (1 << 8), /* cbl det validity bit bugs */
374 ATA_HORKAGE_STUCK_ERR = (1 << 9), /* stuck ERR on next PACKET */ 374 ATA_HORKAGE_STUCK_ERR = (1 << 9), /* stuck ERR on next PACKET */
375 ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */ 375 ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */
376 ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands
377 not multiple of 16 bytes */
376 378
377 /* DMA mask for user DMA control: User visible values; DO NOT 379 /* DMA mask for user DMA control: User visible values; DO NOT
378 renumber */ 380 renumber */
@@ -696,6 +698,7 @@ struct ata_port {
696 unsigned int cbl; /* cable type; ATA_CBL_xxx */ 698 unsigned int cbl; /* cable type; ATA_CBL_xxx */
697 699
698 struct ata_queued_cmd qcmd[ATA_MAX_QUEUE]; 700 struct ata_queued_cmd qcmd[ATA_MAX_QUEUE];
701 unsigned long qc_allocated;
699 unsigned int qc_active; 702 unsigned int qc_active;
700 int nr_active_links; /* #links with active qcs */ 703 int nr_active_links; /* #links with active qcs */
701 704
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index ee6e822d5994..403aa505f27e 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -130,7 +130,7 @@ struct mmc_card {
130#define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR) 130#define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR)
131 131
132#define mmc_card_name(c) ((c)->cid.prod_name) 132#define mmc_card_name(c) ((c)->cid.prod_name)
133#define mmc_card_id(c) ((c)->dev.bus_id) 133#define mmc_card_id(c) (dev_name(&(c)->dev))
134 134
135#define mmc_list_to_card(l) container_of(l, struct mmc_card, node) 135#define mmc_list_to_card(l) container_of(l, struct mmc_card, node)
136#define mmc_get_drvdata(c) dev_get_drvdata(&(c)->dev) 136#define mmc_get_drvdata(c) dev_get_drvdata(&(c)->dev)
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index bde891f64591..f842f234e44f 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -176,7 +176,7 @@ static inline void *mmc_priv(struct mmc_host *host)
176 176
177#define mmc_dev(x) ((x)->parent) 177#define mmc_dev(x) ((x)->parent)
178#define mmc_classdev(x) (&(x)->class_dev) 178#define mmc_classdev(x) (&(x)->class_dev)
179#define mmc_hostname(x) ((x)->class_dev.bus_id) 179#define mmc_hostname(x) (dev_name(&(x)->class_dev))
180 180
181extern int mmc_suspend_host(struct mmc_host *, pm_message_t); 181extern int mmc_suspend_host(struct mmc_host *, pm_message_t);
182extern int mmc_resume_host(struct mmc_host *); 182extern int mmc_resume_host(struct mmc_host *);
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index 07bee4a0d457..451bdfc85830 100644
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -63,7 +63,7 @@ struct sdio_func {
63 63
64#define sdio_func_set_present(f) ((f)->state |= SDIO_STATE_PRESENT) 64#define sdio_func_set_present(f) ((f)->state |= SDIO_STATE_PRESENT)
65 65
66#define sdio_func_id(f) ((f)->dev.bus_id) 66#define sdio_func_id(f) (dev_name(&(f)->dev))
67 67
68#define sdio_get_drvdata(f) dev_get_drvdata(&(f)->dev) 68#define sdio_get_drvdata(f) dev_get_drvdata(&(f)->dev)
69#define sdio_set_drvdata(f,d) dev_set_drvdata(&(f)->dev, d) 69#define sdio_set_drvdata(f,d) dev_set_drvdata(&(f)->dev, d)
diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h
index ba63858056c7..e0a9b207920d 100644
--- a/include/linux/msdos_fs.h
+++ b/include/linux/msdos_fs.h
@@ -46,11 +46,6 @@
46#define DELETED_FLAG 0xe5 /* marks file as deleted when in name[0] */ 46#define DELETED_FLAG 0xe5 /* marks file as deleted when in name[0] */
47#define IS_FREE(n) (!*(n) || *(n) == DELETED_FLAG) 47#define IS_FREE(n) (!*(n) || *(n) == DELETED_FLAG)
48 48
49/* valid file mode bits */
50#define MSDOS_VALID_MODE (S_IFREG | S_IFDIR | S_IRWXU | S_IRWXG | S_IRWXO)
51/* Convert attribute bits and a mask to the UNIX mode. */
52#define MSDOS_MKMODE(a, m) (m & (a & ATTR_RO ? S_IRUGO|S_IXUGO : S_IRWXUGO))
53
54#define MSDOS_NAME 11 /* maximum name length */ 49#define MSDOS_NAME 11 /* maximum name length */
55#define MSDOS_LONGNAME 256 /* maximum name length */ 50#define MSDOS_LONGNAME 256 /* maximum name length */
56#define MSDOS_SLOTS 21 /* max # of slots for short and long names */ 51#define MSDOS_SLOTS 21 /* max # of slots for short and long names */
@@ -167,282 +162,10 @@ struct msdos_dir_slot {
167}; 162};
168 163
169#ifdef __KERNEL__ 164#ifdef __KERNEL__
170
171#include <linux/buffer_head.h>
172#include <linux/string.h>
173#include <linux/nls.h>
174#include <linux/fs.h>
175#include <linux/mutex.h>
176
177/*
178 * vfat shortname flags
179 */
180#define VFAT_SFN_DISPLAY_LOWER 0x0001 /* convert to lowercase for display */
181#define VFAT_SFN_DISPLAY_WIN95 0x0002 /* emulate win95 rule for display */
182#define VFAT_SFN_DISPLAY_WINNT 0x0004 /* emulate winnt rule for display */
183#define VFAT_SFN_CREATE_WIN95 0x0100 /* emulate win95 rule for create */
184#define VFAT_SFN_CREATE_WINNT 0x0200 /* emulate winnt rule for create */
185
186struct fat_mount_options {
187 uid_t fs_uid;
188 gid_t fs_gid;
189 unsigned short fs_fmask;
190 unsigned short fs_dmask;
191 unsigned short codepage; /* Codepage for shortname conversions */
192 char *iocharset; /* Charset used for filename input/display */
193 unsigned short shortname; /* flags for shortname display/create rule */
194 unsigned char name_check; /* r = relaxed, n = normal, s = strict */
195 unsigned short allow_utime;/* permission for setting the [am]time */
196 unsigned quiet:1, /* set = fake successful chmods and chowns */
197 showexec:1, /* set = only set x bit for com/exe/bat */
198 sys_immutable:1, /* set = system files are immutable */
199 dotsOK:1, /* set = hidden and system files are named '.filename' */
200 isvfat:1, /* 0=no vfat long filename support, 1=vfat support */
201 utf8:1, /* Use of UTF-8 character set (Default) */
202 unicode_xlate:1, /* create escape sequences for unhandled Unicode */
203 numtail:1, /* Does first alias have a numeric '~1' type tail? */
204 flush:1, /* write things quickly */
205 nocase:1, /* Does this need case conversion? 0=need case conversion*/
206 usefree:1, /* Use free_clusters for FAT32 */
207 tz_utc:1; /* Filesystem timestamps are in UTC */
208};
209
210#define FAT_HASH_BITS 8
211#define FAT_HASH_SIZE (1UL << FAT_HASH_BITS)
212#define FAT_HASH_MASK (FAT_HASH_SIZE-1)
213
214/*
215 * MS-DOS file system in-core superblock data
216 */
217struct msdos_sb_info {
218 unsigned short sec_per_clus; /* sectors/cluster */
219 unsigned short cluster_bits; /* log2(cluster_size) */
220 unsigned int cluster_size; /* cluster size */
221 unsigned char fats,fat_bits; /* number of FATs, FAT bits (12 or 16) */
222 unsigned short fat_start;
223 unsigned long fat_length; /* FAT start & length (sec.) */
224 unsigned long dir_start;
225 unsigned short dir_entries; /* root dir start & entries */
226 unsigned long data_start; /* first data sector */
227 unsigned long max_cluster; /* maximum cluster number */
228 unsigned long root_cluster; /* first cluster of the root directory */
229 unsigned long fsinfo_sector; /* sector number of FAT32 fsinfo */
230 struct mutex fat_lock;
231 unsigned int prev_free; /* previously allocated cluster number */
232 unsigned int free_clusters; /* -1 if undefined */
233 unsigned int free_clus_valid; /* is free_clusters valid? */
234 struct fat_mount_options options;
235 struct nls_table *nls_disk; /* Codepage used on disk */
236 struct nls_table *nls_io; /* Charset used for input and display */
237 const void *dir_ops; /* Opaque; default directory operations */
238 int dir_per_block; /* dir entries per block */
239 int dir_per_block_bits; /* log2(dir_per_block) */
240
241 int fatent_shift;
242 struct fatent_operations *fatent_ops;
243
244 spinlock_t inode_hash_lock;
245 struct hlist_head inode_hashtable[FAT_HASH_SIZE];
246};
247
248#define FAT_CACHE_VALID 0 /* special case for valid cache */
249
250/*
251 * MS-DOS file system inode data in memory
252 */
253struct msdos_inode_info {
254 spinlock_t cache_lru_lock;
255 struct list_head cache_lru;
256 int nr_caches;
257 /* for avoiding the race between fat_free() and fat_get_cluster() */
258 unsigned int cache_valid_id;
259
260 loff_t mmu_private;
261 int i_start; /* first cluster or 0 */
262 int i_logstart; /* logical first cluster */
263 int i_attrs; /* unused attribute bits */
264 loff_t i_pos; /* on-disk position of directory entry or 0 */
265 struct hlist_node i_fat_hash; /* hash by i_location */
266 struct inode vfs_inode;
267};
268
269struct fat_slot_info {
270 loff_t i_pos; /* on-disk position of directory entry */
271 loff_t slot_off; /* offset for slot or de start */
272 int nr_slots; /* number of slots + 1(de) in filename */
273 struct msdos_dir_entry *de;
274 struct buffer_head *bh;
275};
276
277static inline struct msdos_sb_info *MSDOS_SB(struct super_block *sb)
278{
279 return sb->s_fs_info;
280}
281
282static inline struct msdos_inode_info *MSDOS_I(struct inode *inode)
283{
284 return container_of(inode, struct msdos_inode_info, vfs_inode);
285}
286
287/* Return the FAT attribute byte for this inode */
288static inline u8 fat_attr(struct inode *inode)
289{
290 return ((inode->i_mode & S_IWUGO) ? ATTR_NONE : ATTR_RO) |
291 (S_ISDIR(inode->i_mode) ? ATTR_DIR : ATTR_NONE) |
292 MSDOS_I(inode)->i_attrs;
293}
294
295static inline unsigned char fat_checksum(const __u8 *name)
296{
297 unsigned char s = name[0];
298 s = (s<<7) + (s>>1) + name[1]; s = (s<<7) + (s>>1) + name[2];
299 s = (s<<7) + (s>>1) + name[3]; s = (s<<7) + (s>>1) + name[4];
300 s = (s<<7) + (s>>1) + name[5]; s = (s<<7) + (s>>1) + name[6];
301 s = (s<<7) + (s>>1) + name[7]; s = (s<<7) + (s>>1) + name[8];
302 s = (s<<7) + (s>>1) + name[9]; s = (s<<7) + (s>>1) + name[10];
303 return s;
304}
305
306static inline sector_t fat_clus_to_blknr(struct msdos_sb_info *sbi, int clus)
307{
308 return ((sector_t)clus - FAT_START_ENT) * sbi->sec_per_clus
309 + sbi->data_start;
310}
311
312static inline void fat16_towchar(wchar_t *dst, const __u8 *src, size_t len)
313{
314#ifdef __BIG_ENDIAN
315 while (len--) {
316 *dst++ = src[0] | (src[1] << 8);
317 src += 2;
318 }
319#else
320 memcpy(dst, src, len * 2);
321#endif
322}
323
324static inline void fatwchar_to16(__u8 *dst, const wchar_t *src, size_t len)
325{
326#ifdef __BIG_ENDIAN
327 while (len--) {
328 dst[0] = *src & 0x00FF;
329 dst[1] = (*src & 0xFF00) >> 8;
330 dst += 2;
331 src++;
332 }
333#else
334 memcpy(dst, src, len * 2);
335#endif
336}
337
338/* media of boot sector */ 165/* media of boot sector */
339static inline int fat_valid_media(u8 media) 166static inline int fat_valid_media(u8 media)
340{ 167{
341 return 0xf8 <= media || media == 0xf0; 168 return 0xf8 <= media || media == 0xf0;
342} 169}
343 170#endif /* !__KERNEL__ */
344/* fat/cache.c */ 171#endif /* !_LINUX_MSDOS_FS_H */
345extern void fat_cache_inval_inode(struct inode *inode);
346extern int fat_get_cluster(struct inode *inode, int cluster,
347 int *fclus, int *dclus);
348extern int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
349 unsigned long *mapped_blocks);
350
351/* fat/dir.c */
352extern const struct file_operations fat_dir_operations;
353extern int fat_search_long(struct inode *inode, const unsigned char *name,
354 int name_len, struct fat_slot_info *sinfo);
355extern int fat_dir_empty(struct inode *dir);
356extern int fat_subdirs(struct inode *dir);
357extern int fat_scan(struct inode *dir, const unsigned char *name,
358 struct fat_slot_info *sinfo);
359extern int fat_get_dotdot_entry(struct inode *dir, struct buffer_head **bh,
360 struct msdos_dir_entry **de, loff_t *i_pos);
361extern int fat_alloc_new_dir(struct inode *dir, struct timespec *ts);
362extern int fat_add_entries(struct inode *dir, void *slots, int nr_slots,
363 struct fat_slot_info *sinfo);
364extern int fat_remove_entries(struct inode *dir, struct fat_slot_info *sinfo);
365
366/* fat/fatent.c */
367struct fat_entry {
368 int entry;
369 union {
370 u8 *ent12_p[2];
371 __le16 *ent16_p;
372 __le32 *ent32_p;
373 } u;
374 int nr_bhs;
375 struct buffer_head *bhs[2];
376};
377
378static inline void fatent_init(struct fat_entry *fatent)
379{
380 fatent->nr_bhs = 0;
381 fatent->entry = 0;
382 fatent->u.ent32_p = NULL;
383 fatent->bhs[0] = fatent->bhs[1] = NULL;
384}
385
386static inline void fatent_set_entry(struct fat_entry *fatent, int entry)
387{
388 fatent->entry = entry;
389 fatent->u.ent32_p = NULL;
390}
391
392static inline void fatent_brelse(struct fat_entry *fatent)
393{
394 int i;
395 fatent->u.ent32_p = NULL;
396 for (i = 0; i < fatent->nr_bhs; i++)
397 brelse(fatent->bhs[i]);
398 fatent->nr_bhs = 0;
399 fatent->bhs[0] = fatent->bhs[1] = NULL;
400}
401
402extern void fat_ent_access_init(struct super_block *sb);
403extern int fat_ent_read(struct inode *inode, struct fat_entry *fatent,
404 int entry);
405extern int fat_ent_write(struct inode *inode, struct fat_entry *fatent,
406 int new, int wait);
407extern int fat_alloc_clusters(struct inode *inode, int *cluster,
408 int nr_cluster);
409extern int fat_free_clusters(struct inode *inode, int cluster);
410extern int fat_count_free_clusters(struct super_block *sb);
411
412/* fat/file.c */
413extern int fat_generic_ioctl(struct inode *inode, struct file *filp,
414 unsigned int cmd, unsigned long arg);
415extern const struct file_operations fat_file_operations;
416extern const struct inode_operations fat_file_inode_operations;
417extern int fat_setattr(struct dentry * dentry, struct iattr * attr);
418extern void fat_truncate(struct inode *inode);
419extern int fat_getattr(struct vfsmount *mnt, struct dentry *dentry,
420 struct kstat *stat);
421
422/* fat/inode.c */
423extern void fat_attach(struct inode *inode, loff_t i_pos);
424extern void fat_detach(struct inode *inode);
425extern struct inode *fat_iget(struct super_block *sb, loff_t i_pos);
426extern struct inode *fat_build_inode(struct super_block *sb,
427 struct msdos_dir_entry *de, loff_t i_pos);
428extern int fat_sync_inode(struct inode *inode);
429extern int fat_fill_super(struct super_block *sb, void *data, int silent,
430 const struct inode_operations *fs_dir_inode_ops, int isvfat);
431
432extern int fat_flush_inodes(struct super_block *sb, struct inode *i1,
433 struct inode *i2);
434/* fat/misc.c */
435extern void fat_fs_panic(struct super_block *s, const char *fmt, ...);
436extern void fat_clusters_flush(struct super_block *sb);
437extern int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster);
438extern int date_dos2unix(unsigned short time, unsigned short date, int tz_utc);
439extern void fat_date_unix2dos(int unix_date, __le16 *time, __le16 *date,
440 int tz_utc);
441extern int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs);
442
443int fat_cache_init(void);
444void fat_cache_destroy(void);
445
446#endif /* __KERNEL__ */
447
448#endif
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index ee5124ec319e..00e2b575021f 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -282,9 +282,25 @@ struct cfi_private {
282/* 282/*
283 * Returns the command address according to the given geometry. 283 * Returns the command address according to the given geometry.
284 */ 284 */
285static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, int interleave, int type) 285static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
286 struct map_info *map, struct cfi_private *cfi)
286{ 287{
287 return (cmd_ofs * type) * interleave; 288 unsigned bankwidth = map_bankwidth(map);
289 unsigned interleave = cfi_interleave(cfi);
290 unsigned type = cfi->device_type;
291 uint32_t addr;
292
293 addr = (cmd_ofs * type) * interleave;
294
295 /* Modify the unlock address if we are in compatibility mode.
296 * For 16bit devices on 8 bit busses
297 * and 32bit devices on 16 bit busses
298 * set the low bit of the alternating bit sequence of the address.
299 */
300 if (((type * interleave) > bankwidth) && ((uint8_t)cmd_ofs == 0xaa))
301 addr |= (type >> 1)*interleave;
302
303 return addr;
288} 304}
289 305
290/* 306/*
@@ -430,7 +446,7 @@ static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t
430 int type, map_word *prev_val) 446 int type, map_word *prev_val)
431{ 447{
432 map_word val; 448 map_word val;
433 uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, cfi_interleave(cfi), type); 449 uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi);
434 val = cfi_build_cmd(cmd, map, cfi); 450 val = cfi_build_cmd(cmd, map, cfi);
435 451
436 if (prev_val) 452 if (prev_val)
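The compatibility fixup in cfi_build_cmd_addr() is what turns the nominal 0x554 command address into the 0x555 a 16-bit chip expects when it sits on an 8-bit bus. A small user-space model of the arithmetic, with the geometry values picked here purely for illustration (the kernel derives them from map_info and cfi_private):

#include <stdint.h>
#include <stdio.h>

/* Model of the address fixup: type is the device width in bytes,
 * bankwidth the bus width in bytes; values are assumed for illustration. */
static uint32_t build_cmd_addr(uint32_t cmd_ofs, unsigned int bankwidth,
			       unsigned int interleave, unsigned int type)
{
	uint32_t addr = (cmd_ofs * type) * interleave;

	/* In compatibility mode the low bit of the alternating unlock
	 * address must be set, e.g. 0x554 becomes 0x555. */
	if (((type * interleave) > bankwidth) && ((uint8_t)cmd_ofs == 0xaa))
		addr |= (type >> 1) * interleave;

	return addr;
}

int main(void)
{
	/* 0x2aa is the second unlock offset used by AMD-style command sets. */
	printf("0x%x\n", (unsigned)build_cmd_addr(0x2aa, 1, 1, 2));	/* 0x555: 16-bit chip, 8-bit bus */
	printf("0x%x\n", (unsigned)build_cmd_addr(0x2aa, 2, 1, 2));	/* 0x554: 16-bit chip, 16-bit bus */
	return 0;
}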
diff --git a/include/linux/pci.h b/include/linux/pci.h
index c75b82bda327..feb4657bb043 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1136,7 +1136,7 @@ static inline void pci_mmcfg_late_init(void) { }
1136#endif 1136#endif
1137 1137
1138#ifdef CONFIG_HAS_IOMEM 1138#ifdef CONFIG_HAS_IOMEM
1139static inline void * pci_ioremap_bar(struct pci_dev *pdev, int bar) 1139static inline void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
1140{ 1140{
1141 /* 1141 /*
1142 * Make sure the BAR is actually a memory resource, not an IO resource 1142 * Make sure the BAR is actually a memory resource, not an IO resource
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b483f39a7112..295b7c756ca6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1349,6 +1349,8 @@ struct task_struct {
1349 */ 1349 */
1350 unsigned long timer_slack_ns; 1350 unsigned long timer_slack_ns;
1351 unsigned long default_timer_slack_ns; 1351 unsigned long default_timer_slack_ns;
1352
1353 struct list_head *scm_work_list;
1352}; 1354};
1353 1355
1354/* 1356/*
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 2e4d58b26c06..3f9a60043a97 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -64,8 +64,17 @@ extern void smp_cpus_done(unsigned int max_cpus);
64 * Call a function on all other processors 64 * Call a function on all other processors
65 */ 65 */
66int smp_call_function(void(*func)(void *info), void *info, int wait); 66int smp_call_function(void(*func)(void *info), void *info, int wait);
67/* Deprecated: use smp_call_function_many() which uses a cpumask ptr. */
67int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info, 68int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
68 int wait); 69 int wait);
70
71static inline void smp_call_function_many(const struct cpumask *mask,
72 void (*func)(void *info), void *info,
73 int wait)
74{
75 smp_call_function_mask(*mask, func, info, wait);
76}
77
69int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, 78int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
70 int wait); 79 int wait);
71void __smp_call_function_single(int cpuid, struct call_single_data *data); 80void __smp_call_function_single(int cpuid, struct call_single_data *data);
diff --git a/include/linux/timer.h b/include/linux/timer.h
index d4ba79248a27..daf9685b861c 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -186,4 +186,9 @@ unsigned long __round_jiffies_relative(unsigned long j, int cpu);
186unsigned long round_jiffies(unsigned long j); 186unsigned long round_jiffies(unsigned long j);
187unsigned long round_jiffies_relative(unsigned long j); 187unsigned long round_jiffies_relative(unsigned long j);
188 188
189unsigned long __round_jiffies_up(unsigned long j, int cpu);
190unsigned long __round_jiffies_up_relative(unsigned long j, int cpu);
191unsigned long round_jiffies_up(unsigned long j);
192unsigned long round_jiffies_up_relative(unsigned long j);
193
189#endif 194#endif
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 2158fc0d5a56..117f1b7405cf 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -99,7 +99,7 @@ void arch_update_cpu_topology(void);
99 | SD_BALANCE_FORK \ 99 | SD_BALANCE_FORK \
100 | SD_BALANCE_EXEC \ 100 | SD_BALANCE_EXEC \
101 | SD_WAKE_AFFINE \ 101 | SD_WAKE_AFFINE \
102 | SD_WAKE_IDLE \ 102 | SD_WAKE_BALANCE \
103 | SD_SHARE_CPUPOWER, \ 103 | SD_SHARE_CPUPOWER, \
104 .last_balance = jiffies, \ 104 .last_balance = jiffies, \
105 .balance_interval = 1, \ 105 .balance_interval = 1, \
@@ -120,10 +120,10 @@ void arch_update_cpu_topology(void);
120 .wake_idx = 1, \ 120 .wake_idx = 1, \
121 .forkexec_idx = 1, \ 121 .forkexec_idx = 1, \
122 .flags = SD_LOAD_BALANCE \ 122 .flags = SD_LOAD_BALANCE \
123 | SD_BALANCE_NEWIDLE \
124 | SD_BALANCE_FORK \ 123 | SD_BALANCE_FORK \
125 | SD_BALANCE_EXEC \ 124 | SD_BALANCE_EXEC \
126 | SD_WAKE_AFFINE \ 125 | SD_WAKE_AFFINE \
126 | SD_WAKE_BALANCE \
127 | SD_SHARE_PKG_RESOURCES\ 127 | SD_SHARE_PKG_RESOURCES\
128 | BALANCE_FOR_MC_POWER, \ 128 | BALANCE_FOR_MC_POWER, \
129 .last_balance = jiffies, \ 129 .last_balance = jiffies, \
@@ -146,10 +146,10 @@ void arch_update_cpu_topology(void);
146 .wake_idx = 1, \ 146 .wake_idx = 1, \
147 .forkexec_idx = 1, \ 147 .forkexec_idx = 1, \
148 .flags = SD_LOAD_BALANCE \ 148 .flags = SD_LOAD_BALANCE \
149 | SD_BALANCE_NEWIDLE \
150 | SD_BALANCE_FORK \
151 | SD_BALANCE_EXEC \ 149 | SD_BALANCE_EXEC \
150 | SD_BALANCE_FORK \
152 | SD_WAKE_AFFINE \ 151 | SD_WAKE_AFFINE \
152 | SD_WAKE_BALANCE \
153 | BALANCE_FOR_PKG_POWER,\ 153 | BALANCE_FOR_PKG_POWER,\
154 .last_balance = jiffies, \ 154 .last_balance = jiffies, \
155 .balance_interval = 1, \ 155 .balance_interval = 1, \
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 89a5a1231ffb..b36291130f22 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -240,4 +240,12 @@ void cancel_rearming_delayed_work(struct delayed_work *work)
240 cancel_delayed_work_sync(work); 240 cancel_delayed_work_sync(work);
241} 241}
242 242
243#ifndef CONFIG_SMP
244static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
245{
246 return fn(arg);
247}
248#else
249long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
250#endif /* CONFIG_SMP */
243#endif 251#endif
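work_on_cpu() runs a function on one particular CPU and hands back its long return value; on !SMP builds it degenerates to a direct call. A sketch of the calling convention, with both the callback and its argument invented for illustration:

/* Sketch: the callback body executes on the CPU chosen by the caller;
 * real users put per-CPU-only work (e.g. touching CPU-local hardware
 * state) here. */
static long scale_on_cpu(void *arg)
{
	long val = *(long *)arg;

	return val * 2;
}

static long scale_on(unsigned int cpu, long val)
{
	return work_on_cpu(cpu, scale_on_cpu, &val);
}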
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 7dd29b7e461d..c29ff1da8a18 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -54,6 +54,7 @@ struct unix_sock {
54 atomic_long_t inflight; 54 atomic_long_t inflight;
55 spinlock_t lock; 55 spinlock_t lock;
56 unsigned int gc_candidate : 1; 56 unsigned int gc_candidate : 1;
57 unsigned int gc_maybe_cycle : 1;
57 wait_queue_head_t peer_wait; 58 wait_queue_head_t peer_wait;
58}; 59};
59#define unix_sk(__sk) ((struct unix_sock *)__sk) 60#define unix_sk(__sk) ((struct unix_sock *)__sk)
diff --git a/include/net/scm.h b/include/net/scm.h
index 06df126103ca..33e9986beb86 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -14,8 +14,9 @@
14 14
15struct scm_fp_list 15struct scm_fp_list
16{ 16{
17 int count; 17 struct list_head list;
18 struct file *fp[SCM_MAX_FD]; 18 int count;
19 struct file *fp[SCM_MAX_FD];
19}; 20};
20 21
21struct scm_cookie 22struct scm_cookie
diff --git a/include/sound/core.h b/include/sound/core.h
index 35424a971b7a..1508c4ec1ba9 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -385,9 +385,13 @@ void snd_verbose_printd(const char *file, int line, const char *format, ...)
385 385
386#else /* !CONFIG_SND_DEBUG */ 386#else /* !CONFIG_SND_DEBUG */
387 387
388#define snd_printd(fmt, args...) /* nothing */ 388#define snd_printd(fmt, args...) do { } while (0)
389#define snd_BUG() /* nothing */ 389#define snd_BUG() do { } while (0)
390#define snd_BUG_ON(cond) ({/*(void)(cond);*/ 0;}) /* always false */ 390static inline int __snd_bug_on(void)
391{
392 return 0;
393}
394#define snd_BUG_ON(cond) __snd_bug_on() /* always false */
391 395
392#endif /* CONFIG_SND_DEBUG */ 396#endif /* CONFIG_SND_DEBUG */
393 397
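With the static inline in place, snd_BUG_ON() evaluates to a real expression in both configurations, so guard-and-bail call sites keep compiling when CONFIG_SND_DEBUG is off. A sketch (the function and its error code are illustrative only):

/* Sketch: snd_BUG_ON() stays usable inside an if() whether or not
 * CONFIG_SND_DEBUG is set; without it the condition is constant 0. */
static int example_check(void *chip)
{
	if (snd_BUG_ON(!chip))
		return -ENXIO;
	return 0;
}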
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 35eebd5510c2..358e77564e6f 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2497,7 +2497,6 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
2497 list_del(&cgrp->sibling); 2497 list_del(&cgrp->sibling);
2498 spin_lock(&cgrp->dentry->d_lock); 2498 spin_lock(&cgrp->dentry->d_lock);
2499 d = dget(cgrp->dentry); 2499 d = dget(cgrp->dentry);
2500 cgrp->dentry = NULL;
2501 spin_unlock(&d->d_lock); 2500 spin_unlock(&d->d_lock);
2502 2501
2503 cgroup_d_remove_dir(d); 2502 cgroup_d_remove_dir(d);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 86d49045daed..5a732c5ef08b 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -499,3 +499,6 @@ const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
499#endif 499#endif
500}; 500};
501EXPORT_SYMBOL_GPL(cpu_bit_bitmap); 501EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
502
503const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
504EXPORT_SYMBOL(cpu_all_bits);
diff --git a/kernel/sched.c b/kernel/sched.c
index e8819bc6f462..57c933ffbee1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -397,7 +397,7 @@ struct cfs_rq {
397 * 'curr' points to currently running entity on this cfs_rq. 397 * 'curr' points to currently running entity on this cfs_rq.
398 * It is set to NULL otherwise (i.e when none are currently running). 398 * It is set to NULL otherwise (i.e when none are currently running).
399 */ 399 */
400 struct sched_entity *curr, *next; 400 struct sched_entity *curr, *next, *last;
401 401
402 unsigned long nr_spread_over; 402 unsigned long nr_spread_over;
403 403
@@ -1805,7 +1805,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
1805 /* 1805 /*
1806 * Buddy candidates are cache hot: 1806 * Buddy candidates are cache hot:
1807 */ 1807 */
1808 if (sched_feat(CACHE_HOT_BUDDY) && (&p->se == cfs_rq_of(&p->se)->next)) 1808 if (sched_feat(CACHE_HOT_BUDDY) &&
1809 (&p->se == cfs_rq_of(&p->se)->next ||
1810 &p->se == cfs_rq_of(&p->se)->last))
1809 return 1; 1811 return 1;
1810 1812
1811 if (p->sched_class != &fair_sched_class) 1813 if (p->sched_class != &fair_sched_class)
@@ -6875,15 +6877,17 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
6875 struct sched_domain *tmp; 6877 struct sched_domain *tmp;
6876 6878
6877 /* Remove the sched domains which do not contribute to scheduling. */ 6879 /* Remove the sched domains which do not contribute to scheduling. */
6878 for (tmp = sd; tmp; tmp = tmp->parent) { 6880 for (tmp = sd; tmp; ) {
6879 struct sched_domain *parent = tmp->parent; 6881 struct sched_domain *parent = tmp->parent;
6880 if (!parent) 6882 if (!parent)
6881 break; 6883 break;
6884
6882 if (sd_parent_degenerate(tmp, parent)) { 6885 if (sd_parent_degenerate(tmp, parent)) {
6883 tmp->parent = parent->parent; 6886 tmp->parent = parent->parent;
6884 if (parent->parent) 6887 if (parent->parent)
6885 parent->parent->child = tmp; 6888 parent->parent->child = tmp;
6886 } 6889 } else
6890 tmp = tmp->parent;
6887 } 6891 }
6888 6892
6889 if (sd && sd_degenerate(sd)) { 6893 if (sd && sd_degenerate(sd)) {
@@ -7672,6 +7676,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7672error: 7676error:
7673 free_sched_groups(cpu_map, tmpmask); 7677 free_sched_groups(cpu_map, tmpmask);
7674 SCHED_CPUMASK_FREE((void *)allmasks); 7678 SCHED_CPUMASK_FREE((void *)allmasks);
7679 kfree(rd);
7675 return -ENOMEM; 7680 return -ENOMEM;
7676#endif 7681#endif
7677} 7682}
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ce514afd78ff..51aa3e102acb 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -341,23 +341,20 @@ static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
341 cfs_rq->rb_leftmost = next_node; 341 cfs_rq->rb_leftmost = next_node;
342 } 342 }
343 343
344 if (cfs_rq->next == se)
345 cfs_rq->next = NULL;
346
347 rb_erase(&se->run_node, &cfs_rq->tasks_timeline); 344 rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
348} 345}
349 346
350static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
351{
352 return cfs_rq->rb_leftmost;
353}
354
355static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq) 347static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
356{ 348{
357 return rb_entry(first_fair(cfs_rq), struct sched_entity, run_node); 349 struct rb_node *left = cfs_rq->rb_leftmost;
350
351 if (!left)
352 return NULL;
353
354 return rb_entry(left, struct sched_entity, run_node);
358} 355}
359 356
360static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) 357static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
361{ 358{
362 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline); 359 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
363 360
@@ -741,6 +738,12 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
741#endif 738#endif
742 } 739 }
743 740
741 if (cfs_rq->last == se)
742 cfs_rq->last = NULL;
743
744 if (cfs_rq->next == se)
745 cfs_rq->next = NULL;
746
744 if (se != cfs_rq->curr) 747 if (se != cfs_rq->curr)
745 __dequeue_entity(cfs_rq, se); 748 __dequeue_entity(cfs_rq, se);
746 account_entity_dequeue(cfs_rq, se); 749 account_entity_dequeue(cfs_rq, se);
@@ -794,24 +797,15 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
794static int 797static int
795wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se); 798wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
796 799
797static struct sched_entity *
798pick_next(struct cfs_rq *cfs_rq, struct sched_entity *se)
799{
800 if (!cfs_rq->next || wakeup_preempt_entity(cfs_rq->next, se) == 1)
801 return se;
802
803 return cfs_rq->next;
804}
805
806static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq) 800static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
807{ 801{
808 struct sched_entity *se = NULL; 802 struct sched_entity *se = __pick_next_entity(cfs_rq);
809 803
810 if (first_fair(cfs_rq)) { 804 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, se) < 1)
811 se = __pick_next_entity(cfs_rq); 805 return cfs_rq->next;
812 se = pick_next(cfs_rq, se); 806
813 set_next_entity(cfs_rq, se); 807 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, se) < 1)
814 } 808 return cfs_rq->last;
815 809
816 return se; 810 return se;
817} 811}
@@ -1325,26 +1319,53 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
1325 return 0; 1319 return 0;
1326} 1320}
1327 1321
1322static void set_last_buddy(struct sched_entity *se)
1323{
1324 for_each_sched_entity(se)
1325 cfs_rq_of(se)->last = se;
1326}
1327
1328static void set_next_buddy(struct sched_entity *se)
1329{
1330 for_each_sched_entity(se)
1331 cfs_rq_of(se)->next = se;
1332}
1333
1328/* 1334/*
1329 * Preempt the current task with a newly woken task if needed: 1335 * Preempt the current task with a newly woken task if needed:
1330 */ 1336 */
1331static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync) 1337static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
1332{ 1338{
1333 struct task_struct *curr = rq->curr; 1339 struct task_struct *curr = rq->curr;
1334 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
1335 struct sched_entity *se = &curr->se, *pse = &p->se; 1340 struct sched_entity *se = &curr->se, *pse = &p->se;
1336 1341
1337 if (unlikely(rt_prio(p->prio))) { 1342 if (unlikely(rt_prio(p->prio))) {
1343 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
1344
1338 update_rq_clock(rq); 1345 update_rq_clock(rq);
1339 update_curr(cfs_rq); 1346 update_curr(cfs_rq);
1340 resched_task(curr); 1347 resched_task(curr);
1341 return; 1348 return;
1342 } 1349 }
1343 1350
1351 if (unlikely(p->sched_class != &fair_sched_class))
1352 return;
1353
1344 if (unlikely(se == pse)) 1354 if (unlikely(se == pse))
1345 return; 1355 return;
1346 1356
1347 cfs_rq_of(pse)->next = pse; 1357 /*
1358 * Only set the backward buddy when the current task is still on the
1359 * rq. This can happen when a wakeup gets interleaved with schedule on
1360 * the ->pre_schedule() or idle_balance() point, either of which can
1361 * drop the rq lock.
1362 *
1363 * Also, during early boot the idle thread is in the fair class, for
1364 * obvious reasons it's a bad idea to schedule back to the idle thread.
1365 */
1366 if (sched_feat(LAST_BUDDY) && likely(se->on_rq && curr != rq->idle))
1367 set_last_buddy(se);
1368 set_next_buddy(pse);
1348 1369
1349 /* 1370 /*
1350 * We can come here with TIF_NEED_RESCHED already set from new task 1371 * We can come here with TIF_NEED_RESCHED already set from new task
@@ -1396,6 +1417,7 @@ static struct task_struct *pick_next_task_fair(struct rq *rq)
1396 1417
1397 do { 1418 do {
1398 se = pick_next_entity(cfs_rq); 1419 se = pick_next_entity(cfs_rq);
1420 set_next_entity(cfs_rq, se);
1399 cfs_rq = group_cfs_rq(se); 1421 cfs_rq = group_cfs_rq(se);
1400 } while (cfs_rq); 1422 } while (cfs_rq);
1401 1423
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index fda016218296..da5d93b5d2c6 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -12,3 +12,4 @@ SCHED_FEAT(LB_BIAS, 1)
12SCHED_FEAT(LB_WAKEUP_UPDATE, 1) 12SCHED_FEAT(LB_WAKEUP_UPDATE, 1)
13SCHED_FEAT(ASYM_EFF_LOAD, 1) 13SCHED_FEAT(ASYM_EFF_LOAD, 1)
14SCHED_FEAT(WAKEUP_OVERLAP, 0) 14SCHED_FEAT(WAKEUP_OVERLAP, 0)
15SCHED_FEAT(LAST_BUDDY, 1)
diff --git a/kernel/smp.c b/kernel/smp.c
index f362a8553777..75c8dde58c55 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -51,10 +51,6 @@ static void csd_flag_wait(struct call_single_data *data)
51{ 51{
52 /* Wait for response */ 52 /* Wait for response */
53 do { 53 do {
54 /*
55 * We need to see the flags store in the IPI handler
56 */
57 smp_mb();
58 if (!(data->flags & CSD_FLAG_WAIT)) 54 if (!(data->flags & CSD_FLAG_WAIT))
59 break; 55 break;
60 cpu_relax(); 56 cpu_relax();
@@ -76,6 +72,11 @@ static void generic_exec_single(int cpu, struct call_single_data *data)
76 list_add_tail(&data->list, &dst->list); 72 list_add_tail(&data->list, &dst->list);
77 spin_unlock_irqrestore(&dst->lock, flags); 73 spin_unlock_irqrestore(&dst->lock, flags);
78 74
75 /*
76 * Make the list addition visible before sending the ipi.
77 */
78 smp_mb();
79
79 if (ipi) 80 if (ipi)
80 arch_send_call_function_single_ipi(cpu); 81 arch_send_call_function_single_ipi(cpu);
81 82
@@ -157,7 +158,7 @@ void generic_smp_call_function_single_interrupt(void)
157 * Need to see other stores to list head for checking whether 158 * Need to see other stores to list head for checking whether
158 * list is empty without holding q->lock 159 * list is empty without holding q->lock
159 */ 160 */
160 smp_mb(); 161 smp_read_barrier_depends();
161 while (!list_empty(&q->list)) { 162 while (!list_empty(&q->list)) {
162 unsigned int data_flags; 163 unsigned int data_flags;
163 164
@@ -191,7 +192,7 @@ void generic_smp_call_function_single_interrupt(void)
191 /* 192 /*
192 * See comment on outer loop 193 * See comment on outer loop
193 */ 194 */
194 smp_mb(); 195 smp_read_barrier_depends();
195 } 196 }
196} 197}
197 198
@@ -370,6 +371,11 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
370 list_add_tail_rcu(&data->csd.list, &call_function_queue); 371 list_add_tail_rcu(&data->csd.list, &call_function_queue);
371 spin_unlock_irqrestore(&call_function_lock, flags); 372 spin_unlock_irqrestore(&call_function_lock, flags);
372 373
374 /*
375 * Make the list addition visible before sending the ipi.
376 */
377 smp_mb();
378
373 /* Send a message to all CPUs in the map */ 379 /* Send a message to all CPUs in the map */
374 arch_send_call_function_ipi(mask); 380 arch_send_call_function_ipi(mask);
375 381
diff --git a/kernel/timer.c b/kernel/timer.c
index 56becf373c58..dbd50fabe4c7 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -112,27 +112,8 @@ timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
112 tbase_get_deferrable(timer->base)); 112 tbase_get_deferrable(timer->base));
113} 113}
114 114
115/** 115static unsigned long round_jiffies_common(unsigned long j, int cpu,
116 * __round_jiffies - function to round jiffies to a full second 116 bool force_up)
117 * @j: the time in (absolute) jiffies that should be rounded
118 * @cpu: the processor number on which the timeout will happen
119 *
120 * __round_jiffies() rounds an absolute time in the future (in jiffies)
121 * up or down to (approximately) full seconds. This is useful for timers
122 * for which the exact time they fire does not matter too much, as long as
123 * they fire approximately every X seconds.
124 *
125 * By rounding these timers to whole seconds, all such timers will fire
126 * at the same time, rather than at various times spread out. The goal
127 * of this is to have the CPU wake up less, which saves power.
128 *
129 * The exact rounding is skewed for each processor to avoid all
130 * processors firing at the exact same time, which could lead
131 * to lock contention or spurious cache line bouncing.
132 *
133 * The return value is the rounded version of the @j parameter.
134 */
135unsigned long __round_jiffies(unsigned long j, int cpu)
136{ 117{
137 int rem; 118 int rem;
138 unsigned long original = j; 119 unsigned long original = j;
@@ -154,8 +135,9 @@ unsigned long __round_jiffies(unsigned long j, int cpu)
154 * due to delays of the timer irq, long irq off times etc etc) then 135 * due to delays of the timer irq, long irq off times etc etc) then
155 * we should round down to the whole second, not up. Use 1/4th second 136 * we should round down to the whole second, not up. Use 1/4th second
156 * as cutoff for this rounding as an extreme upper bound for this. 137 * as cutoff for this rounding as an extreme upper bound for this.
138 * But never round down if @force_up is set.
157 */ 139 */
158 if (rem < HZ/4) /* round down */ 140 if (rem < HZ/4 && !force_up) /* round down */
159 j = j - rem; 141 j = j - rem;
160 else /* round up */ 142 else /* round up */
161 j = j - rem + HZ; 143 j = j - rem + HZ;
@@ -167,6 +149,31 @@ unsigned long __round_jiffies(unsigned long j, int cpu)
167 return original; 149 return original;
168 return j; 150 return j;
169} 151}
152
153/**
154 * __round_jiffies - function to round jiffies to a full second
155 * @j: the time in (absolute) jiffies that should be rounded
156 * @cpu: the processor number on which the timeout will happen
157 *
158 * __round_jiffies() rounds an absolute time in the future (in jiffies)
159 * up or down to (approximately) full seconds. This is useful for timers
160 * for which the exact time they fire does not matter too much, as long as
161 * they fire approximately every X seconds.
162 *
163 * By rounding these timers to whole seconds, all such timers will fire
164 * at the same time, rather than at various times spread out. The goal
165 * of this is to have the CPU wake up less, which saves power.
166 *
167 * The exact rounding is skewed for each processor to avoid all
168 * processors firing at the exact same time, which could lead
169 * to lock contention or spurious cache line bouncing.
170 *
171 * The return value is the rounded version of the @j parameter.
172 */
173unsigned long __round_jiffies(unsigned long j, int cpu)
174{
175 return round_jiffies_common(j, cpu, false);
176}
170EXPORT_SYMBOL_GPL(__round_jiffies); 177EXPORT_SYMBOL_GPL(__round_jiffies);
171 178
172/** 179/**
@@ -191,13 +198,10 @@ EXPORT_SYMBOL_GPL(__round_jiffies);
191 */ 198 */
192unsigned long __round_jiffies_relative(unsigned long j, int cpu) 199unsigned long __round_jiffies_relative(unsigned long j, int cpu)
193{ 200{
194 /* 201 unsigned long j0 = jiffies;
195 * In theory the following code can skip a jiffy in case jiffies 202
196 * increments right between the addition and the later subtraction. 203 /* Use j0 because jiffies might change while we run */
197 * However since the entire point of this function is to use approximate 204 return round_jiffies_common(j + j0, cpu, false) - j0;
198 * timeouts, it's entirely ok to not handle that.
199 */
200 return __round_jiffies(j + jiffies, cpu) - jiffies;
201} 205}
202EXPORT_SYMBOL_GPL(__round_jiffies_relative); 206EXPORT_SYMBOL_GPL(__round_jiffies_relative);
203 207
@@ -218,7 +222,7 @@ EXPORT_SYMBOL_GPL(__round_jiffies_relative);
218 */ 222 */
219unsigned long round_jiffies(unsigned long j) 223unsigned long round_jiffies(unsigned long j)
220{ 224{
221 return __round_jiffies(j, raw_smp_processor_id()); 225 return round_jiffies_common(j, raw_smp_processor_id(), false);
222} 226}
223EXPORT_SYMBOL_GPL(round_jiffies); 227EXPORT_SYMBOL_GPL(round_jiffies);
224 228
@@ -243,6 +247,71 @@ unsigned long round_jiffies_relative(unsigned long j)
243} 247}
244EXPORT_SYMBOL_GPL(round_jiffies_relative); 248EXPORT_SYMBOL_GPL(round_jiffies_relative);
245 249
250/**
251 * __round_jiffies_up - function to round jiffies up to a full second
252 * @j: the time in (absolute) jiffies that should be rounded
253 * @cpu: the processor number on which the timeout will happen
254 *
255 * This is the same as __round_jiffies() except that it will never
256 * round down. This is useful for timeouts for which the exact time
257 * of firing does not matter too much, as long as they don't fire too
258 * early.
259 */
260unsigned long __round_jiffies_up(unsigned long j, int cpu)
261{
262 return round_jiffies_common(j, cpu, true);
263}
264EXPORT_SYMBOL_GPL(__round_jiffies_up);
265
266/**
267 * __round_jiffies_up_relative - function to round jiffies up to a full second
268 * @j: the time in (relative) jiffies that should be rounded
269 * @cpu: the processor number on which the timeout will happen
270 *
271 * This is the same as __round_jiffies_relative() except that it will never
272 * round down. This is useful for timeouts for which the exact time
273 * of firing does not matter too much, as long as they don't fire too
274 * early.
275 */
276unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
277{
278 unsigned long j0 = jiffies;
279
280 /* Use j0 because jiffies might change while we run */
281 return round_jiffies_common(j + j0, cpu, true) - j0;
282}
283EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
284
285/**
286 * round_jiffies_up - function to round jiffies up to a full second
287 * @j: the time in (absolute) jiffies that should be rounded
288 *
289 * This is the same as round_jiffies() except that it will never
290 * round down. This is useful for timeouts for which the exact time
291 * of firing does not matter too much, as long as they don't fire too
292 * early.
293 */
294unsigned long round_jiffies_up(unsigned long j)
295{
296 return round_jiffies_common(j, raw_smp_processor_id(), true);
297}
298EXPORT_SYMBOL_GPL(round_jiffies_up);
299
300/**
301 * round_jiffies_up_relative - function to round jiffies up to a full second
302 * @j: the time in (relative) jiffies that should be rounded
303 *
304 * This is the same as round_jiffies_relative() except that it will never
305 * round down. This is useful for timeouts for which the exact time
306 * of firing does not matter too much, as long as they don't fire too
307 * early.
308 */
309unsigned long round_jiffies_up_relative(unsigned long j)
310{
311 return __round_jiffies_up_relative(j, raw_smp_processor_id());
312}
313EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
314
246 315
247static inline void set_running_timer(struct tvec_base *base, 316static inline void set_running_timer(struct tvec_base *base,
248 struct timer_list *timer) 317 struct timer_list *timer)
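A minimal sketch of how the new round-up helper might be used by a driver that polls roughly once a second but must never fire early; the timer name and callback below are hypothetical placeholders, not part of this patch:

	static struct timer_list poll_timer;	/* hypothetical example timer */

	static void poll_timer_fn(unsigned long data)
	{
		/* ... periodic work ... */

		/* Re-arm, rounding up so the timer never expires before the full second. */
		mod_timer(&poll_timer, round_jiffies_up(jiffies + HZ));
	}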
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 3f3380638646..2f76193c3489 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1060,7 +1060,7 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1060 1060
1061 /* Did the write stamp get updated already? */ 1061 /* Did the write stamp get updated already? */
1062 if (unlikely(ts < cpu_buffer->write_stamp)) 1062 if (unlikely(ts < cpu_buffer->write_stamp))
1063 goto again; 1063 delta = 0;
1064 1064
1065 if (test_time_stamp(delta)) { 1065 if (test_time_stamp(delta)) {
1066 1066
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 974973e39e87..697eda36b86a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2676,7 +2676,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2676{ 2676{
2677 unsigned long val; 2677 unsigned long val;
2678 char buf[64]; 2678 char buf[64];
2679 int ret; 2679 int ret, cpu;
2680 struct trace_array *tr = filp->private_data; 2680 struct trace_array *tr = filp->private_data;
2681 2681
2682 if (cnt >= sizeof(buf)) 2682 if (cnt >= sizeof(buf))
@@ -2704,6 +2704,14 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2704 goto out; 2704 goto out;
2705 } 2705 }
2706 2706
2707 /* disable all cpu buffers */
2708 for_each_tracing_cpu(cpu) {
2709 if (global_trace.data[cpu])
2710 atomic_inc(&global_trace.data[cpu]->disabled);
2711 if (max_tr.data[cpu])
2712 atomic_inc(&max_tr.data[cpu]->disabled);
2713 }
2714
2707 if (val != global_trace.entries) { 2715 if (val != global_trace.entries) {
2708 ret = ring_buffer_resize(global_trace.buffer, val); 2716 ret = ring_buffer_resize(global_trace.buffer, val);
2709 if (ret < 0) { 2717 if (ret < 0) {
@@ -2735,6 +2743,13 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2735 if (tracing_disabled) 2743 if (tracing_disabled)
2736 cnt = -ENOMEM; 2744 cnt = -ENOMEM;
2737 out: 2745 out:
2746 for_each_tracing_cpu(cpu) {
2747 if (global_trace.data[cpu])
2748 atomic_dec(&global_trace.data[cpu]->disabled);
2749 if (max_tr.data[cpu])
2750 atomic_dec(&max_tr.data[cpu]->disabled);
2751 }
2752
2738 max_tr.entries = global_trace.entries; 2753 max_tr.entries = global_trace.entries;
2739 mutex_unlock(&trace_types_lock); 2754 mutex_unlock(&trace_types_lock);
2740 2755
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f928f2a87b9b..d4dc69ddebd7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -970,6 +970,51 @@ undo:
970 return ret; 970 return ret;
971} 971}
972 972
973#ifdef CONFIG_SMP
974struct work_for_cpu {
975 struct work_struct work;
976 long (*fn)(void *);
977 void *arg;
978 long ret;
979};
980
981static void do_work_for_cpu(struct work_struct *w)
982{
983 struct work_for_cpu *wfc = container_of(w, struct work_for_cpu, work);
984
985 wfc->ret = wfc->fn(wfc->arg);
986}
987
988/**
989 * work_on_cpu - run a function in user context on a particular cpu
990 * @cpu: the cpu to run on
991 * @fn: the function to run
992 * @arg: the function arg
993 *
 994 * This will return -EINVAL if the cpu is not online, or the return value
995 * of @fn otherwise.
996 */
997long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
998{
999 struct work_for_cpu wfc;
1000
1001 INIT_WORK(&wfc.work, do_work_for_cpu);
1002 wfc.fn = fn;
1003 wfc.arg = arg;
1004 get_online_cpus();
1005 if (unlikely(!cpu_online(cpu)))
1006 wfc.ret = -EINVAL;
1007 else {
1008 schedule_work_on(cpu, &wfc.work);
1009 flush_work(&wfc.work);
1010 }
1011 put_online_cpus();
1012
1013 return wfc.ret;
1014}
1015EXPORT_SYMBOL_GPL(work_on_cpu);
1016#endif /* CONFIG_SMP */
1017
973void __init init_workqueues(void) 1018void __init init_workqueues(void)
974{ 1019{
975 cpu_populated_map = cpu_online_map; 1020 cpu_populated_map = cpu_online_map;
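A hedged sketch of a work_on_cpu() caller, relying only on the behaviour documented above (the function runs in process context on the chosen cpu, and -EINVAL is returned if that cpu is offline); the probe function and the cpu number are made-up placeholders:

	static long probe_one_cpu(void *arg)
	{
		/* Runs in process context, pinned to the requested cpu. */
		return (long)smp_processor_id();
	}

		long ret;

		ret = work_on_cpu(1, probe_one_cpu, NULL);	/* cpu 1 chosen arbitrarily */
		if (ret == -EINVAL)
			printk(KERN_WARNING "cpu 1 is not online\n");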
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 5f97dc25ef9c..8d03f22c6ced 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -2,6 +2,7 @@
2#include <linux/bitops.h> 2#include <linux/bitops.h>
3#include <linux/cpumask.h> 3#include <linux/cpumask.h>
4#include <linux/module.h> 4#include <linux/module.h>
5#include <linux/bootmem.h>
5 6
6int __first_cpu(const cpumask_t *srcp) 7int __first_cpu(const cpumask_t *srcp)
7{ 8{
@@ -35,3 +36,81 @@ int __any_online_cpu(const cpumask_t *mask)
35 return cpu; 36 return cpu;
36} 37}
37EXPORT_SYMBOL(__any_online_cpu); 38EXPORT_SYMBOL(__any_online_cpu);
39
40/**
41 * cpumask_next_and - get the next cpu in *src1p & *src2p
42 * @n: the cpu prior to the place to search (ie. return will be > @n)
43 * @src1p: the first cpumask pointer
44 * @src2p: the second cpumask pointer
45 *
46 * Returns >= nr_cpu_ids if no further cpus set in both.
47 */
48int cpumask_next_and(int n, const struct cpumask *src1p,
49 const struct cpumask *src2p)
50{
51 while ((n = cpumask_next(n, src1p)) < nr_cpu_ids)
52 if (cpumask_test_cpu(n, src2p))
53 break;
54 return n;
55}
56EXPORT_SYMBOL(cpumask_next_and);
57
58/**
 59 * cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
60 * @mask: the cpumask to search
61 * @cpu: the cpu to ignore.
62 *
63 * Often used to find any cpu but smp_processor_id() in a mask.
64 * Returns >= nr_cpu_ids if no cpus set.
65 */
66int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
67{
68 unsigned int i;
69
70 cpumask_check(cpu);
71 for_each_cpu(i, mask)
72 if (i != cpu)
73 break;
74 return i;
75}
76
77/* These are not inline because of header tangles. */
78#ifdef CONFIG_CPUMASK_OFFSTACK
79bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
80{
81 if (likely(slab_is_available()))
82 *mask = kmalloc(cpumask_size(), flags);
83 else {
84#ifdef CONFIG_DEBUG_PER_CPU_MAPS
85 printk(KERN_ERR
86 "=> alloc_cpumask_var: kmalloc not available!\n");
87 dump_stack();
88#endif
89 *mask = NULL;
90 }
91#ifdef CONFIG_DEBUG_PER_CPU_MAPS
92 if (!*mask) {
93 printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
94 dump_stack();
95 }
96#endif
97 return *mask != NULL;
98}
99EXPORT_SYMBOL(alloc_cpumask_var);
100
101void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
102{
103 *mask = alloc_bootmem(cpumask_size());
104}
105
106void free_cpumask_var(cpumask_var_t mask)
107{
108 kfree(mask);
109}
110EXPORT_SYMBOL(free_cpumask_var);
111
112void __init free_bootmem_cpumask_var(cpumask_var_t mask)
113{
114 free_bootmem((unsigned long)mask, cpumask_size());
115}
116#endif
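A minimal usage sketch for the new cpumask_var_t helpers, assuming an ordinary process-context caller (with CONFIG_CPUMASK_OFFSTACK unset the allocation is expected to reduce to an on-stack mask); do_something() is a placeholder:

		cpumask_var_t tmp;
		int cpu;

		if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
			return -ENOMEM;

		/* ... populate tmp, then walk it with the new iterator ... */
		for_each_cpu(cpu, tmp)
			do_something(cpu);

		free_cpumask_var(tmp);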
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 421aee99b84a..d143ab67be44 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -354,11 +354,26 @@ static int vma_has_reserves(struct vm_area_struct *vma)
354 return 0; 354 return 0;
355} 355}
356 356
357static void clear_gigantic_page(struct page *page,
358 unsigned long addr, unsigned long sz)
359{
360 int i;
361 struct page *p = page;
362
363 might_sleep();
364 for (i = 0; i < sz/PAGE_SIZE; i++, p = mem_map_next(p, page, i)) {
365 cond_resched();
366 clear_user_highpage(p, addr + i * PAGE_SIZE);
367 }
368}
357static void clear_huge_page(struct page *page, 369static void clear_huge_page(struct page *page,
358 unsigned long addr, unsigned long sz) 370 unsigned long addr, unsigned long sz)
359{ 371{
360 int i; 372 int i;
361 373
374 if (unlikely(sz > MAX_ORDER_NR_PAGES))
375 return clear_gigantic_page(page, addr, sz);
376
362 might_sleep(); 377 might_sleep();
363 for (i = 0; i < sz/PAGE_SIZE; i++) { 378 for (i = 0; i < sz/PAGE_SIZE; i++) {
364 cond_resched(); 379 cond_resched();
@@ -366,12 +381,32 @@ static void clear_huge_page(struct page *page,
366 } 381 }
367} 382}
368 383
384static void copy_gigantic_page(struct page *dst, struct page *src,
385 unsigned long addr, struct vm_area_struct *vma)
386{
387 int i;
388 struct hstate *h = hstate_vma(vma);
389 struct page *dst_base = dst;
390 struct page *src_base = src;
391 might_sleep();
392 for (i = 0; i < pages_per_huge_page(h); ) {
393 cond_resched();
394 copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
395
396 i++;
397 dst = mem_map_next(dst, dst_base, i);
398 src = mem_map_next(src, src_base, i);
399 }
400}
369static void copy_huge_page(struct page *dst, struct page *src, 401static void copy_huge_page(struct page *dst, struct page *src,
370 unsigned long addr, struct vm_area_struct *vma) 402 unsigned long addr, struct vm_area_struct *vma)
371{ 403{
372 int i; 404 int i;
373 struct hstate *h = hstate_vma(vma); 405 struct hstate *h = hstate_vma(vma);
374 406
407 if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES))
408 return copy_gigantic_page(dst, src, addr, vma);
409
375 might_sleep(); 410 might_sleep();
376 for (i = 0; i < pages_per_huge_page(h); i++) { 411 for (i = 0; i < pages_per_huge_page(h); i++) {
377 cond_resched(); 412 cond_resched();
@@ -456,6 +491,8 @@ static void update_and_free_page(struct hstate *h, struct page *page)
456{ 491{
457 int i; 492 int i;
458 493
494 VM_BUG_ON(h->order >= MAX_ORDER);
495
459 h->nr_huge_pages--; 496 h->nr_huge_pages--;
460 h->nr_huge_pages_node[page_to_nid(page)]--; 497 h->nr_huge_pages_node[page_to_nid(page)]--;
461 for (i = 0; i < pages_per_huge_page(h); i++) { 498 for (i = 0; i < pages_per_huge_page(h); i++) {
@@ -970,6 +1007,14 @@ found:
970 return 1; 1007 return 1;
971} 1008}
972 1009
1010static void prep_compound_huge_page(struct page *page, int order)
1011{
1012 if (unlikely(order > (MAX_ORDER - 1)))
1013 prep_compound_gigantic_page(page, order);
1014 else
1015 prep_compound_page(page, order);
1016}
1017
973/* Put bootmem huge pages into the standard lists after mem_map is up */ 1018/* Put bootmem huge pages into the standard lists after mem_map is up */
974static void __init gather_bootmem_prealloc(void) 1019static void __init gather_bootmem_prealloc(void)
975{ 1020{
@@ -980,7 +1025,7 @@ static void __init gather_bootmem_prealloc(void)
980 struct hstate *h = m->hstate; 1025 struct hstate *h = m->hstate;
981 __ClearPageReserved(page); 1026 __ClearPageReserved(page);
982 WARN_ON(page_count(page) != 1); 1027 WARN_ON(page_count(page) != 1);
983 prep_compound_page(page, h->order); 1028 prep_compound_huge_page(page, h->order);
984 prep_new_huge_page(h, page, page_to_nid(page)); 1029 prep_new_huge_page(h, page, page_to_nid(page));
985 } 1030 }
986} 1031}
@@ -2130,7 +2175,7 @@ same_page:
2130 if (zeropage_ok) 2175 if (zeropage_ok)
2131 pages[i] = ZERO_PAGE(0); 2176 pages[i] = ZERO_PAGE(0);
2132 else 2177 else
2133 pages[i] = page + pfn_offset; 2178 pages[i] = mem_map_offset(page, pfn_offset);
2134 get_page(pages[i]); 2179 get_page(pages[i]);
2135 } 2180 }
2136 2181
diff --git a/mm/internal.h b/mm/internal.h
index e4e728bdf324..13333bc2eb68 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -17,6 +17,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
17 unsigned long floor, unsigned long ceiling); 17 unsigned long floor, unsigned long ceiling);
18 18
19extern void prep_compound_page(struct page *page, unsigned long order); 19extern void prep_compound_page(struct page *page, unsigned long order);
20extern void prep_compound_gigantic_page(struct page *page, unsigned long order);
20 21
21static inline void set_page_count(struct page *page, int v) 22static inline void set_page_count(struct page *page, int v)
22{ 23{
@@ -176,6 +177,34 @@ static inline void free_page_mlock(struct page *page) { }
176#endif /* CONFIG_UNEVICTABLE_LRU */ 177#endif /* CONFIG_UNEVICTABLE_LRU */
177 178
178/* 179/*
180 * Return the mem_map entry representing the 'offset' subpage within
181 * the maximally aligned gigantic page 'base'. Handle any discontiguity
182 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
183 */
184static inline struct page *mem_map_offset(struct page *base, int offset)
185{
186 if (unlikely(offset >= MAX_ORDER_NR_PAGES))
187 return pfn_to_page(page_to_pfn(base) + offset);
188 return base + offset;
189}
190
191/*
 192 * Iterator over all subpages within the maximally aligned gigantic
193 * page 'base'. Handle any discontiguity in the mem_map.
194 */
195static inline struct page *mem_map_next(struct page *iter,
196 struct page *base, int offset)
197{
198 if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
199 unsigned long pfn = page_to_pfn(base) + offset;
200 if (!pfn_valid(pfn))
201 return NULL;
202 return pfn_to_page(pfn);
203 }
204 return iter + 1;
205}
206
207/*
179 * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node, 208 * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
180 * so all functions starting at paging_init should be marked __init 209 * so all functions starting at paging_init should be marked __init
181 * in those cases. SPARSEMEM, however, allows for memory hotplug, 210 * in those cases. SPARSEMEM, however, allows for memory hotplug,
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 36f42573a335..e9493b1c1117 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -489,12 +489,6 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
489 int err; 489 int err;
490 struct vm_area_struct *first, *vma, *prev; 490 struct vm_area_struct *first, *vma, *prev;
491 491
492 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
493
494 err = migrate_prep();
495 if (err)
496 return ERR_PTR(err);
497 }
498 492
499 first = find_vma(mm, start); 493 first = find_vma(mm, start);
500 if (!first) 494 if (!first)
@@ -809,9 +803,13 @@ int do_migrate_pages(struct mm_struct *mm,
809 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags) 803 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
810{ 804{
811 int busy = 0; 805 int busy = 0;
812 int err = 0; 806 int err;
813 nodemask_t tmp; 807 nodemask_t tmp;
814 808
809 err = migrate_prep();
810 if (err)
811 return err;
812
815 down_read(&mm->mmap_sem); 813 down_read(&mm->mmap_sem);
816 814
817 err = migrate_vmas(mm, from_nodes, to_nodes, flags); 815 err = migrate_vmas(mm, from_nodes, to_nodes, flags);
@@ -974,6 +972,12 @@ static long do_mbind(unsigned long start, unsigned long len,
974 start, start + len, mode, mode_flags, 972 start, start + len, mode, mode_flags,
975 nmask ? nodes_addr(*nmask)[0] : -1); 973 nmask ? nodes_addr(*nmask)[0] : -1);
976 974
975 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
976
977 err = migrate_prep();
978 if (err)
979 return err;
980 }
977 down_write(&mm->mmap_sem); 981 down_write(&mm->mmap_sem);
978 vma = check_range(mm, start, end, nmask, 982 vma = check_range(mm, start, end, nmask,
979 flags | MPOL_MF_INVERT, &pagelist); 983 flags | MPOL_MF_INVERT, &pagelist);
diff --git a/mm/migrate.c b/mm/migrate.c
index 6602941bfab0..385db89f0c33 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -841,12 +841,12 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
841 struct page_to_node *pp; 841 struct page_to_node *pp;
842 LIST_HEAD(pagelist); 842 LIST_HEAD(pagelist);
843 843
844 migrate_prep();
844 down_read(&mm->mmap_sem); 845 down_read(&mm->mmap_sem);
845 846
846 /* 847 /*
847 * Build a list of pages to migrate 848 * Build a list of pages to migrate
848 */ 849 */
849 migrate_prep();
850 for (pp = pm; pp->node != MAX_NUMNODES; pp++) { 850 for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
851 struct vm_area_struct *vma; 851 struct vm_area_struct *vma;
852 struct page *page; 852 struct page *page;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 64e5b4bcd964..a0a01902f551 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -38,7 +38,6 @@ static DEFINE_SPINLOCK(zone_scan_mutex);
38 * badness - calculate a numeric value for how bad this task has been 38 * badness - calculate a numeric value for how bad this task has been
39 * @p: task struct of which task we should calculate 39 * @p: task struct of which task we should calculate
40 * @uptime: current uptime in seconds 40 * @uptime: current uptime in seconds
41 * @mem: target memory controller
42 * 41 *
43 * The formula used is relatively simple and documented inline in the 42 * The formula used is relatively simple and documented inline in the
44 * function. The main rationale is that we want to select a good task 43 * function. The main rationale is that we want to select a good task
@@ -295,6 +294,8 @@ static void dump_tasks(const struct mem_cgroup *mem)
295 continue; 294 continue;
296 if (mem && !task_in_mem_cgroup(p, mem)) 295 if (mem && !task_in_mem_cgroup(p, mem))
297 continue; 296 continue;
297 if (!thread_group_leader(p))
298 continue;
298 299
299 task_lock(p); 300 task_lock(p);
300 printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3d %3d %s\n", 301 printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3d %3d %s\n",
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d0a240fbb8bf..54069e64e3a8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -263,24 +263,39 @@ void prep_compound_page(struct page *page, unsigned long order)
263{ 263{
264 int i; 264 int i;
265 int nr_pages = 1 << order; 265 int nr_pages = 1 << order;
266
267 set_compound_page_dtor(page, free_compound_page);
268 set_compound_order(page, order);
269 __SetPageHead(page);
270 for (i = 1; i < nr_pages; i++) {
271 struct page *p = page + i;
272
273 __SetPageTail(p);
274 p->first_page = page;
275 }
276}
277
278#ifdef CONFIG_HUGETLBFS
279void prep_compound_gigantic_page(struct page *page, unsigned long order)
280{
281 int i;
282 int nr_pages = 1 << order;
266 struct page *p = page + 1; 283 struct page *p = page + 1;
267 284
268 set_compound_page_dtor(page, free_compound_page); 285 set_compound_page_dtor(page, free_compound_page);
269 set_compound_order(page, order); 286 set_compound_order(page, order);
270 __SetPageHead(page); 287 __SetPageHead(page);
271 for (i = 1; i < nr_pages; i++, p++) { 288 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
272 if (unlikely((i & (MAX_ORDER_NR_PAGES - 1)) == 0))
273 p = pfn_to_page(page_to_pfn(page) + i);
274 __SetPageTail(p); 289 __SetPageTail(p);
275 p->first_page = page; 290 p->first_page = page;
276 } 291 }
277} 292}
293#endif
278 294
279static void destroy_compound_page(struct page *page, unsigned long order) 295static void destroy_compound_page(struct page *page, unsigned long order)
280{ 296{
281 int i; 297 int i;
282 int nr_pages = 1 << order; 298 int nr_pages = 1 << order;
283 struct page *p = page + 1;
284 299
285 if (unlikely(compound_order(page) != order)) 300 if (unlikely(compound_order(page) != order))
286 bad_page(page); 301 bad_page(page);
@@ -288,9 +303,8 @@ static void destroy_compound_page(struct page *page, unsigned long order)
288 if (unlikely(!PageHead(page))) 303 if (unlikely(!PageHead(page)))
289 bad_page(page); 304 bad_page(page);
290 __ClearPageHead(page); 305 __ClearPageHead(page);
291 for (i = 1; i < nr_pages; i++, p++) { 306 for (i = 1; i < nr_pages; i++) {
292 if (unlikely((i & (MAX_ORDER_NR_PAGES - 1)) == 0)) 307 struct page *p = page + i;
293 p = pfn_to_page(page_to_pfn(page) + i);
294 308
295 if (unlikely(!PageTail(p) | 309 if (unlikely(!PageTail(p) |
296 (p->first_page != page))) 310 (p->first_page != page)))
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index b70a7fec1ff6..5e0ffd967452 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -130,10 +130,11 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
130 if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE) 130 if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
131 break; 131 break;
132 } 132 }
133 if (pfn < end_pfn) 133 page = __first_valid_page(start_pfn, end_pfn - start_pfn);
134 if ((pfn < end_pfn) || !page)
134 return -EBUSY; 135 return -EBUSY;
135 /* Check all pages are free or Marked as ISOLATED */ 136 /* Check all pages are free or Marked as ISOLATED */
136 zone = page_zone(pfn_to_page(pfn)); 137 zone = page_zone(page);
137 spin_lock_irqsave(&zone->lock, flags); 138 spin_lock_irqsave(&zone->lock, flags);
138 ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn); 139 ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn);
139 spin_unlock_irqrestore(&zone->lock, flags); 140 spin_unlock_irqrestore(&zone->lock, flags);
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index a91b5f8fcaf6..a13ea6401ae7 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -64,7 +64,7 @@ void __meminit vmemmap_verify(pte_t *pte, int node,
64 unsigned long pfn = pte_pfn(*pte); 64 unsigned long pfn = pte_pfn(*pte);
65 int actual_node = early_pfn_to_nid(pfn); 65 int actual_node = early_pfn_to_nid(pfn);
66 66
67 if (actual_node != node) 67 if (node_distance(actual_node, node) > LOCAL_DISTANCE)
68 printk(KERN_WARNING "[%lx-%lx] potential offnode " 68 printk(KERN_WARNING "[%lx-%lx] potential offnode "
69 "page_structs\n", start, end - 1); 69 "page_structs\n", start, end - 1);
70} 70}
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index f1cc03bbf6ac..ba6b0f5f7fac 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -178,7 +178,7 @@ static int vmap_page_range(unsigned long addr, unsigned long end,
178static inline int is_vmalloc_or_module_addr(const void *x) 178static inline int is_vmalloc_or_module_addr(const void *x)
179{ 179{
180 /* 180 /*
181 * x86-64 and sparc64 put modules in a special place, 181 * ARM, x86-64 and sparc64 put modules in a special place,
182 * and fall back on vmalloc() if that fails. Others 182 * and fall back on vmalloc() if that fails. Others
183 * just put it in the vmalloc space. 183 * just put it in the vmalloc space.
184 */ 184 */
@@ -592,6 +592,8 @@ static void free_unmap_vmap_area_addr(unsigned long addr)
592 592
593#define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE) 593#define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
594 594
595static bool vmap_initialized __read_mostly = false;
596
595struct vmap_block_queue { 597struct vmap_block_queue {
596 spinlock_t lock; 598 spinlock_t lock;
597 struct list_head free; 599 struct list_head free;
@@ -828,6 +830,9 @@ void vm_unmap_aliases(void)
828 int cpu; 830 int cpu;
829 int flush = 0; 831 int flush = 0;
830 832
833 if (unlikely(!vmap_initialized))
834 return;
835
831 for_each_possible_cpu(cpu) { 836 for_each_possible_cpu(cpu) {
832 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); 837 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
833 struct vmap_block *vb; 838 struct vmap_block *vb;
@@ -942,6 +947,8 @@ void __init vmalloc_init(void)
942 INIT_LIST_HEAD(&vbq->dirty); 947 INIT_LIST_HEAD(&vbq->dirty);
943 vbq->nr_dirty = 0; 948 vbq->nr_dirty = 0;
944 } 949 }
950
951 vmap_initialized = true;
945} 952}
946 953
947void unmap_kernel_range(unsigned long addr, unsigned long size) 954void unmap_kernel_range(unsigned long addr, unsigned long size)
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 916061f681b6..68ced4bf158c 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -3,11 +3,20 @@
3#include <linux/if_vlan.h> 3#include <linux/if_vlan.h>
4#include "vlan.h" 4#include "vlan.h"
5 5
6struct vlan_hwaccel_cb {
7 struct net_device *dev;
8};
9
10static inline struct vlan_hwaccel_cb *vlan_hwaccel_cb(struct sk_buff *skb)
11{
12 return (struct vlan_hwaccel_cb *)skb->cb;
13}
14
6/* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */ 15/* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */
7int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, 16int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
8 u16 vlan_tci, int polling) 17 u16 vlan_tci, int polling)
9{ 18{
10 struct net_device_stats *stats; 19 struct vlan_hwaccel_cb *cb = vlan_hwaccel_cb(skb);
11 20
12 if (skb_bond_should_drop(skb)) { 21 if (skb_bond_should_drop(skb)) {
13 dev_kfree_skb_any(skb); 22 dev_kfree_skb_any(skb);
@@ -15,23 +24,35 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
15 } 24 }
16 25
17 skb->vlan_tci = vlan_tci; 26 skb->vlan_tci = vlan_tci;
27 cb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
28
29 return (polling ? netif_receive_skb(skb) : netif_rx(skb));
30}
31EXPORT_SYMBOL(__vlan_hwaccel_rx);
32
33int vlan_hwaccel_do_receive(struct sk_buff *skb)
34{
35 struct vlan_hwaccel_cb *cb = vlan_hwaccel_cb(skb);
36 struct net_device *dev = cb->dev;
37 struct net_device_stats *stats;
38
18 netif_nit_deliver(skb); 39 netif_nit_deliver(skb);
19 40
20 skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); 41 if (dev == NULL) {
21 if (skb->dev == NULL) { 42 kfree_skb(skb);
22 dev_kfree_skb_any(skb); 43 return -1;
23 /* Not NET_RX_DROP, this is not being dropped
24 * due to congestion. */
25 return NET_RX_SUCCESS;
26 } 44 }
27 skb->dev->last_rx = jiffies; 45
46 skb->dev = dev;
47 skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
28 skb->vlan_tci = 0; 48 skb->vlan_tci = 0;
29 49
30 stats = &skb->dev->stats; 50 dev->last_rx = jiffies;
51
52 stats = &dev->stats;
31 stats->rx_packets++; 53 stats->rx_packets++;
32 stats->rx_bytes += skb->len; 54 stats->rx_bytes += skb->len;
33 55
34 skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tci);
35 switch (skb->pkt_type) { 56 switch (skb->pkt_type) {
36 case PACKET_BROADCAST: 57 case PACKET_BROADCAST:
37 break; 58 break;
@@ -43,13 +64,12 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
43 * This allows the VLAN to have a different MAC than the 64 * This allows the VLAN to have a different MAC than the
44 * underlying device, and still route correctly. */ 65 * underlying device, and still route correctly. */
45 if (!compare_ether_addr(eth_hdr(skb)->h_dest, 66 if (!compare_ether_addr(eth_hdr(skb)->h_dest,
46 skb->dev->dev_addr)) 67 dev->dev_addr))
47 skb->pkt_type = PACKET_HOST; 68 skb->pkt_type = PACKET_HOST;
48 break; 69 break;
49 }; 70 };
50 return (polling ? netif_receive_skb(skb) : netif_rx(skb)); 71 return 0;
51} 72}
52EXPORT_SYMBOL(__vlan_hwaccel_rx);
53 73
54struct net_device *vlan_dev_real_dev(const struct net_device *dev) 74struct net_device *vlan_dev_real_dev(const struct net_device *dev)
55{ 75{
diff --git a/net/9p/client.c b/net/9p/client.c
index 67717f69412e..4b529454616d 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -189,6 +189,9 @@ static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
189 printk(KERN_ERR "Couldn't grow tag array\n"); 189 printk(KERN_ERR "Couldn't grow tag array\n");
190 kfree(req->tc); 190 kfree(req->tc);
191 kfree(req->rc); 191 kfree(req->rc);
192 kfree(req->wq);
193 req->tc = req->rc = NULL;
194 req->wq = NULL;
192 return ERR_PTR(-ENOMEM); 195 return ERR_PTR(-ENOMEM);
193 } 196 }
194 req->tc->sdata = (char *) req->tc + sizeof(struct p9_fcall); 197 req->tc->sdata = (char *) req->tc + sizeof(struct p9_fcall);
@@ -311,12 +314,6 @@ static void p9_free_req(struct p9_client *c, struct p9_req_t *r)
311 r->status = REQ_STATUS_IDLE; 314 r->status = REQ_STATUS_IDLE;
312 if (tag != P9_NOTAG && p9_idpool_check(tag, c->tagpool)) 315 if (tag != P9_NOTAG && p9_idpool_check(tag, c->tagpool))
313 p9_idpool_put(tag, c->tagpool); 316 p9_idpool_put(tag, c->tagpool);
314
315 /* if this was a flush request we have to free response fcall */
316 if (r->rc->id == P9_RFLUSH) {
317 kfree(r->tc);
318 kfree(r->rc);
319 }
320} 317}
321 318
322/** 319/**
@@ -611,19 +608,21 @@ reterr:
611 608
612static struct p9_fid *p9_fid_create(struct p9_client *clnt) 609static struct p9_fid *p9_fid_create(struct p9_client *clnt)
613{ 610{
614 int err; 611 int ret;
615 struct p9_fid *fid; 612 struct p9_fid *fid;
613 unsigned long flags;
616 614
617 P9_DPRINTK(P9_DEBUG_FID, "clnt %p\n", clnt); 615 P9_DPRINTK(P9_DEBUG_FID, "clnt %p\n", clnt);
618 fid = kmalloc(sizeof(struct p9_fid), GFP_KERNEL); 616 fid = kmalloc(sizeof(struct p9_fid), GFP_KERNEL);
619 if (!fid) 617 if (!fid)
620 return ERR_PTR(-ENOMEM); 618 return ERR_PTR(-ENOMEM);
621 619
622 fid->fid = p9_idpool_get(clnt->fidpool); 620 ret = p9_idpool_get(clnt->fidpool);
623 if (fid->fid < 0) { 621 if (fid->fid < 0) {
624 err = -ENOSPC; 622 ret = -ENOSPC;
625 goto error; 623 goto error;
626 } 624 }
625 fid->fid = ret;
627 626
628 memset(&fid->qid, 0, sizeof(struct p9_qid)); 627 memset(&fid->qid, 0, sizeof(struct p9_qid));
629 fid->mode = -1; 628 fid->mode = -1;
@@ -632,27 +631,28 @@ static struct p9_fid *p9_fid_create(struct p9_client *clnt)
632 fid->clnt = clnt; 631 fid->clnt = clnt;
633 fid->aux = NULL; 632 fid->aux = NULL;
634 633
635 spin_lock(&clnt->lock); 634 spin_lock_irqsave(&clnt->lock, flags);
636 list_add(&fid->flist, &clnt->fidlist); 635 list_add(&fid->flist, &clnt->fidlist);
637 spin_unlock(&clnt->lock); 636 spin_unlock_irqrestore(&clnt->lock, flags);
638 637
639 return fid; 638 return fid;
640 639
641error: 640error:
642 kfree(fid); 641 kfree(fid);
643 return ERR_PTR(err); 642 return ERR_PTR(ret);
644} 643}
645 644
646static void p9_fid_destroy(struct p9_fid *fid) 645static void p9_fid_destroy(struct p9_fid *fid)
647{ 646{
648 struct p9_client *clnt; 647 struct p9_client *clnt;
648 unsigned long flags;
649 649
650 P9_DPRINTK(P9_DEBUG_FID, "fid %d\n", fid->fid); 650 P9_DPRINTK(P9_DEBUG_FID, "fid %d\n", fid->fid);
651 clnt = fid->clnt; 651 clnt = fid->clnt;
652 p9_idpool_put(fid->fid, clnt->fidpool); 652 p9_idpool_put(fid->fid, clnt->fidpool);
653 spin_lock(&clnt->lock); 653 spin_lock_irqsave(&clnt->lock, flags);
654 list_del(&fid->flist); 654 list_del(&fid->flist);
655 spin_unlock(&clnt->lock); 655 spin_unlock_irqrestore(&clnt->lock, flags);
656 kfree(fid); 656 kfree(fid);
657} 657}
658 658
@@ -818,7 +818,9 @@ struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
818 } 818 }
819 819
820 P9_DPRINTK(P9_DEBUG_9P, "<<< RATTACH qid %x.%llx.%x\n", 820 P9_DPRINTK(P9_DEBUG_9P, "<<< RATTACH qid %x.%llx.%x\n",
821 qid.type, qid.path, qid.version); 821 qid.type,
822 (unsigned long long)qid.path,
823 qid.version);
822 824
823 memmove(&fid->qid, &qid, sizeof(struct p9_qid)); 825 memmove(&fid->qid, &qid, sizeof(struct p9_qid));
824 826
@@ -865,7 +867,9 @@ p9_client_auth(struct p9_client *clnt, char *uname, u32 n_uname, char *aname)
865 } 867 }
866 868
867 P9_DPRINTK(P9_DEBUG_9P, "<<< RAUTH qid %x.%llx.%x\n", 869 P9_DPRINTK(P9_DEBUG_9P, "<<< RAUTH qid %x.%llx.%x\n",
868 qid.type, qid.path, qid.version); 870 qid.type,
871 (unsigned long long)qid.path,
872 qid.version);
869 873
870 memmove(&afid->qid, &qid, sizeof(struct p9_qid)); 874 memmove(&afid->qid, &qid, sizeof(struct p9_qid));
871 p9_free_req(clnt, req); 875 p9_free_req(clnt, req);
@@ -930,7 +934,8 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames,
930 934
931 for (count = 0; count < nwqids; count++) 935 for (count = 0; count < nwqids; count++)
932 P9_DPRINTK(P9_DEBUG_9P, "<<< [%d] %x.%llx.%x\n", 936 P9_DPRINTK(P9_DEBUG_9P, "<<< [%d] %x.%llx.%x\n",
933 count, wqids[count].type, wqids[count].path, 937 count, wqids[count].type,
938 (unsigned long long)wqids[count].path,
934 wqids[count].version); 939 wqids[count].version);
935 940
936 if (nwname) 941 if (nwname)
@@ -980,7 +985,9 @@ int p9_client_open(struct p9_fid *fid, int mode)
980 } 985 }
981 986
982 P9_DPRINTK(P9_DEBUG_9P, "<<< ROPEN qid %x.%llx.%x iounit %x\n", 987 P9_DPRINTK(P9_DEBUG_9P, "<<< ROPEN qid %x.%llx.%x iounit %x\n",
983 qid.type, qid.path, qid.version, iounit); 988 qid.type,
989 (unsigned long long)qid.path,
990 qid.version, iounit);
984 991
985 fid->mode = mode; 992 fid->mode = mode;
986 fid->iounit = iounit; 993 fid->iounit = iounit;
@@ -1023,7 +1030,9 @@ int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode,
1023 } 1030 }
1024 1031
1025 P9_DPRINTK(P9_DEBUG_9P, "<<< RCREATE qid %x.%llx.%x iounit %x\n", 1032 P9_DPRINTK(P9_DEBUG_9P, "<<< RCREATE qid %x.%llx.%x iounit %x\n",
1026 qid.type, qid.path, qid.version, iounit); 1033 qid.type,
1034 (unsigned long long)qid.path,
1035 qid.version, iounit);
1027 1036
1028 fid->mode = mode; 1037 fid->mode = mode;
1029 fid->iounit = iounit; 1038 fid->iounit = iounit;
@@ -1230,9 +1239,9 @@ struct p9_wstat *p9_client_stat(struct p9_fid *fid)
1230 "<<< name=%s uid=%s gid=%s muid=%s extension=(%s)\n" 1239 "<<< name=%s uid=%s gid=%s muid=%s extension=(%s)\n"
1231 "<<< uid=%d gid=%d n_muid=%d\n", 1240 "<<< uid=%d gid=%d n_muid=%d\n",
1232 ret->size, ret->type, ret->dev, ret->qid.type, 1241 ret->size, ret->type, ret->dev, ret->qid.type,
1233 ret->qid.path, ret->qid.version, ret->mode, 1242 (unsigned long long)ret->qid.path, ret->qid.version, ret->mode,
1234 ret->atime, ret->mtime, ret->length, ret->name, 1243 ret->atime, ret->mtime, (unsigned long long)ret->length,
1235 ret->uid, ret->gid, ret->muid, ret->extension, 1244 ret->name, ret->uid, ret->gid, ret->muid, ret->extension,
1236 ret->n_uid, ret->n_gid, ret->n_muid); 1245 ret->n_uid, ret->n_gid, ret->n_muid);
1237 1246
1238free_and_error: 1247free_and_error:
@@ -1255,9 +1264,9 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
1255 " name=%s uid=%s gid=%s muid=%s extension=(%s)\n" 1264 " name=%s uid=%s gid=%s muid=%s extension=(%s)\n"
1256 " uid=%d gid=%d n_muid=%d\n", 1265 " uid=%d gid=%d n_muid=%d\n",
1257 wst->size, wst->type, wst->dev, wst->qid.type, 1266 wst->size, wst->type, wst->dev, wst->qid.type,
1258 wst->qid.path, wst->qid.version, wst->mode, 1267 (unsigned long long)wst->qid.path, wst->qid.version, wst->mode,
1259 wst->atime, wst->mtime, wst->length, wst->name, 1268 wst->atime, wst->mtime, (unsigned long long)wst->length,
1260 wst->uid, wst->gid, wst->muid, wst->extension, 1269 wst->name, wst->uid, wst->gid, wst->muid, wst->extension,
1261 wst->n_uid, wst->n_gid, wst->n_muid); 1270 wst->n_uid, wst->n_gid, wst->n_muid);
1262 err = 0; 1271 err = 0;
1263 clnt = fid->clnt; 1272 clnt = fid->clnt;
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 8d6cc4777aae..2f1fe5fc1228 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -45,7 +45,6 @@
45#include <net/9p/transport.h> 45#include <net/9p/transport.h>
46#include <rdma/ib_verbs.h> 46#include <rdma/ib_verbs.h>
47#include <rdma/rdma_cm.h> 47#include <rdma/rdma_cm.h>
48#include <rdma/ib_verbs.h>
49 48
50#define P9_PORT 5640 49#define P9_PORT 5640
51#define P9_RDMA_SQ_DEPTH 32 50#define P9_RDMA_SQ_DEPTH 32
@@ -589,6 +588,9 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
589 if (IS_ERR(rdma->cm_id)) 588 if (IS_ERR(rdma->cm_id))
590 goto error; 589 goto error;
591 590
591 /* Associate the client with the transport */
592 client->trans = rdma;
593
592 /* Resolve the server's address */ 594 /* Resolve the server's address */
593 rdma->addr.sin_family = AF_INET; 595 rdma->addr.sin_family = AF_INET;
594 rdma->addr.sin_addr.s_addr = in_aton(addr); 596 rdma->addr.sin_addr.s_addr = in_aton(addr);
@@ -669,7 +671,6 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
669 if (err || (rdma->state != P9_RDMA_CONNECTED)) 671 if (err || (rdma->state != P9_RDMA_CONNECTED))
670 goto error; 672 goto error;
671 673
672 client->trans = rdma;
673 client->status = Connected; 674 client->status = Connected;
674 675
675 return 0; 676 return 0;
diff --git a/net/core/dev.c b/net/core/dev.c
index d9038e328cc1..9174c77d3112 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2218,6 +2218,9 @@ int netif_receive_skb(struct sk_buff *skb)
2218 int ret = NET_RX_DROP; 2218 int ret = NET_RX_DROP;
2219 __be16 type; 2219 __be16 type;
2220 2220
2221 if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
2222 return NET_RX_SUCCESS;
2223
2221 /* if we've gotten here through NAPI, check netpoll */ 2224 /* if we've gotten here through NAPI, check netpoll */
2222 if (netpoll_receive_skb(skb)) 2225 if (netpoll_receive_skb(skb))
2223 return NET_RX_DROP; 2226 return NET_RX_DROP;
diff --git a/net/core/scm.c b/net/core/scm.c
index 10f5c65f6a47..ab242cc1acca 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -75,6 +75,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
75 if (!fpl) 75 if (!fpl)
76 return -ENOMEM; 76 return -ENOMEM;
77 *fplp = fpl; 77 *fplp = fpl;
78 INIT_LIST_HEAD(&fpl->list);
78 fpl->count = 0; 79 fpl->count = 0;
79 } 80 }
80 fpp = &fpl->fp[fpl->count]; 81 fpp = &fpl->fp[fpl->count];
@@ -106,9 +107,25 @@ void __scm_destroy(struct scm_cookie *scm)
106 107
107 if (fpl) { 108 if (fpl) {
108 scm->fp = NULL; 109 scm->fp = NULL;
109 for (i=fpl->count-1; i>=0; i--) 110 if (current->scm_work_list) {
110 fput(fpl->fp[i]); 111 list_add_tail(&fpl->list, current->scm_work_list);
111 kfree(fpl); 112 } else {
113 LIST_HEAD(work_list);
114
115 current->scm_work_list = &work_list;
116
117 list_add(&fpl->list, &work_list);
118 while (!list_empty(&work_list)) {
119 fpl = list_first_entry(&work_list, struct scm_fp_list, list);
120
121 list_del(&fpl->list);
122 for (i=fpl->count-1; i>=0; i--)
123 fput(fpl->fp[i]);
124 kfree(fpl);
125 }
126
127 current->scm_work_list = NULL;
128 }
112 } 129 }
113} 130}
114 131
@@ -284,6 +301,7 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
284 301
285 new_fpl = kmalloc(sizeof(*fpl), GFP_KERNEL); 302 new_fpl = kmalloc(sizeof(*fpl), GFP_KERNEL);
286 if (new_fpl) { 303 if (new_fpl) {
304 INIT_LIST_HEAD(&new_fpl->list);
287 for (i=fpl->count-1; i>=0; i--) 305 for (i=fpl->count-1; i>=0; i--)
288 get_file(fpl->fp[i]); 306 get_file(fpl->fp[i]);
289 memcpy(new_fpl, fpl, sizeof(*fpl)); 307 memcpy(new_fpl, fpl, sizeof(*fpl));
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index eccb7165a80c..c5aca0bb116a 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1374,8 +1374,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1374 sk->sk_state == TCP_CLOSE || 1374 sk->sk_state == TCP_CLOSE ||
1375 (sk->sk_shutdown & RCV_SHUTDOWN) || 1375 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1376 !timeo || 1376 !timeo ||
1377 signal_pending(current) || 1377 signal_pending(current))
1378 (flags & MSG_PEEK))
1379 break; 1378 break;
1380 } else { 1379 } else {
1381 if (sock_flag(sk, SOCK_DONE)) 1380 if (sock_flag(sk, SOCK_DONE))
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index 07735ed280d7..55dc6beab9aa 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -33,6 +33,7 @@ __xfrm4_init_tempsel(struct xfrm_state *x, struct flowi *fl,
33 x->sel.dport_mask = htons(0xffff); 33 x->sel.dport_mask = htons(0xffff);
34 x->sel.sport = xfrm_flowi_sport(fl); 34 x->sel.sport = xfrm_flowi_sport(fl);
35 x->sel.sport_mask = htons(0xffff); 35 x->sel.sport_mask = htons(0xffff);
36 x->sel.family = AF_INET;
36 x->sel.prefixlen_d = 32; 37 x->sel.prefixlen_d = 32;
37 x->sel.prefixlen_s = 32; 38 x->sel.prefixlen_s = 32;
38 x->sel.proto = fl->proto; 39 x->sel.proto = fl->proto;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index eea9542728ca..d9da5eb9dcb2 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2483,8 +2483,10 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2483 if (!idev && dev->mtu >= IPV6_MIN_MTU) 2483 if (!idev && dev->mtu >= IPV6_MIN_MTU)
2484 idev = ipv6_add_dev(dev); 2484 idev = ipv6_add_dev(dev);
2485 2485
2486 if (idev) 2486 if (idev) {
2487 idev->if_flags |= IF_READY; 2487 idev->if_flags |= IF_READY;
2488 run_pending = 1;
2489 }
2488 } else { 2490 } else {
2489 if (!addrconf_qdisc_ok(dev)) { 2491 if (!addrconf_qdisc_ok(dev)) {
2490 /* device is still not ready. */ 2492 /* device is still not ready. */
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 71e259e866a1..8b48512ebf6a 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -138,6 +138,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
138 int peeked; 138 int peeked;
139 int err; 139 int err;
140 int is_udplite = IS_UDPLITE(sk); 140 int is_udplite = IS_UDPLITE(sk);
141 int is_udp4;
141 142
142 if (addr_len) 143 if (addr_len)
143 *addr_len=sizeof(struct sockaddr_in6); 144 *addr_len=sizeof(struct sockaddr_in6);
@@ -158,6 +159,8 @@ try_again:
158 else if (copied < ulen) 159 else if (copied < ulen)
159 msg->msg_flags |= MSG_TRUNC; 160 msg->msg_flags |= MSG_TRUNC;
160 161
162 is_udp4 = (skb->protocol == htons(ETH_P_IP));
163
161 /* 164 /*
162 * If checksum is needed at all, try to do it while copying the 165 * If checksum is needed at all, try to do it while copying the
163 * data. If the data is truncated, or if we only want a partial 166 * data. If the data is truncated, or if we only want a partial
@@ -180,9 +183,14 @@ try_again:
180 if (err) 183 if (err)
181 goto out_free; 184 goto out_free;
182 185
183 if (!peeked) 186 if (!peeked) {
184 UDP6_INC_STATS_USER(sock_net(sk), 187 if (is_udp4)
185 UDP_MIB_INDATAGRAMS, is_udplite); 188 UDP_INC_STATS_USER(sock_net(sk),
189 UDP_MIB_INDATAGRAMS, is_udplite);
190 else
191 UDP6_INC_STATS_USER(sock_net(sk),
192 UDP_MIB_INDATAGRAMS, is_udplite);
193 }
186 194
187 sock_recv_timestamp(msg, sk, skb); 195 sock_recv_timestamp(msg, sk, skb);
188 196
@@ -196,7 +204,7 @@ try_again:
196 sin6->sin6_flowinfo = 0; 204 sin6->sin6_flowinfo = 0;
197 sin6->sin6_scope_id = 0; 205 sin6->sin6_scope_id = 0;
198 206
199 if (skb->protocol == htons(ETH_P_IP)) 207 if (is_udp4)
200 ipv6_addr_set(&sin6->sin6_addr, 0, 0, 208 ipv6_addr_set(&sin6->sin6_addr, 0, 0,
201 htonl(0xffff), ip_hdr(skb)->saddr); 209 htonl(0xffff), ip_hdr(skb)->saddr);
202 else { 210 else {
@@ -207,7 +215,7 @@ try_again:
207 } 215 }
208 216
209 } 217 }
210 if (skb->protocol == htons(ETH_P_IP)) { 218 if (is_udp4) {
211 if (inet->cmsg_flags) 219 if (inet->cmsg_flags)
212 ip_cmsg_recv(msg, skb); 220 ip_cmsg_recv(msg, skb);
213 } else { 221 } else {
@@ -228,8 +236,14 @@ out:
228 236
229csum_copy_err: 237csum_copy_err:
230 lock_sock(sk); 238 lock_sock(sk);
231 if (!skb_kill_datagram(sk, skb, flags)) 239 if (!skb_kill_datagram(sk, skb, flags)) {
232 UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); 240 if (is_udp4)
241 UDP_INC_STATS_USER(sock_net(sk),
242 UDP_MIB_INERRORS, is_udplite);
243 else
244 UDP6_INC_STATS_USER(sock_net(sk),
245 UDP_MIB_INERRORS, is_udplite);
246 }
233 release_sock(sk); 247 release_sock(sk);
234 248
235 if (flags & MSG_DONTWAIT) 249 if (flags & MSG_DONTWAIT)
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index 89884a4f23aa..60c78cfc2737 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -34,6 +34,7 @@ __xfrm6_init_tempsel(struct xfrm_state *x, struct flowi *fl,
34 x->sel.dport_mask = htons(0xffff); 34 x->sel.dport_mask = htons(0xffff);
35 x->sel.sport = xfrm_flowi_sport(fl); 35 x->sel.sport = xfrm_flowi_sport(fl);
36 x->sel.sport_mask = htons(0xffff); 36 x->sel.sport_mask = htons(0xffff);
37 x->sel.family = AF_INET6;
37 x->sel.prefixlen_d = 128; 38 x->sel.prefixlen_d = 128;
38 x->sel.prefixlen_s = 128; 39 x->sel.prefixlen_s = 128;
39 x->sel.proto = fl->proto; 40 x->sel.proto = fl->proto;
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 9c06b9f86ad4..c39b6a994133 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -21,6 +21,7 @@
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/netdevice.h> 22#include <linux/netdevice.h>
23#include <linux/rculist.h> 23#include <linux/rculist.h>
24#include <linux/rtnetlink.h>
24 25
25#include <net/netfilter/nf_conntrack.h> 26#include <net/netfilter/nf_conntrack.h>
26#include <net/netfilter/nf_conntrack_l3proto.h> 27#include <net/netfilter/nf_conntrack_l3proto.h>
@@ -167,10 +168,12 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
167 */ 168 */
168 synchronize_rcu(); 169 synchronize_rcu();
169 170
171 rtnl_lock();
170 spin_lock_bh(&nf_conntrack_lock); 172 spin_lock_bh(&nf_conntrack_lock);
171 for_each_net(net) 173 for_each_net(net)
172 __nf_conntrack_helper_unregister(me, net); 174 __nf_conntrack_helper_unregister(me, net);
173 spin_unlock_bh(&nf_conntrack_lock); 175 spin_unlock_bh(&nf_conntrack_lock);
176 rtnl_unlock();
174} 177}
175EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister); 178EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister);
176 179
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index a59a307e685d..592d73344d46 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -22,6 +22,7 @@
22#include <linux/notifier.h> 22#include <linux/notifier.h>
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/netdevice.h> 24#include <linux/netdevice.h>
25#include <linux/rtnetlink.h>
25 26
26#include <net/netfilter/nf_conntrack.h> 27#include <net/netfilter/nf_conntrack.h>
27#include <net/netfilter/nf_conntrack_l3proto.h> 28#include <net/netfilter/nf_conntrack_l3proto.h>
@@ -221,8 +222,10 @@ void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto)
221 synchronize_rcu(); 222 synchronize_rcu();
222 223
 223 /* Remove all conntrack entries for this protocol */ 224 /* Remove all conntrack entries for this protocol */
225 rtnl_lock();
224 for_each_net(net) 226 for_each_net(net)
225 nf_ct_iterate_cleanup(net, kill_l3proto, proto); 227 nf_ct_iterate_cleanup(net, kill_l3proto, proto);
228 rtnl_unlock();
226} 229}
227EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_unregister); 230EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_unregister);
228 231
@@ -333,8 +336,10 @@ void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto)
333 synchronize_rcu(); 336 synchronize_rcu();
334 337
 335 /* Remove all conntrack entries for this protocol */ 338 /* Remove all conntrack entries for this protocol */
339 rtnl_lock();
336 for_each_net(net) 340 for_each_net(net)
337 nf_ct_iterate_cleanup(net, kill_l4proto, l4proto); 341 nf_ct_iterate_cleanup(net, kill_l4proto, l4proto);
342 rtnl_unlock();
338} 343}
339EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_unregister); 344EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_unregister);
340 345
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c
index f949a482b007..25ba3bd57e66 100644
--- a/net/rfkill/rfkill.c
+++ b/net/rfkill/rfkill.c
@@ -603,7 +603,7 @@ static int rfkill_check_duplicity(const struct rfkill *rfkill)
603 } 603 }
604 604
605 /* 0: first switch of its kind */ 605 /* 0: first switch of its kind */
606 return test_bit(rfkill->type, seen); 606 return (test_bit(rfkill->type, seen)) ? 1 : 0;
607} 607}
608 608
609static int rfkill_add_switch(struct rfkill *rfkill) 609static int rfkill_add_switch(struct rfkill *rfkill)
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 4d3c6071b9a4..eb90f77bb0e2 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1302,14 +1302,23 @@ static void unix_destruct_fds(struct sk_buff *skb)
1302 sock_wfree(skb); 1302 sock_wfree(skb);
1303} 1303}
1304 1304
1305static void unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) 1305static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1306{ 1306{
1307 int i; 1307 int i;
1308
1309 /*
1310 * Need to duplicate file references for the sake of garbage
1311 * collection. Otherwise a socket in the fps might become a
1312 * candidate for GC while the skb is not yet queued.
1313 */
1314 UNIXCB(skb).fp = scm_fp_dup(scm->fp);
1315 if (!UNIXCB(skb).fp)
1316 return -ENOMEM;
1317
1308 for (i=scm->fp->count-1; i>=0; i--) 1318 for (i=scm->fp->count-1; i>=0; i--)
1309 unix_inflight(scm->fp->fp[i]); 1319 unix_inflight(scm->fp->fp[i]);
1310 UNIXCB(skb).fp = scm->fp;
1311 skb->destructor = unix_destruct_fds; 1320 skb->destructor = unix_destruct_fds;
1312 scm->fp = NULL; 1321 return 0;
1313} 1322}
1314 1323
1315/* 1324/*
@@ -1368,8 +1377,11 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1368 goto out; 1377 goto out;
1369 1378
1370 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); 1379 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1371 if (siocb->scm->fp) 1380 if (siocb->scm->fp) {
1372 unix_attach_fds(siocb->scm, skb); 1381 err = unix_attach_fds(siocb->scm, skb);
1382 if (err)
1383 goto out_free;
1384 }
1373 unix_get_secdata(siocb->scm, skb); 1385 unix_get_secdata(siocb->scm, skb);
1374 1386
1375 skb_reset_transport_header(skb); 1387 skb_reset_transport_header(skb);
@@ -1538,8 +1550,13 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1538 size = min_t(int, size, skb_tailroom(skb)); 1550 size = min_t(int, size, skb_tailroom(skb));
1539 1551
1540 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); 1552 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1541 if (siocb->scm->fp) 1553 if (siocb->scm->fp) {
1542 unix_attach_fds(siocb->scm, skb); 1554 err = unix_attach_fds(siocb->scm, skb);
1555 if (err) {
1556 kfree_skb(skb);
1557 goto out_err;
1558 }
1559 }
1543 1560
1544 if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) { 1561 if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) {
1545 kfree_skb(skb); 1562 kfree_skb(skb);
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 2a27b84f740b..6d4a9a8de5ef 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -186,8 +186,17 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
186 */ 186 */
187 struct sock *sk = unix_get_socket(*fp++); 187 struct sock *sk = unix_get_socket(*fp++);
188 if (sk) { 188 if (sk) {
189 hit = true; 189 struct unix_sock *u = unix_sk(sk);
190 func(unix_sk(sk)); 190
191 /*
192 * Ignore non-candidates, they could
193 * have been added to the queues after
194 * starting the garbage collection
195 */
196 if (u->gc_candidate) {
197 hit = true;
198 func(u);
199 }
191 } 200 }
192 } 201 }
193 if (hit && hitlist != NULL) { 202 if (hit && hitlist != NULL) {
@@ -249,11 +258,11 @@ static void inc_inflight_move_tail(struct unix_sock *u)
249{ 258{
250 atomic_long_inc(&u->inflight); 259 atomic_long_inc(&u->inflight);
251 /* 260 /*
252 * If this is still a candidate, move it to the end of the 261 * If this still might be part of a cycle, move it to the end
253 * list, so that it's checked even if it was already passed 262 * of the list, so that it's checked even if it was already
254 * over 263 * passed over
255 */ 264 */
256 if (u->gc_candidate) 265 if (u->gc_maybe_cycle)
257 list_move_tail(&u->link, &gc_candidates); 266 list_move_tail(&u->link, &gc_candidates);
258} 267}
259 268
@@ -267,6 +276,7 @@ void unix_gc(void)
267 struct unix_sock *next; 276 struct unix_sock *next;
268 struct sk_buff_head hitlist; 277 struct sk_buff_head hitlist;
269 struct list_head cursor; 278 struct list_head cursor;
279 LIST_HEAD(not_cycle_list);
270 280
271 spin_lock(&unix_gc_lock); 281 spin_lock(&unix_gc_lock);
272 282
@@ -282,10 +292,14 @@ void unix_gc(void)
 	 *
 	 * Holding unix_gc_lock will protect these candidates from
 	 * being detached, and hence from gaining an external
-	 * reference. This also means, that since there are no
-	 * possible receivers, the receive queues of these sockets are
-	 * static during the GC, even though the dequeue is done
-	 * before the detach without atomicity guarantees.
+	 * reference. Since there are no possible receivers, all
+	 * buffers currently on the candidates' queues stay there
+	 * during the garbage collection.
+	 *
+	 * We also know that no new candidate can be added onto the
+	 * receive queues. Other, non candidate sockets _can_ be
+	 * added to queue, so we must make sure only to touch
+	 * candidates.
 	 */
 	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
 		long total_refs;
@@ -299,6 +313,7 @@ void unix_gc(void)
 		if (total_refs == inflight_refs) {
 			list_move_tail(&u->link, &gc_candidates);
 			u->gc_candidate = 1;
+			u->gc_maybe_cycle = 1;
 		}
 	}
 
@@ -325,14 +340,24 @@ void unix_gc(void)
 		list_move(&cursor, &u->link);
 
 		if (atomic_long_read(&u->inflight) > 0) {
-			list_move_tail(&u->link, &gc_inflight_list);
-			u->gc_candidate = 0;
+			list_move_tail(&u->link, &not_cycle_list);
+			u->gc_maybe_cycle = 0;
 			scan_children(&u->sk, inc_inflight_move_tail, NULL);
 		}
 	}
 	list_del(&cursor);
 
 	/*
+	 * not_cycle_list contains those sockets which do not make up a
+	 * cycle. Restore these to the inflight list.
+	 */
+	while (!list_empty(&not_cycle_list)) {
+		u = list_entry(not_cycle_list.next, struct unix_sock, link);
+		u->gc_candidate = 0;
+		list_move_tail(&u->link, &gc_inflight_list);
+	}
+
+	/*
 	 * Now gc_candidates contains only garbage. Restore original
 	 * inflight counters for these as well, and remove the skbuffs
 	 * which are creating the cycle(s).
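
The garbage.c changes above separate "was a candidate when collection started" (gc_candidate) from "might still be part of a cycle" (gc_maybe_cycle): sockets whose in-flight count rises during the scan are parked on not_cycle_list and restored to gc_inflight_list afterwards, and scan_inflight() now ignores anything that was not a candidate to begin with, so only true cycles remain on gc_candidates. A rough, self-contained model of that bookkeeping, using a plain next-pointer list rather than the kernel's struct list_head:

/*
 * Rough model of the candidate bookkeeping: sockets that turn out to be
 * externally reachable are parked on a "not a cycle" list and restored,
 * and only what is still on the candidate list at the end is garbage.
 */
#include <stdio.h>

struct sock {
	const char *name;
	int external_refs;	/* refs not accounted to in-flight fds */
	int gc_candidate;
	struct sock *next;
};

int main(void)
{
	struct sock a = { "a", 0, 0, NULL };
	struct sock b = { "b", 1, 0, &a };	/* still reachable from user space */
	struct sock *inflight = &b, *candidates = NULL, *not_cycle = NULL, *s;

	/* Pass 1: everything on the in-flight list starts as a candidate. */
	while ((s = inflight)) {
		inflight = s->next;
		s->gc_candidate = 1;
		s->next = candidates;
		candidates = s;
	}

	/* Pass 2: anything with an external reference cannot be garbage. */
	struct sock **pp = &candidates;
	while ((s = *pp)) {
		if (s->external_refs > 0) {
			*pp = s->next;
			s->next = not_cycle;
			not_cycle = s;
		} else {
			pp = &s->next;
		}
	}

	/* Restore the non-cycle sockets, clearing the candidate flag. */
	while ((s = not_cycle)) {
		not_cycle = s->next;
		s->gc_candidate = 0;
		s->next = inflight;
		inflight = s;
	}

	for (s = candidates; s; s = s->next)
		printf("garbage: %s\n", s->name);	/* prints "a" */
	return 0;
}
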
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 25872747762c..058f04f54b90 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -315,9 +315,9 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
 		return;
 	}
 
-	spin_lock(&xfrm_policy_gc_lock);
+	spin_lock_bh(&xfrm_policy_gc_lock);
 	hlist_add_head(&policy->bydst, &xfrm_policy_gc_list);
-	spin_unlock(&xfrm_policy_gc_lock);
+	spin_unlock_bh(&xfrm_policy_gc_lock);
 
 	schedule_work(&xfrm_policy_gc_work);
 }
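
The xfrm_policy.c hunk switches the GC-list lock to the _bh variants, which disable softirq processing on the local CPU while the lock is held; presumably the same lock can also be taken from softirq context, and a plain spin_lock here would let a softirq interrupt the holder and spin on the lock forever. A loose userspace analogy, using a signal handler in place of a softirq and signal blocking in place of spin_lock_bh():

/*
 * Userspace analogy only: the signal handler plays the softirq, and
 * blocking SIGALRM around the critical section plays spin_lock_bh().
 */
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t gc_list_len;

static void fake_softirq(int sig)
{
	(void)sig;
	gc_list_len++;	/* async path touching the same data */
}

int main(void)
{
	sigset_t block, old;

	signal(SIGALRM, fake_softirq);
	sigemptyset(&block);
	sigaddset(&block, SIGALRM);

	sigprocmask(SIG_BLOCK, &block, &old);	/* spin_lock_bh() analogue */
	raise(SIGALRM);				/* "softirq" raised: stays pending */
	gc_list_len++;				/* critical section, handler cannot run */
	sigprocmask(SIG_SETMASK, &old, NULL);	/* unlock: pending handler runs now */

	printf("list length: %d\n", (int)gc_list_len);	/* prints 2 */
	return 0;
}
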
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 4a8a1abb59ee..a278a6f3b991 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1816,7 +1816,7 @@ static int copy_to_user_kmaddress(struct xfrm_kmaddress *k, struct sk_buff *skb)
 	uk.family = k->family;
 	uk.reserved = k->reserved;
 	memcpy(&uk.local, &k->local, sizeof(uk.local));
-	memcpy(&uk.remote, &k->local, sizeof(uk.remote));
+	memcpy(&uk.remote, &k->remote, sizeof(uk.remote));
 
 	return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk);
 }
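
The xfrm_user.c fix is a one-word copy-and-paste bug: the kmaddress copied out to userspace had its remote field filled from k->local, so both addresses came out identical, and nothing in the types could catch it because both members have the same layout. A toy illustration of the corrected copy, with stand-in structs rather than the real xfrm definitions:

/*
 * Toy version of the copy_to_user_kmaddress() fix: copy remote from remote,
 * not from local. The structs here are stand-ins, not the xfrm definitions.
 */
#include <stdio.h>
#include <string.h>

struct addr { unsigned int a[4]; };
struct kmaddress { struct addr local, remote; };

int main(void)
{
	struct kmaddress k = { .local = { { 1 } }, .remote = { { 2 } } };
	struct kmaddress uk;

	memcpy(&uk.local, &k.local, sizeof(uk.local));
	memcpy(&uk.remote, &k.remote, sizeof(uk.remote));	/* was &k.local */

	printf("local=%u remote=%u\n", uk.local.a[0], uk.remote.a[0]);
	return 0;
}
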
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index ba6bf5d5abf9..1264b8e2829d 100644
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -15,15 +15,18 @@ set -e
 version=$KERNELRELEASE
 revision=`cat .version`
 tmpdir="$objtree/debian/tmp"
+fwdir="$objtree/debian/fwtmp"
 packagename=linux-$version
+fwpackagename=linux-firmware-image
 
 if [ "$ARCH" == "um" ] ; then
 	packagename=user-mode-linux-$version
 fi
 
 # Setup the directory structure
-rm -rf "$tmpdir"
+rm -rf "$tmpdir" "$fwdir"
 mkdir -p "$tmpdir/DEBIAN" "$tmpdir/lib" "$tmpdir/boot"
+mkdir -p "$fwdir/DEBIAN" "$fwdir/lib"
 if [ "$ARCH" == "um" ] ; then
 	mkdir -p "$tmpdir/usr/lib/uml/modules/$version" "$tmpdir/usr/share/doc/$packagename" "$tmpdir/usr/bin"
 fi
@@ -107,6 +110,7 @@ Standards-Version: 3.6.1
 
 Package: $packagename
 Provides: kernel-image-$version, linux-image-$version
+Suggests: $fwpackagename
 Architecture: any
 Description: Linux kernel, version $version
  This package contains the Linux kernel, modules and corresponding other
@@ -118,8 +122,24 @@ fi
 chown -R root:root "$tmpdir"
 chmod -R go-w "$tmpdir"
 
+# Do we have firmware? Move it out of the way and build it into a package.
+if [ -e "$tmpdir/lib/firmware" ]; then
+	mv "$tmpdir/lib/firmware" "$fwdir/lib/"
+
+	cat <<EOF >> debian/control
+
+Package: $fwpackagename
+Architecture: all
+Description: Linux kernel firmware, version $version
+ This package contains firmware from the Linux kernel, version $version
+EOF
+
+	dpkg-gencontrol -isp -p$fwpackagename -P"$fwdir"
+	dpkg --build "$fwdir" ..
+fi
+
 # Perform the final magic
-dpkg-gencontrol -isp
+dpkg-gencontrol -isp -p$packagename
 dpkg --build "$tmpdir" ..
 
 exit 0
diff --git a/sound/isa/Kconfig b/sound/isa/Kconfig
index 660beb41f767..ce0aa044e274 100644
--- a/sound/isa/Kconfig
+++ b/sound/isa/Kconfig
@@ -211,7 +211,7 @@ config SND_GUSCLASSIC
 
 config SND_GUSEXTREME
 	tristate "Gravis UltraSound Extreme"
-	select SND_HWDEP
+	select SND_OPL3_LIB
 	select SND_MPU401_UART
 	select SND_PCM
 	help
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
index 743d77922bce..c39af986bff1 100644
--- a/sound/pci/hda/hda_proc.c
+++ b/sound/pci/hda/hda_proc.c
@@ -483,6 +483,8 @@ static void print_gpio(struct snd_info_buffer *buffer,
 		    (gpio & AC_GPIO_UNSOLICITED) ? 1 : 0,
 		    (gpio & AC_GPIO_WAKE) ? 1 : 0);
 	max = gpio & AC_GPIO_IO_COUNT;
+	if (!max || max > 8)
+		return;
 	enable = snd_hda_codec_read(codec, nid, 0,
 				    AC_VERB_GET_GPIO_MASK, 0);
 	direction = snd_hda_codec_read(codec, nid, 0,
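
The hda_proc.c hunk adds a sanity check on the GPIO count before the per-pin dump: the value comes straight from the codec, so a buggy or confused codec could report zero or an absurdly large count. A small sketch of the same defensive pattern, treating a hardware-reported count as untrusted input:

/*
 * Sketch of the defensive check added above: bail out when a
 * hardware-reported count is zero or larger than the loop can handle.
 */
#include <stdio.h>

#define MAX_GPIOS 8

static void print_gpio_bits(unsigned int reported_count, unsigned int state)
{
	if (!reported_count || reported_count > MAX_GPIOS)
		return;		/* bogus hardware value: print nothing */

	for (unsigned int i = 0; i < reported_count; i++)
		printf("gpio%u: %u\n", i, (state >> i) & 1);
}

int main(void)
{
	print_gpio_bits(3, 0x5);	/* prints three lines */
	print_gpio_bits(42, 0x5);	/* silently ignored */
	return 0;
}
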
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index d3fd432cb3ea..686c77491dea 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -3861,6 +3861,8 @@ static const char *ad1884a_models[AD1884A_MODELS] = {
 static struct snd_pci_quirk ad1884a_cfg_tbl[] = {
 	SND_PCI_QUIRK(0x103c, 0x3030, "HP", AD1884A_MOBILE),
 	SND_PCI_QUIRK(0x103c, 0x3056, "HP", AD1884A_MOBILE),
+	SND_PCI_QUIRK(0x103c, 0x30e7, "HP EliteBook 8530p", AD1884A_LAPTOP),
+	SND_PCI_QUIRK(0x103c, 0x3614, "HP 6730s", AD1884A_LAPTOP),
 	SND_PCI_QUIRK(0x17aa, 0x20ac, "Thinkpad X300", AD1884A_THINKPAD),
 	{}
 };
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index a4666c96a44f..a378c0145125 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -8469,6 +8469,7 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = {
 	SND_PCI_QUIRK(0x17aa, 0x3bfd, "Lenovo NB0763", ALC883_LENOVO_NB0763),
 	SND_PCI_QUIRK(0x17aa, 0x101d, "Lenovo Sky", ALC888_LENOVO_SKY),
 	SND_PCI_QUIRK(0x17c0, 0x4071, "MEDION MD2", ALC883_MEDION_MD2),
+	SND_PCI_QUIRK(0x17c0, 0x4085, "MEDION MD96630", ALC888_LENOVO_MS7195_DIG),
 	SND_PCI_QUIRK(0x17f2, 0x5000, "Albatron KI690-AM2", ALC883_6ST_DIG),
 	SND_PCI_QUIRK(0x1991, 0x5625, "Haier W66", ALC883_HAIER_W66),
 	SND_PCI_QUIRK(0x8086, 0x0001, "DG33BUC", ALC883_3ST_6ch_INTEL),
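
The two quirk-table hunks above only add PCI subsystem IDs so the listed HP and MEDION machines pick a known-good board configuration at probe time; the mechanism is ordinary table-driven lookup against a terminator-ended static array. A minimal sketch of that lookup, with made-up IDs and a plain struct instead of ALSA's snd_pci_quirk:

/*
 * Minimal sketch of quirk-table lookup: match a device's subsystem IDs
 * against a static table to pick a model. The struct and IDs are
 * illustrative, not the ALSA snd_pci_quirk API.
 */
#include <stdio.h>

struct quirk {
	unsigned short subvendor, subdevice;
	const char *name;
	int model;
};

static const struct quirk quirks[] = {
	{ 0x103c, 0x30e7, "HP EliteBook 8530p", 2 /* LAPTOP */ },
	{ 0x17c0, 0x4085, "MEDION MD96630",     5 /* MS7195_DIG */ },
	{ 0, 0, NULL, 0 }	/* terminator, like the {} entry above */
};

static int lookup_model(unsigned short subvendor, unsigned short subdevice)
{
	for (const struct quirk *q = quirks; q->name; q++)
		if (q->subvendor == subvendor && q->subdevice == subdevice)
			return q->model;
	return -1;	/* not in the table: fall back to auto config */
}

int main(void)
{
	printf("model=%d\n", lookup_model(0x103c, 0x30e7));	/* 2 */
	printf("model=%d\n", lookup_model(0x1234, 0x0001));	/* -1 */
	return 0;
}
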
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index d723543beadd..736246f98acc 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -4548,11 +4548,20 @@ static int snd_hdsp_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigne
 {
 	struct hdsp *hdsp = (struct hdsp *)hw->private_data;
 	void __user *argp = (void __user *)arg;
+	int err;
 
 	switch (cmd) {
 	case SNDRV_HDSP_IOCTL_GET_PEAK_RMS: {
 		struct hdsp_peak_rms __user *peak_rms = (struct hdsp_peak_rms __user *)arg;
 
+		err = hdsp_check_for_iobox(hdsp);
+		if (err < 0)
+			return err;
+
+		err = hdsp_check_for_firmware(hdsp, 1);
+		if (err < 0)
+			return err;
+
 		if (!(hdsp->state & HDSP_FirmwareLoaded)) {
 			snd_printk(KERN_ERR "Hammerfall-DSP: firmware needs to be uploaded to the card.\n");
 			return -EINVAL;
@@ -4572,10 +4581,14 @@ static int snd_hdsp_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigne
 		unsigned long flags;
 		int i;
 
-		if (!(hdsp->state & HDSP_FirmwareLoaded)) {
-			snd_printk(KERN_ERR "Hammerfall-DSP: Firmware needs to be uploaded to the card.\n");
-			return -EINVAL;
-		}
+		err = hdsp_check_for_iobox(hdsp);
+		if (err < 0)
+			return err;
+
+		err = hdsp_check_for_firmware(hdsp, 1);
+		if (err < 0)
+			return err;
+
 		spin_lock_irqsave(&hdsp->lock, flags);
 		info.pref_sync_ref = (unsigned char)hdsp_pref_sync_ref(hdsp);
 		info.wordclock_sync_check = (unsigned char)hdsp_wc_sync_check(hdsp);
@@ -5045,6 +5058,10 @@ static int __devinit snd_hdsp_create(struct snd_card *card,
 	/* we wait 2 seconds to let freshly inserted cardbus cards do their hardware init */
 	ssleep(2);
 
+	err = hdsp_check_for_iobox(hdsp);
+	if (err < 0)
+		return err;
+
 	if ((hdsp_read (hdsp, HDSP_statusRegister) & HDSP_DllError) != 0) {
 #ifdef HDSP_FW_LOADER
 		if ((err = hdsp_request_fw_loader(hdsp)) < 0)
@@ -5057,7 +5074,7 @@ static int __devinit snd_hdsp_create(struct snd_card *card,
 		/* init is complete, we return */
 		return 0;
 #endif
-		/* no iobox connected, we defer initialization */
+		/* we defer initialization */
 		snd_printk(KERN_INFO "Hammerfall-DSP: card initialization pending : waiting for firmware\n");
 		if ((err = snd_hdsp_create_hwdep(card, hdsp)) < 0)
 			return err;
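
The hdsp.c changes repeat one pattern: each entry point that is about to touch the hardware first calls hdsp_check_for_iobox() and hdsp_check_for_firmware() and propagates their error code, instead of failing later with a blanket -EINVAL. A compact sketch of that "validate device state at every entry point" pattern, with check_iobox()/check_firmware() as hypothetical stand-ins:

/*
 * Sketch of the precondition checks added to the hdsp ioctl paths above.
 * check_iobox()/check_firmware() stand in for hdsp_check_for_iobox()/
 * hdsp_check_for_firmware(); they are not real APIs.
 */
#include <errno.h>
#include <stdio.h>

struct dev_state { int iobox_present; int firmware_loaded; };

static int check_iobox(const struct dev_state *d)
{
	return d->iobox_present ? 0 : -EIO;
}

static int check_firmware(const struct dev_state *d)
{
	return d->firmware_loaded ? 0 : -EIO;
}

static int get_config(const struct dev_state *d)
{
	int err;

	err = check_iobox(d);
	if (err < 0)
		return err;
	err = check_firmware(d);
	if (err < 0)
		return err;

	/* ... safe to read registers here ... */
	return 0;
}

int main(void)
{
	struct dev_state d = { 1, 0 };
	printf("get_config: %d\n", get_config(&d));	/* -EIO: firmware not loaded */
	return 0;
}
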