-rw-r--r--  Documentation/DocBook/kernel-api.tmpl | 9
-rw-r--r--  Documentation/SubmittingPatches | 10
-rw-r--r--  Documentation/accounting/delay-accounting.txt | 10
-rw-r--r--  Documentation/cpu-hotplug.txt | 12
-rw-r--r--  Documentation/devices.txt | 8
-rw-r--r--  Documentation/feature-removal-schedule.txt | 16
-rw-r--r--  Documentation/initrd.txt | 16
-rw-r--r--  Documentation/kbuild/makefiles.txt | 14
-rw-r--r--  Documentation/kernel-parameters.txt | 4
-rw-r--r--  Documentation/x86_64/boot-options.txt | 7
-rw-r--r--  MAINTAINERS | 15
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/alpha/kernel/err_ev7.c | 8
-rw-r--r--  arch/alpha/kernel/osf_sys.c | 4
-rw-r--r--  arch/alpha/kernel/setup.c | 50
-rw-r--r--  arch/alpha/kernel/sys_ruffian.c | 8
-rw-r--r--  arch/alpha/kernel/time.c | 2
-rw-r--r--  arch/arm/configs/ep93xx_defconfig | 15
-rw-r--r--  arch/arm/kernel/head.S | 6
-rw-r--r--  arch/arm/kernel/traps.c | 7
-rw-r--r--  arch/arm/mach-footbridge/cats-hw.c | 2
-rw-r--r--  arch/arm/mach-s3c2410/mach-anubis.c | 49
-rw-r--r--  arch/arm/mach-s3c2410/mach-osiris.c | 20
-rw-r--r--  arch/arm/mm/ioremap.c | 2
-rw-r--r--  arch/arm/mm/proc-syms.c | 8
-rw-r--r--  arch/arm/mm/proc-xscale.S | 30
-rw-r--r--  arch/i386/Kconfig | 2
-rw-r--r--  arch/i386/kernel/Makefile | 3
-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c | 2
-rw-r--r--  arch/i386/kernel/cpu/mcheck/mce.h | 2
-rw-r--r--  arch/i386/kernel/entry.S | 2
-rw-r--r--  arch/i386/kernel/kprobes.c | 9
-rw-r--r--  arch/i386/kernel/machine_kexec.c | 13
-rw-r--r--  arch/i386/kernel/nmi.c | 1
-rw-r--r--  arch/i386/kernel/process.c | 4
-rw-r--r--  arch/i386/kernel/smpboot.c | 62
-rw-r--r--  arch/i386/kernel/time.c | 2
-rw-r--r--  arch/i386/kernel/traps.c | 29
-rw-r--r--  arch/i386/kernel/vsyscall.lds.S | 1
-rw-r--r--  arch/ia64/configs/sn2_defconfig | 2
-rw-r--r--  arch/ia64/defconfig | 2
-rw-r--r--  arch/ia64/kernel/Makefile | 3
-rw-r--r--  arch/ia64/kernel/gate.lds.S | 1
-rw-r--r--  arch/ia64/kernel/kprobes.c | 9
-rw-r--r--  arch/ia64/kernel/palinfo.c | 13
-rw-r--r--  arch/ia64/kernel/topology.c | 2
-rw-r--r--  arch/ia64/kernel/traps.c | 7
-rw-r--r--  arch/parisc/kernel/vmlinux.lds.S | 1
-rw-r--r--  arch/powerpc/kernel/sysfs.c | 4
-rw-r--r--  arch/powerpc/kernel/traps.c | 10
-rw-r--r--  arch/powerpc/kernel/vdso32/Makefile | 3
-rw-r--r--  arch/powerpc/kernel/vdso32/vdso32.lds.S | 1
-rw-r--r--  arch/powerpc/kernel/vdso64/Makefile | 3
-rw-r--r--  arch/powerpc/kernel/vdso64/vdso64.lds.S | 1
-rw-r--r--  arch/powerpc/platforms/powermac/backlight.c | 58
-rw-r--r--  arch/ppc/kernel/vmlinux.lds.S | 1
-rw-r--r--  arch/s390/appldata/appldata_base.c | 10
-rw-r--r--  arch/s390/defconfig | 44
-rw-r--r--  arch/sparc/kernel/time.c | 74
-rw-r--r--  arch/sparc64/mm/fault.c | 3
-rw-r--r--  arch/um/kernel/dyn.lds.S | 1
-rw-r--r--  arch/v850/kernel/setup.c | 6
-rw-r--r--  arch/v850/kernel/v850_ksyms.c | 16
-rw-r--r--  arch/x86_64/defconfig | 9
-rw-r--r--  arch/x86_64/ia32/Makefile | 1
-rw-r--r--  arch/x86_64/ia32/ia32entry.S | 2
-rw-r--r--  arch/x86_64/ia32/vsyscall.lds | 1
-rw-r--r--  arch/x86_64/kernel/machine_kexec.c | 13
-rw-r--r--  arch/x86_64/kernel/mce.c | 12
-rw-r--r--  arch/x86_64/kernel/mce_amd.c | 19
-rw-r--r--  arch/x86_64/kernel/pci-calgary.c | 77
-rw-r--r--  arch/x86_64/kernel/pci-swiotlb.c | 5
-rw-r--r--  arch/x86_64/kernel/tce.c | 4
-rw-r--r--  arch/x86_64/kernel/time.c | 18
-rw-r--r--  arch/x86_64/kernel/traps.c | 24
-rw-r--r--  arch/x86_64/pci/k8-bus.c | 10
-rw-r--r--  arch/xtensa/kernel/traps.c | 8
-rw-r--r--  block/blktrace.c | 2
-rw-r--r--  block/cfq-iosched.c | 2
-rw-r--r--  drivers/base/topology.c | 2
-rw-r--r--  drivers/block/cciss.c | 86
-rw-r--r--  drivers/block/nbd.c | 19
-rw-r--r--  drivers/block/pktcdvd.c | 6
-rw-r--r--  drivers/char/hw_random/geode-rng.c | 6
-rw-r--r--  drivers/char/hw_random/intel-rng.c | 2
-rw-r--r--  drivers/char/pc8736x_gpio.c | 1
-rw-r--r--  drivers/connector/cn_proc.c | 20
-rw-r--r--  drivers/cpufreq/cpufreq.c | 40
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 4
-rw-r--r--  drivers/cpufreq/cpufreq_userspace.c | 3
-rw-r--r--  drivers/ide/Kconfig | 1
-rw-r--r--  drivers/ide/ide-disk.c | 2
-rw-r--r--  drivers/ide/ide-dma.c | 2
-rw-r--r--  drivers/ide/ide-iops.c | 2
-rw-r--r--  drivers/ide/ide.c | 5
-rw-r--r--  drivers/ide/pci/it821x.c | 11
-rw-r--r--  drivers/macintosh/Kconfig | 2
-rw-r--r--  drivers/macintosh/adbhid.c | 14
-rw-r--r--  drivers/macintosh/via-pmu-backlight.c | 68
-rw-r--r--  drivers/macintosh/via-pmu.c | 39
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 2
-rw-r--r--  drivers/net/skge.c | 5
-rw-r--r--  drivers/net/sunlance.c | 8
-rw-r--r--  drivers/net/tg3.c | 116
-rw-r--r--  drivers/net/via-velocity.c | 17
-rw-r--r--  drivers/net/wireless/Kconfig | 1
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.c | 2
-rw-r--r--  drivers/net/wireless/orinoco.c | 4
-rw-r--r--  drivers/net/wireless/zd1201.c | 2
-rw-r--r--  drivers/pci/search.c | 46
-rw-r--r--  drivers/pcmcia/pcmcia_ioctl.c | 16
-rw-r--r--  drivers/pcmcia/pcmcia_resource.c | 15
-rw-r--r--  drivers/pnp/pnpbios/core.c | 2
-rw-r--r--  drivers/s390/cio/ccwgroup.c | 10
-rw-r--r--  drivers/s390/cio/device_fsm.c | 3
-rw-r--r--  drivers/scsi/NCR53C9x.c | 16
-rw-r--r--  drivers/scsi/arm/fas216.c | 2
-rw-r--r--  drivers/scsi/esp.c | 12
-rw-r--r--  drivers/scsi/libata-eh.c | 69
-rw-r--r--  drivers/scsi/sata_promise.c | 7
-rw-r--r--  drivers/scsi/scsi_ioctl.c | 5
-rw-r--r--  drivers/video/Kconfig | 38
-rw-r--r--  drivers/video/Makefile | 1
-rw-r--r--  drivers/video/aty/aty128fb.c | 3
-rw-r--r--  drivers/video/aty/atyfb_base.c | 4
-rw-r--r--  drivers/video/aty/radeon_base.c | 18
-rw-r--r--  drivers/video/aty/radeon_pm.c | 146
-rw-r--r--  drivers/video/aty/radeonfb.h | 6
-rw-r--r--  drivers/video/backlight/Kconfig | 4
-rw-r--r--  drivers/video/console/mdacon.c | 4
-rw-r--r--  drivers/video/fb_notify.c | 46
-rw-r--r--  drivers/video/fbmem.c | 54
-rw-r--r--  drivers/video/nvidia/nvidia.c | 9
-rw-r--r--  drivers/video/riva/fbdev.c | 7
-rw-r--r--  fs/9p/conv.c | 6
-rw-r--r--  fs/9p/vfs_inode.c | 6
-rw-r--r--  fs/buffer.c | 7
-rw-r--r--  fs/coda/file.c | 4
-rw-r--r--  fs/efs/symlink.c | 3
-rw-r--r--  fs/ext3/inode.c | 19
-rw-r--r--  fs/ext3/namei.c | 15
-rw-r--r--  fs/freevxfs/vxfs_lookup.c | 2
-rw-r--r--  fs/fuse/control.c | 4
-rw-r--r--  fs/fuse/dir.c | 47
-rw-r--r--  fs/fuse/fuse_i.h | 2
-rw-r--r--  fs/fuse/inode.c | 2
-rw-r--r--  fs/inotify_user.c | 2
-rw-r--r--  fs/nfsd/nfsfh.c | 20
-rw-r--r--  fs/partitions/Kconfig | 2
-rw-r--r--  fs/ufs/namei.c | 3
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.h | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c | 7
-rw-r--r--  fs/xfs/quota/xfs_qm_bhv.c | 19
-rw-r--r--  fs/xfs/xfs_inode.c | 17
-rw-r--r--  fs/xfs/xfs_log.c | 12
-rw-r--r--  fs/xfs/xfs_vfsops.c | 2
-rw-r--r--  include/asm-arm/arch-iop3xx/iop331-irqs.h | 4
-rw-r--r--  include/asm-i386/kprobes.h | 1
-rw-r--r--  include/asm-ia64/kprobes.h | 1
-rw-r--r--  include/asm-powerpc/backlight.h | 4
-rw-r--r--  include/asm-powerpc/kprobes.h | 1
-rw-r--r--  include/asm-sparc/signal.h | 2
-rw-r--r--  include/asm-sparc64/kprobes.h | 1
-rw-r--r--  include/asm-sparc64/pgtable.h | 2
-rw-r--r--  include/asm-sparc64/sfp-machine.h | 2
-rw-r--r--  include/asm-x86_64/calgary.h | 5
-rw-r--r--  include/asm-x86_64/kprobes.h | 1
-rw-r--r--  include/asm-x86_64/page.h | 2
-rw-r--r--  include/asm-x86_64/swiotlb.h | 2
-rw-r--r--  include/linux/cn_proc.h | 3
-rw-r--r--  include/linux/cpufreq.h | 3
-rw-r--r--  include/linux/delayacct.h | 6
-rw-r--r--  include/linux/ext3_fs.h | 9
-rw-r--r--  include/linux/fb.h | 2
-rw-r--r--  include/linux/futex.h | 3
-rw-r--r--  include/linux/ide.h | 1
-rw-r--r--  include/linux/irq.h | 5
-rw-r--r--  include/linux/jiffies.h | 4
-rw-r--r--  include/linux/libata.h | 4
-rw-r--r--  include/linux/netfilter_bridge.h | 2
-rw-r--r--  include/linux/pmu.h | 3
-rw-r--r--  ipc/msg.c | 389
-rw-r--r--  kernel/delayacct.c | 8
-rw-r--r--  kernel/futex.c | 121
-rw-r--r--  kernel/futex_compat.c | 34
-rw-r--r--  kernel/hrtimer.c | 4
-rw-r--r--  kernel/irq/manage.c | 28
-rw-r--r--  kernel/kprobes.c | 1
-rw-r--r--  kernel/rcupdate.c | 4
-rw-r--r--  kernel/rtmutex.c | 2
-rw-r--r--  kernel/sched.c | 22
-rw-r--r--  kernel/softirq.c | 22
-rw-r--r--  kernel/softlockup.c | 4
-rw-r--r--  kernel/taskstats.c | 32
-rw-r--r--  kernel/timer.c | 8
-rw-r--r--  kernel/workqueue.c | 58
-rw-r--r--  lib/zlib_inflate/inflate.c | 5
-rw-r--r--  mm/filemap.c | 2
-rw-r--r--  mm/slab.c | 4
-rw-r--r--  net/bridge/br_netfilter.c | 5
-rw-r--r--  net/ieee80211/Kconfig | 1
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_auth.c | 28
-rw-r--r--  net/ipv4/ip_input.c | 2
-rw-r--r--  net/ipv4/ipmr.c | 19
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_helper_h323.c | 2
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_standalone.c | 4
-rw-r--r--  net/ipv4/netfilter/ip_nat_snmp_basic.c | 4
-rw-r--r--  net/ipv4/raw.c | 1
-rw-r--r--  net/ipv6/ip6_input.c | 2
-rw-r--r--  net/ipv6/raw.c | 1
-rw-r--r--  net/netfilter/Kconfig | 4
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c | 4
-rw-r--r--  net/netfilter/nf_queue.c | 9
-rw-r--r--  net/netfilter/xt_physdev.c | 15
-rw-r--r--  net/netfilter/xt_pkttype.c | 12
-rwxr-xr-x  scripts/kernel-doc | 1
-rw-r--r--  security/selinux/ss/policydb.c | 12
-rw-r--r--  security/selinux/ss/services.c | 4
220 files changed, 1976 insertions, 1172 deletions
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index 1ae4dc0fd856..f8fe882e33dc 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -59,6 +59,9 @@
59!Iinclude/linux/hrtimer.h 59!Iinclude/linux/hrtimer.h
60!Ekernel/hrtimer.c 60!Ekernel/hrtimer.c
61 </sect1> 61 </sect1>
62 <sect1><title>Workqueues and Kevents</title>
63!Ekernel/workqueue.c
64 </sect1>
62 <sect1><title>Internal Functions</title> 65 <sect1><title>Internal Functions</title>
63!Ikernel/exit.c 66!Ikernel/exit.c
64!Ikernel/signal.c 67!Ikernel/signal.c
@@ -300,7 +303,7 @@ X!Ekernel/module.c
300 </sect1> 303 </sect1>
301 304
302 <sect1><title>Resources Management</title> 305 <sect1><title>Resources Management</title>
303!Ekernel/resource.c 306!Ikernel/resource.c
304 </sect1> 307 </sect1>
305 308
306 <sect1><title>MTRR Handling</title> 309 <sect1><title>MTRR Handling</title>
@@ -312,9 +315,7 @@ X!Ekernel/module.c
312!Edrivers/pci/pci-driver.c 315!Edrivers/pci/pci-driver.c
313!Edrivers/pci/remove.c 316!Edrivers/pci/remove.c
314!Edrivers/pci/pci-acpi.c 317!Edrivers/pci/pci-acpi.c
315<!-- kerneldoc does not understand __devinit 318!Edrivers/pci/search.c
316X!Edrivers/pci/search.c
317 -->
318!Edrivers/pci/msi.c 319!Edrivers/pci/msi.c
319!Edrivers/pci/bus.c 320!Edrivers/pci/bus.c
320<!-- FIXME: Removed for now since no structured comments in source 321<!-- FIXME: Removed for now since no structured comments in source
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index c2c85bcb3d43..2cd7f02ffd0b 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -10,7 +10,9 @@ kernel, the process can sometimes be daunting if you're not familiar
10with "the system." This text is a collection of suggestions which 10with "the system." This text is a collection of suggestions which
11can greatly increase the chances of your change being accepted. 11can greatly increase the chances of your change being accepted.
12 12
13If you are submitting a driver, also read Documentation/SubmittingDrivers. 13Read Documentation/SubmitChecklist for a list of items to check
14before submitting code. If you are submitting a driver, also read
15Documentation/SubmittingDrivers.
14 16
15 17
16 18
@@ -74,9 +76,6 @@ There are a number of scripts which can aid in this:
74Quilt: 76Quilt:
75http://savannah.nongnu.org/projects/quilt 77http://savannah.nongnu.org/projects/quilt
76 78
77Randy Dunlap's patch scripts:
78http://www.xenotime.net/linux/scripts/patching-scripts-002.tar.gz
79
80Andrew Morton's patch scripts: 79Andrew Morton's patch scripts:
81http://www.zip.com.au/~akpm/linux/patches/ 80http://www.zip.com.au/~akpm/linux/patches/
82Instead of these scripts, quilt is the recommended patch management 81Instead of these scripts, quilt is the recommended patch management
@@ -484,7 +483,7 @@ Greg Kroah-Hartman "How to piss off a kernel subsystem maintainer".
484 <http://www.kroah.com/log/2005/10/19/> 483 <http://www.kroah.com/log/2005/10/19/>
485 <http://www.kroah.com/log/2006/01/11/> 484 <http://www.kroah.com/log/2006/01/11/>
486 485
487NO!!!! No more huge patch bombs to linux-kernel@vger.kernel.org people!. 486NO!!!! No more huge patch bombs to linux-kernel@vger.kernel.org people!
488 <http://marc.theaimsgroup.com/?l=linux-kernel&m=112112749912944&w=2> 487 <http://marc.theaimsgroup.com/?l=linux-kernel&m=112112749912944&w=2>
489 488
490Kernel Documentation/CodingStyle 489Kernel Documentation/CodingStyle
@@ -493,4 +492,3 @@ Kernel Documentation/CodingStyle
493Linus Torvald's mail on the canonical patch format: 492Linus Torvald's mail on the canonical patch format:
494 <http://lkml.org/lkml/2005/4/7/183> 493 <http://lkml.org/lkml/2005/4/7/183>
495-- 494--
496Last updated on 17 Nov 2005.
diff --git a/Documentation/accounting/delay-accounting.txt b/Documentation/accounting/delay-accounting.txt
index be215e58423b..1443cd71d263 100644
--- a/Documentation/accounting/delay-accounting.txt
+++ b/Documentation/accounting/delay-accounting.txt
@@ -64,11 +64,13 @@ Compile the kernel with
64 CONFIG_TASK_DELAY_ACCT=y 64 CONFIG_TASK_DELAY_ACCT=y
65 CONFIG_TASKSTATS=y 65 CONFIG_TASKSTATS=y
66 66
67Enable the accounting at boot time by adding 67Delay accounting is enabled by default at boot up.
68the following to the kernel boot options 68To disable, add
69 delayacct 69 nodelayacct
70to the kernel boot options. The rest of the instructions
71below assume this has not been done.
70 72
71and after the system has booted up, use a utility 73After the system has booted up, use a utility
72similar to getdelays.c to access the delays 74similar to getdelays.c to access the delays
73seen by a given task or a task group (tgid). 75seen by a given task or a task group (tgid).
74The utility also allows a given command to be 76The utility also allows a given command to be
diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt
index 1bcf69996c9d..bc107cb157a8 100644
--- a/Documentation/cpu-hotplug.txt
+++ b/Documentation/cpu-hotplug.txt
@@ -251,16 +251,24 @@ A: This is what you would need in your kernel code to receive notifications.
251 return NOTIFY_OK; 251 return NOTIFY_OK;
252 } 252 }
253 253
254 static struct notifier_block foobar_cpu_notifer = 254 static struct notifier_block __cpuinitdata foobar_cpu_notifer =
255 { 255 {
256 .notifier_call = foobar_cpu_callback, 256 .notifier_call = foobar_cpu_callback,
257 }; 257 };
258 258
259You need to call register_cpu_notifier() from your init function.
260Init functions could be of two types:
2611. early init (init function called when only the boot processor is online).
2622. late init (init function called _after_ all the CPUs are online).
259 263
260In your init function, 264For the first case, you should add the following to your init function
261 265
262 register_cpu_notifier(&foobar_cpu_notifier); 266 register_cpu_notifier(&foobar_cpu_notifier);
263 267
268For the second case, you should add the following to your init function
269
270 register_hotcpu_notifier(&foobar_cpu_notifier);
271
264You can fail PREPARE notifiers if something doesn't work to prepare resources. 272You can fail PREPARE notifiers if something doesn't work to prepare resources.
265This will stop the activity and send a following CANCELED event back. 273This will stop the activity and send a following CANCELED event back.
266 274
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index 4aaf68fafebe..66c725f530f3 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -2565,10 +2565,10 @@ Your cooperation is appreciated.
2565 243 = /dev/usb/dabusb3 Fourth dabusb device 2565 243 = /dev/usb/dabusb3 Fourth dabusb device
2566 2566
2567180 block USB block devices 2567180 block USB block devices
2568 0 = /dev/uba First USB block device 2568 0 = /dev/uba First USB block device
2569 8 = /dev/ubb Second USB block device 2569 8 = /dev/ubb Second USB block device
2570 16 = /dev/ubc Thrid USB block device 2570 16 = /dev/ubc Third USB block device
2571 ... 2571 ...
2572 2572
2573181 char Conrad Electronic parallel port radio clocks 2573181 char Conrad Electronic parallel port radio clocks
2574 0 = /dev/pcfclock0 First Conrad radio clock 2574 0 = /dev/pcfclock0 First Conrad radio clock
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 9d3a0775a11d..87851efb0228 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -258,3 +258,19 @@ Why: These drivers never compiled since they were added to the kernel
258Who: Jean Delvare <khali@linux-fr.org> 258Who: Jean Delvare <khali@linux-fr.org>
259 259
260--------------------------- 260---------------------------
261
262What: Bridge netfilter deferred IPv4/IPv6 output hook calling
263When: January 2007
264Why: The deferred output hooks are a layering violation causing unusual
265 and broken behaviour on bridge devices. Examples of things they
266 break include QoS classifation using the MARK or CLASSIFY targets,
267 the IPsec policy match and connection tracking with VLANs on a
268 bridge. Their only use is to enable bridge output port filtering
269 within iptables with the physdev match, which can also be done by
270 combining iptables and ebtables using netfilter marks. Until it
271 will get removed the hook deferral is disabled by default and is
272 only enabled when needed.
273
274Who: Patrick McHardy <kaber@trash.net>
275
276---------------------------
diff --git a/Documentation/initrd.txt b/Documentation/initrd.txt
index b1b6440237a6..15f1b35deb34 100644
--- a/Documentation/initrd.txt
+++ b/Documentation/initrd.txt
@@ -72,6 +72,22 @@ initrd adds the following new options:
72 initrd is mounted as root, and the normal boot procedure is followed, 72 initrd is mounted as root, and the normal boot procedure is followed,
73 with the RAM disk still mounted as root. 73 with the RAM disk still mounted as root.
74 74
75Compressed cpio images
76----------------------
77
78Recent kernels have support for populating a ramdisk from a compressed cpio
79archive, on such systems, the creation of a ramdisk image doesn't need to
80involve special block devices or loopbacks, you merely create a directory on
81disk with the desired initrd content, cd to that directory, and run (as an
82example):
83
84find . | cpio --quiet -c -o | gzip -9 -n > /boot/imagefile.img
85
86Examining the contents of an existing image file is just as simple:
87
88mkdir /tmp/imagefile
89cd /tmp/imagefile
90gzip -cd /boot/imagefile.img | cpio -imd --quiet
75 91
76Installation 92Installation
77------------ 93------------
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index 14ef3868a328..0706699c9da9 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -407,6 +407,20 @@ more details, with real examples.
407 The second argument is optional, and if supplied will be used 407 The second argument is optional, and if supplied will be used
408 if first argument is not supported. 408 if first argument is not supported.
409 409
410 ld-option
411 ld-option is used to check if $(CC) when used to link object files
412 supports the given option. An optional second option may be
413 specified if first option are not supported.
414
415 Example:
416 #arch/i386/kernel/Makefile
417 vsyscall-flags += $(call ld-option, -Wl$(comma)--hash-style=sysv)
418
419 In the above example vsyscall-flags will be assigned the option
420 -Wl$(comma)--hash-style=sysv if it is supported by $(CC).
421 The second argument is optional, and if supplied will be used
422 if first argument is not supported.
423
410 cc-option 424 cc-option
411 cc-option is used to check if $(CC) support a given option, and not 425 cc-option is used to check if $(CC) support a given option, and not
412 supported to use an optional second option. 426 supported to use an optional second option.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index e11f7728ec6f..b50595a0550f 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -448,8 +448,6 @@ running once the system is up.
448 Format: <area>[,<node>] 448 Format: <area>[,<node>]
449 See also Documentation/networking/decnet.txt. 449 See also Documentation/networking/decnet.txt.
450 450
451 delayacct [KNL] Enable per-task delay accounting
452
453 dhash_entries= [KNL] 451 dhash_entries= [KNL]
454 Set number of hash buckets for dentry cache. 452 Set number of hash buckets for dentry cache.
455 453
@@ -1031,6 +1029,8 @@ running once the system is up.
1031 1029
1032 nocache [ARM] 1030 nocache [ARM]
1033 1031
1032 nodelayacct [KNL] Disable per-task delay accounting
1033
1034 nodisconnect [HW,SCSI,M68K] Disables SCSI disconnects. 1034 nodisconnect [HW,SCSI,M68K] Disables SCSI disconnects.
1035 1035
1036 noexec [IA-64] 1036 noexec [IA-64]
diff --git a/Documentation/x86_64/boot-options.txt b/Documentation/x86_64/boot-options.txt
index 6887d44d2661..6da24e7a56cb 100644
--- a/Documentation/x86_64/boot-options.txt
+++ b/Documentation/x86_64/boot-options.txt
@@ -238,6 +238,13 @@ Debugging
238 pagefaulttrace Dump all page faults. Only useful for extreme debugging 238 pagefaulttrace Dump all page faults. Only useful for extreme debugging
239 and will create a lot of output. 239 and will create a lot of output.
240 240
241 call_trace=[old|both|newfallback|new]
242 old: use old inexact backtracer
243 new: use new exact dwarf2 unwinder
244 both: print entries from both
245 newfallback: use new unwinder but fall back to old if it gets
246 stuck (default)
247
241Misc 248Misc
242 249
243 noreplacement Don't replace instructions with more appropriate ones 250 noreplacement Don't replace instructions with more appropriate ones
diff --git a/MAINTAINERS b/MAINTAINERS
index b2afc7ae965b..32aa30d1504a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -968,6 +968,10 @@ P: Andrey V. Savochkin
968M: saw@saw.sw.com.sg 968M: saw@saw.sw.com.sg
969S: Maintained 969S: Maintained
970 970
971EFS FILESYSTEM
972W: http://aeschi.ch.eu.org/efs/
973S: Orphan
974
971EMU10K1 SOUND DRIVER 975EMU10K1 SOUND DRIVER
972P: James Courtier-Dutton 976P: James Courtier-Dutton
973M: James@superbug.demon.co.uk 977M: James@superbug.demon.co.uk
@@ -1598,7 +1602,7 @@ W: http://jfs.sourceforge.net/
1598T: git kernel.org:/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git 1602T: git kernel.org:/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git
1599S: Supported 1603S: Supported
1600 1604
1601JOURNALLING LAYER FOR BLOCK DEVICS (JBD) 1605JOURNALLING LAYER FOR BLOCK DEVICES (JBD)
1602P: Stephen Tweedie, Andrew Morton 1606P: Stephen Tweedie, Andrew Morton
1603M: sct@redhat.com, akpm@osdl.org 1607M: sct@redhat.com, akpm@osdl.org
1604L: ext2-devel@lists.sourceforge.net 1608L: ext2-devel@lists.sourceforge.net
@@ -1642,9 +1646,8 @@ S: Maintained
1642 1646
1643KERNEL JANITORS 1647KERNEL JANITORS
1644P: Several 1648P: Several
1645L: kernel-janitors@osdl.org 1649L: kernel-janitors@lists.osdl.org
1646W: http://www.kerneljanitors.org/ 1650W: http://www.kerneljanitors.org/
1647W: http://sf.net/projects/kernel-janitor/
1648S: Maintained 1651S: Maintained
1649 1652
1650KERNEL NFSD 1653KERNEL NFSD
@@ -1882,6 +1885,12 @@ S: linux-scsi@vger.kernel.org
1882W: http://megaraid.lsilogic.com 1885W: http://megaraid.lsilogic.com
1883S: Maintained 1886S: Maintained
1884 1887
1888MEMORY MANAGEMENT
1889L: linux-mm@kvack.org
1890L: linux-kernel@vger.kernel.org
1891W: http://www.linux-mm.org
1892S: Maintained
1893
1885MEMORY TECHNOLOGY DEVICES (MTD) 1894MEMORY TECHNOLOGY DEVICES (MTD)
1886P: David Woodhouse 1895P: David Woodhouse
1887M: dwmw2@infradead.org 1896M: dwmw2@infradead.org
diff --git a/Makefile b/Makefile
index 1dd58d35d72c..c9b7dbb64c71 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 18 3SUBLEVEL = 18
4EXTRAVERSION = -rc2 4EXTRAVERSION = -rc3
5NAME=Crazed Snow-Weasel 5NAME=Crazed Snow-Weasel
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/alpha/kernel/err_ev7.c b/arch/alpha/kernel/err_ev7.c
index bf52ba691957..fed6b3d1b803 100644
--- a/arch/alpha/kernel/err_ev7.c
+++ b/arch/alpha/kernel/err_ev7.c
@@ -274,16 +274,14 @@ ev7_process_pal_subpacket(struct el_subpacket *header)
274struct el_subpacket_handler ev7_pal_subpacket_handler = 274struct el_subpacket_handler ev7_pal_subpacket_handler =
275 SUBPACKET_HANDLER_INIT(EL_CLASS__PAL, ev7_process_pal_subpacket); 275 SUBPACKET_HANDLER_INIT(EL_CLASS__PAL, ev7_process_pal_subpacket);
276 276
277void 277void
278ev7_register_error_handlers(void) 278ev7_register_error_handlers(void)
279{ 279{
280 int i; 280 int i;
281 281
282 for(i = 0; 282 for (i = 0; i < ARRAY_SIZE(el_ev7_pal_annotations); i++)
283 i<sizeof(el_ev7_pal_annotations)/sizeof(el_ev7_pal_annotations[1]);
284 i++) {
285 cdl_register_subpacket_annotation(&el_ev7_pal_annotations[i]); 283 cdl_register_subpacket_annotation(&el_ev7_pal_annotations[i]);
286 } 284
287 cdl_register_subpacket_handler(&ev7_pal_subpacket_handler); 285 cdl_register_subpacket_handler(&ev7_pal_subpacket_handler);
288} 286}
289 287
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index e15dcf4f3dcd..73c7622b5297 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -623,12 +623,12 @@ osf_sysinfo(int command, char __user *buf, long count)
623 long len, err = -EINVAL; 623 long len, err = -EINVAL;
624 624
625 offset = command-1; 625 offset = command-1;
626 if (offset >= sizeof(sysinfo_table)/sizeof(char *)) { 626 if (offset >= ARRAY_SIZE(sysinfo_table)) {
627 /* Digital UNIX has a few unpublished interfaces here */ 627 /* Digital UNIX has a few unpublished interfaces here */
628 printk("sysinfo(%d)", command); 628 printk("sysinfo(%d)", command);
629 goto out; 629 goto out;
630 } 630 }
631 631
632 down_read(&uts_sem); 632 down_read(&uts_sem);
633 res = sysinfo_table[offset]; 633 res = sysinfo_table[offset];
634 len = strlen(res)+1; 634 len = strlen(res)+1;
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index 2cb9c4380113..fd4a8fa0c93d 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -114,8 +114,6 @@ struct alpha_machine_vector alpha_mv;
114int alpha_using_srm; 114int alpha_using_srm;
115#endif 115#endif
116 116
117#define N(a) (sizeof(a)/sizeof(a[0]))
118
119static struct alpha_machine_vector *get_sysvec(unsigned long, unsigned long, 117static struct alpha_machine_vector *get_sysvec(unsigned long, unsigned long,
120 unsigned long); 118 unsigned long);
121static struct alpha_machine_vector *get_sysvec_byname(const char *); 119static struct alpha_machine_vector *get_sysvec_byname(const char *);
@@ -240,7 +238,7 @@ reserve_std_resources(void)
240 standard_io_resources[0].start = RTC_PORT(0); 238 standard_io_resources[0].start = RTC_PORT(0);
241 standard_io_resources[0].end = RTC_PORT(0) + 0x10; 239 standard_io_resources[0].end = RTC_PORT(0) + 0x10;
242 240
243 for (i = 0; i < N(standard_io_resources); ++i) 241 for (i = 0; i < ARRAY_SIZE(standard_io_resources); ++i)
244 request_resource(io, standard_io_resources+i); 242 request_resource(io, standard_io_resources+i);
245} 243}
246 244
@@ -918,13 +916,13 @@ get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu)
918 916
919 /* Search the system tables first... */ 917 /* Search the system tables first... */
920 vec = NULL; 918 vec = NULL;
921 if (type < N(systype_vecs)) { 919 if (type < ARRAY_SIZE(systype_vecs)) {
922 vec = systype_vecs[type]; 920 vec = systype_vecs[type];
923 } else if ((type > ST_API_BIAS) && 921 } else if ((type > ST_API_BIAS) &&
924 (type - ST_API_BIAS) < N(api_vecs)) { 922 (type - ST_API_BIAS) < ARRAY_SIZE(api_vecs)) {
925 vec = api_vecs[type - ST_API_BIAS]; 923 vec = api_vecs[type - ST_API_BIAS];
926 } else if ((type > ST_UNOFFICIAL_BIAS) && 924 } else if ((type > ST_UNOFFICIAL_BIAS) &&
927 (type - ST_UNOFFICIAL_BIAS) < N(unofficial_vecs)) { 925 (type - ST_UNOFFICIAL_BIAS) < ARRAY_SIZE(unofficial_vecs)) {
928 vec = unofficial_vecs[type - ST_UNOFFICIAL_BIAS]; 926 vec = unofficial_vecs[type - ST_UNOFFICIAL_BIAS];
929 } 927 }
930 928
@@ -938,11 +936,11 @@ get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu)
938 936
939 switch (type) { 937 switch (type) {
940 case ST_DEC_ALCOR: 938 case ST_DEC_ALCOR:
941 if (member < N(alcor_indices)) 939 if (member < ARRAY_SIZE(alcor_indices))
942 vec = alcor_vecs[alcor_indices[member]]; 940 vec = alcor_vecs[alcor_indices[member]];
943 break; 941 break;
944 case ST_DEC_EB164: 942 case ST_DEC_EB164:
945 if (member < N(eb164_indices)) 943 if (member < ARRAY_SIZE(eb164_indices))
946 vec = eb164_vecs[eb164_indices[member]]; 944 vec = eb164_vecs[eb164_indices[member]];
947 /* PC164 may show as EB164 variation with EV56 CPU, 945 /* PC164 may show as EB164 variation with EV56 CPU,
948 but, since no true EB164 had anything but EV5... */ 946 but, since no true EB164 had anything but EV5... */
@@ -950,24 +948,24 @@ get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu)
950 vec = &pc164_mv; 948 vec = &pc164_mv;
951 break; 949 break;
952 case ST_DEC_EB64P: 950 case ST_DEC_EB64P:
953 if (member < N(eb64p_indices)) 951 if (member < ARRAY_SIZE(eb64p_indices))
954 vec = eb64p_vecs[eb64p_indices[member]]; 952 vec = eb64p_vecs[eb64p_indices[member]];
955 break; 953 break;
956 case ST_DEC_EB66: 954 case ST_DEC_EB66:
957 if (member < N(eb66_indices)) 955 if (member < ARRAY_SIZE(eb66_indices))
958 vec = eb66_vecs[eb66_indices[member]]; 956 vec = eb66_vecs[eb66_indices[member]];
959 break; 957 break;
960 case ST_DEC_MARVEL: 958 case ST_DEC_MARVEL:
961 if (member < N(marvel_indices)) 959 if (member < ARRAY_SIZE(marvel_indices))
962 vec = marvel_vecs[marvel_indices[member]]; 960 vec = marvel_vecs[marvel_indices[member]];
963 break; 961 break;
964 case ST_DEC_TITAN: 962 case ST_DEC_TITAN:
965 vec = titan_vecs[0]; /* default */ 963 vec = titan_vecs[0]; /* default */
966 if (member < N(titan_indices)) 964 if (member < ARRAY_SIZE(titan_indices))
967 vec = titan_vecs[titan_indices[member]]; 965 vec = titan_vecs[titan_indices[member]];
968 break; 966 break;
969 case ST_DEC_TSUNAMI: 967 case ST_DEC_TSUNAMI:
970 if (member < N(tsunami_indices)) 968 if (member < ARRAY_SIZE(tsunami_indices))
971 vec = tsunami_vecs[tsunami_indices[member]]; 969 vec = tsunami_vecs[tsunami_indices[member]];
972 break; 970 break;
973 case ST_DEC_1000: 971 case ST_DEC_1000:
@@ -1039,7 +1037,7 @@ get_sysvec_byname(const char *name)
1039 1037
1040 size_t i; 1038 size_t i;
1041 1039
1042 for (i = 0; i < N(all_vecs); ++i) { 1040 for (i = 0; i < ARRAY_SIZE(all_vecs); ++i) {
1043 struct alpha_machine_vector *mv = all_vecs[i]; 1041 struct alpha_machine_vector *mv = all_vecs[i];
1044 if (strcasecmp(mv->vector_name, name) == 0) 1042 if (strcasecmp(mv->vector_name, name) == 0)
1045 return mv; 1043 return mv;
@@ -1055,13 +1053,13 @@ get_sysnames(unsigned long type, unsigned long variation, unsigned long cpu,
1055 1053
1056 /* If not in the tables, make it UNKNOWN, 1054 /* If not in the tables, make it UNKNOWN,
1057 else set type name to family */ 1055 else set type name to family */
1058 if (type < N(systype_names)) { 1056 if (type < ARRAY_SIZE(systype_names)) {
1059 *type_name = systype_names[type]; 1057 *type_name = systype_names[type];
1060 } else if ((type > ST_API_BIAS) && 1058 } else if ((type > ST_API_BIAS) &&
1061 (type - ST_API_BIAS) < N(api_names)) { 1059 (type - ST_API_BIAS) < ARRAY_SIZE(api_names)) {
1062 *type_name = api_names[type - ST_API_BIAS]; 1060 *type_name = api_names[type - ST_API_BIAS];
1063 } else if ((type > ST_UNOFFICIAL_BIAS) && 1061 } else if ((type > ST_UNOFFICIAL_BIAS) &&
1064 (type - ST_UNOFFICIAL_BIAS) < N(unofficial_names)) { 1062 (type - ST_UNOFFICIAL_BIAS) < ARRAY_SIZE(unofficial_names)) {
1065 *type_name = unofficial_names[type - ST_UNOFFICIAL_BIAS]; 1063 *type_name = unofficial_names[type - ST_UNOFFICIAL_BIAS];
1066 } else { 1064 } else {
1067 *type_name = sys_unknown; 1065 *type_name = sys_unknown;
@@ -1083,7 +1081,7 @@ get_sysnames(unsigned long type, unsigned long variation, unsigned long cpu,
1083 default: /* default to variation "0" for now */ 1081 default: /* default to variation "0" for now */
1084 break; 1082 break;
1085 case ST_DEC_EB164: 1083 case ST_DEC_EB164:
1086 if (member < N(eb164_indices)) 1084 if (member < ARRAY_SIZE(eb164_indices))
1087 *variation_name = eb164_names[eb164_indices[member]]; 1085 *variation_name = eb164_names[eb164_indices[member]];
1088 /* PC164 may show as EB164 variation, but with EV56 CPU, 1086 /* PC164 may show as EB164 variation, but with EV56 CPU,
1089 so, since no true EB164 had anything but EV5... */ 1087 so, since no true EB164 had anything but EV5... */
@@ -1091,32 +1089,32 @@ get_sysnames(unsigned long type, unsigned long variation, unsigned long cpu,
1091 *variation_name = eb164_names[1]; /* make it PC164 */ 1089 *variation_name = eb164_names[1]; /* make it PC164 */
1092 break; 1090 break;
1093 case ST_DEC_ALCOR: 1091 case ST_DEC_ALCOR:
1094 if (member < N(alcor_indices)) 1092 if (member < ARRAY_SIZE(alcor_indices))
1095 *variation_name = alcor_names[alcor_indices[member]]; 1093 *variation_name = alcor_names[alcor_indices[member]];
1096 break; 1094 break;
1097 case ST_DEC_EB64P: 1095 case ST_DEC_EB64P:
1098 if (member < N(eb64p_indices)) 1096 if (member < ARRAY_SIZE(eb64p_indices))
1099 *variation_name = eb64p_names[eb64p_indices[member]]; 1097 *variation_name = eb64p_names[eb64p_indices[member]];
1100 break; 1098 break;
1101 case ST_DEC_EB66: 1099 case ST_DEC_EB66:
1102 if (member < N(eb66_indices)) 1100 if (member < ARRAY_SIZE(eb66_indices))
1103 *variation_name = eb66_names[eb66_indices[member]]; 1101 *variation_name = eb66_names[eb66_indices[member]];
1104 break; 1102 break;
1105 case ST_DEC_MARVEL: 1103 case ST_DEC_MARVEL:
1106 if (member < N(marvel_indices)) 1104 if (member < ARRAY_SIZE(marvel_indices))
1107 *variation_name = marvel_names[marvel_indices[member]]; 1105 *variation_name = marvel_names[marvel_indices[member]];
1108 break; 1106 break;
1109 case ST_DEC_RAWHIDE: 1107 case ST_DEC_RAWHIDE:
1110 if (member < N(rawhide_indices)) 1108 if (member < ARRAY_SIZE(rawhide_indices))
1111 *variation_name = rawhide_names[rawhide_indices[member]]; 1109 *variation_name = rawhide_names[rawhide_indices[member]];
1112 break; 1110 break;
1113 case ST_DEC_TITAN: 1111 case ST_DEC_TITAN:
1114 *variation_name = titan_names[0]; /* default */ 1112 *variation_name = titan_names[0]; /* default */
1115 if (member < N(titan_indices)) 1113 if (member < ARRAY_SIZE(titan_indices))
1116 *variation_name = titan_names[titan_indices[member]]; 1114 *variation_name = titan_names[titan_indices[member]];
1117 break; 1115 break;
1118 case ST_DEC_TSUNAMI: 1116 case ST_DEC_TSUNAMI:
1119 if (member < N(tsunami_indices)) 1117 if (member < ARRAY_SIZE(tsunami_indices))
1120 *variation_name = tsunami_names[tsunami_indices[member]]; 1118 *variation_name = tsunami_names[tsunami_indices[member]];
1121 break; 1119 break;
1122 } 1120 }
@@ -1211,7 +1209,7 @@ show_cpuinfo(struct seq_file *f, void *slot)
1211 1209
1212 cpu_index = (unsigned) (cpu->type - 1); 1210 cpu_index = (unsigned) (cpu->type - 1);
1213 cpu_name = "Unknown"; 1211 cpu_name = "Unknown";
1214 if (cpu_index < N(cpu_names)) 1212 if (cpu_index < ARRAY_SIZE(cpu_names))
1215 cpu_name = cpu_names[cpu_index]; 1213 cpu_name = cpu_names[cpu_index];
1216 1214
1217 get_sysnames(hwrpb->sys_type, hwrpb->sys_variation, 1215 get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
diff --git a/arch/alpha/kernel/sys_ruffian.c b/arch/alpha/kernel/sys_ruffian.c
index 78c30decf3ff..5b99cf3cd69c 100644
--- a/arch/alpha/kernel/sys_ruffian.c
+++ b/arch/alpha/kernel/sys_ruffian.c
@@ -182,16 +182,16 @@ static unsigned long __init
182ruffian_get_bank_size(unsigned long offset) 182ruffian_get_bank_size(unsigned long offset)
183{ 183{
184 unsigned long bank_addr, bank, ret = 0; 184 unsigned long bank_addr, bank, ret = 0;
185 185
186 /* Valid offsets are: 0x800, 0x840 and 0x880 186 /* Valid offsets are: 0x800, 0x840 and 0x880
187 since Ruffian only uses three banks. */ 187 since Ruffian only uses three banks. */
188 bank_addr = (unsigned long)PYXIS_MCR + offset; 188 bank_addr = (unsigned long)PYXIS_MCR + offset;
189 bank = *(vulp)bank_addr; 189 bank = *(vulp)bank_addr;
190 190
191 /* Check BANK_ENABLE */ 191 /* Check BANK_ENABLE */
192 if (bank & 0x01) { 192 if (bank & 0x01) {
193 static unsigned long size[] __initdata = { 193 static unsigned long size[] __initdata = {
194 0x40000000UL, /* 0x00, 1G */ 194 0x40000000UL, /* 0x00, 1G */
195 0x20000000UL, /* 0x02, 512M */ 195 0x20000000UL, /* 0x02, 512M */
196 0x10000000UL, /* 0x04, 256M */ 196 0x10000000UL, /* 0x04, 256M */
197 0x08000000UL, /* 0x06, 128M */ 197 0x08000000UL, /* 0x06, 128M */
@@ -203,7 +203,7 @@ ruffian_get_bank_size(unsigned long offset)
203 }; 203 };
204 204
205 bank = (bank & 0x1e) >> 1; 205 bank = (bank & 0x1e) >> 1;
206 if (bank < sizeof(size)/sizeof(*size)) 206 if (bank < ARRAY_SIZE(size))
207 ret = size[bank]; 207 ret = size[bank];
208 } 208 }
209 209
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 50eccde2dcd8..b191cc759737 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -233,7 +233,7 @@ validate_cc_value(unsigned long cc)
233 index = cpu->type & 0xffffffff; 233 index = cpu->type & 0xffffffff;
234 234
235 /* If index out of bounds, no way to validate. */ 235 /* If index out of bounds, no way to validate. */
236 if (index >= sizeof(cpu_hz)/sizeof(cpu_hz[0])) 236 if (index >= ARRAY_SIZE(cpu_hz))
237 return cc; 237 return cc;
238 238
239 /* If index contains no data, no way to validate. */ 239 /* If index contains no data, no way to validate. */
diff --git a/arch/arm/configs/ep93xx_defconfig b/arch/arm/configs/ep93xx_defconfig
index c0de6fcd488a..2948b4589a8b 100644
--- a/arch/arm/configs/ep93xx_defconfig
+++ b/arch/arm/configs/ep93xx_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.18-rc1 3# Linux kernel version: 2.6.18-rc1-git9
4# Sun Jul 9 15:21:30 2006 4# Sat Jul 15 15:08:10 2006
5# 5#
6CONFIG_ARM=y 6CONFIG_ARM=y
7CONFIG_MMU=y 7CONFIG_MMU=y
@@ -30,6 +30,7 @@ CONFIG_SWAP=y
30CONFIG_SYSVIPC=y 30CONFIG_SYSVIPC=y
31# CONFIG_POSIX_MQUEUE is not set 31# CONFIG_POSIX_MQUEUE is not set
32# CONFIG_BSD_PROCESS_ACCT is not set 32# CONFIG_BSD_PROCESS_ACCT is not set
33# CONFIG_TASKSTATS is not set
33CONFIG_SYSCTL=y 34CONFIG_SYSCTL=y
34# CONFIG_AUDIT is not set 35# CONFIG_AUDIT is not set
35CONFIG_IKCONFIG=y 36CONFIG_IKCONFIG=y
@@ -749,7 +750,7 @@ CONFIG_VIDEO_V4L2=y
749# USB support 750# USB support
750# 751#
751CONFIG_USB_ARCH_HAS_HCD=y 752CONFIG_USB_ARCH_HAS_HCD=y
752# CONFIG_USB_ARCH_HAS_OHCI is not set 753CONFIG_USB_ARCH_HAS_OHCI=y
753# CONFIG_USB_ARCH_HAS_EHCI is not set 754# CONFIG_USB_ARCH_HAS_EHCI is not set
754CONFIG_USB=y 755CONFIG_USB=y
755CONFIG_USB_DEBUG=y 756CONFIG_USB_DEBUG=y
@@ -766,6 +767,9 @@ CONFIG_USB_DYNAMIC_MINORS=y
766# USB Host Controller Drivers 767# USB Host Controller Drivers
767# 768#
768# CONFIG_USB_ISP116X_HCD is not set 769# CONFIG_USB_ISP116X_HCD is not set
770CONFIG_USB_OHCI_HCD=y
771# CONFIG_USB_OHCI_BIG_ENDIAN is not set
772CONFIG_USB_OHCI_LITTLE_ENDIAN=y
769# CONFIG_USB_SL811_HCD is not set 773# CONFIG_USB_SL811_HCD is not set
770 774
771# 775#
@@ -855,6 +859,7 @@ CONFIG_USB_SERIAL_CONSOLE=y
855CONFIG_USB_SERIAL_PL2303=y 859CONFIG_USB_SERIAL_PL2303=y
856# CONFIG_USB_SERIAL_HP4X is not set 860# CONFIG_USB_SERIAL_HP4X is not set
857# CONFIG_USB_SERIAL_SAFE is not set 861# CONFIG_USB_SERIAL_SAFE is not set
862# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set
858# CONFIG_USB_SERIAL_TI is not set 863# CONFIG_USB_SERIAL_TI is not set
859# CONFIG_USB_SERIAL_CYBERJACK is not set 864# CONFIG_USB_SERIAL_CYBERJACK is not set
860# CONFIG_USB_SERIAL_XIRCOM is not set 865# CONFIG_USB_SERIAL_XIRCOM is not set
@@ -871,7 +876,7 @@ CONFIG_USB_SERIAL_PL2303=y
871# CONFIG_USB_LEGOTOWER is not set 876# CONFIG_USB_LEGOTOWER is not set
872# CONFIG_USB_LCD is not set 877# CONFIG_USB_LCD is not set
873# CONFIG_USB_LED is not set 878# CONFIG_USB_LED is not set
874# CONFIG_USB_CY7C63 is not set 879# CONFIG_USB_CYPRESS_CY7C63 is not set
875# CONFIG_USB_CYTHERM is not set 880# CONFIG_USB_CYTHERM is not set
876# CONFIG_USB_PHIDGETKIT is not set 881# CONFIG_USB_PHIDGETKIT is not set
877# CONFIG_USB_PHIDGETSERVO is not set 882# CONFIG_USB_PHIDGETSERVO is not set
@@ -916,6 +921,7 @@ CONFIG_RTC_INTF_DEV=y
916# CONFIG_RTC_DRV_X1205 is not set 921# CONFIG_RTC_DRV_X1205 is not set
917# CONFIG_RTC_DRV_DS1307 is not set 922# CONFIG_RTC_DRV_DS1307 is not set
918# CONFIG_RTC_DRV_DS1553 is not set 923# CONFIG_RTC_DRV_DS1553 is not set
924# CONFIG_RTC_DRV_ISL1208 is not set
919# CONFIG_RTC_DRV_DS1672 is not set 925# CONFIG_RTC_DRV_DS1672 is not set
920# CONFIG_RTC_DRV_DS1742 is not set 926# CONFIG_RTC_DRV_DS1742 is not set
921# CONFIG_RTC_DRV_PCF8563 is not set 927# CONFIG_RTC_DRV_PCF8563 is not set
@@ -1023,7 +1029,6 @@ CONFIG_SUNRPC=y
1023# CONFIG_RPCSEC_GSS_SPKM3 is not set 1029# CONFIG_RPCSEC_GSS_SPKM3 is not set
1024# CONFIG_SMB_FS is not set 1030# CONFIG_SMB_FS is not set
1025# CONFIG_CIFS is not set 1031# CONFIG_CIFS is not set
1026# CONFIG_CIFS_DEBUG2 is not set
1027# CONFIG_NCP_FS is not set 1032# CONFIG_NCP_FS is not set
1028# CONFIG_CODA_FS is not set 1033# CONFIG_CODA_FS is not set
1029# CONFIG_AFS_FS is not set 1034# CONFIG_AFS_FS is not set
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 2242f5f7cb7d..4fe386eea4b4 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -114,9 +114,9 @@ ENTRY(secondary_startup)
114 * Use the page tables supplied from __cpu_up. 114 * Use the page tables supplied from __cpu_up.
115 */ 115 */
116 adr r4, __secondary_data 116 adr r4, __secondary_data
117 ldmia r4, {r5, r6, r13} @ address to jump to after 117 ldmia r4, {r5, r7, r13} @ address to jump to after
118 sub r4, r4, r5 @ mmu has been enabled 118 sub r4, r4, r5 @ mmu has been enabled
119 ldr r4, [r6, r4] @ get secondary_data.pgdir 119 ldr r4, [r7, r4] @ get secondary_data.pgdir
120 adr lr, __enable_mmu @ return address 120 adr lr, __enable_mmu @ return address
121 add pc, r10, #12 @ initialise processor 121 add pc, r10, #12 @ initialise processor
122 @ (return control reg) 122 @ (return control reg)
@@ -125,7 +125,7 @@ ENTRY(secondary_startup)
125 * r6 = &secondary_data 125 * r6 = &secondary_data
126 */ 126 */
127ENTRY(__secondary_switched) 127ENTRY(__secondary_switched)
128 ldr sp, [r6, #4] @ get secondary_data.stack 128 ldr sp, [r7, #4] @ get secondary_data.stack
129 mov fp, #0 129 mov fp, #0
130 b secondary_start_kernel 130 b secondary_start_kernel
131 131
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 35a052fc177a..4e29dd03e582 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -232,11 +232,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
232 bust_spinlocks(0); 232 bust_spinlocks(0);
233 spin_unlock_irq(&die_lock); 233 spin_unlock_irq(&die_lock);
234 234
235 if (panic_on_oops) { 235 if (panic_on_oops)
236 printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n"); 236 panic("Fatal exception: panic_on_oops");
237 ssleep(5);
238 panic("Fatal exception");
239 }
240 237
241 do_exit(SIGSEGV); 238 do_exit(SIGSEGV);
242} 239}
diff --git a/arch/arm/mach-footbridge/cats-hw.c b/arch/arm/mach-footbridge/cats-hw.c
index 5b64d5c5b967..ef6ccc8993e9 100644
--- a/arch/arm/mach-footbridge/cats-hw.c
+++ b/arch/arm/mach-footbridge/cats-hw.c
@@ -8,7 +8,7 @@
8#include <linux/ioport.h> 8#include <linux/ioport.h>
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/tty.h> 11#include <linux/screen_info.h>
12 12
13#include <asm/hardware/dec21285.h> 13#include <asm/hardware/dec21285.h>
14#include <asm/io.h> 14#include <asm/io.h>
diff --git a/arch/arm/mach-s3c2410/mach-anubis.c b/arch/arm/mach-s3c2410/mach-anubis.c
index 4a92d6f92d6b..60641d452db3 100644
--- a/arch/arm/mach-s3c2410/mach-anubis.c
+++ b/arch/arm/mach-s3c2410/mach-anubis.c
@@ -60,11 +60,12 @@ static struct map_desc anubis_iodesc[] __initdata = {
60 .virtual = (u32)S3C24XX_VA_ISA_BYTE, 60 .virtual = (u32)S3C24XX_VA_ISA_BYTE,
61 .pfn = __phys_to_pfn(0x0), 61 .pfn = __phys_to_pfn(0x0),
62 .length = SZ_4M, 62 .length = SZ_4M,
63 .type = MT_DEVICE 63 .type = MT_DEVICE,
64 }, { 64 }, {
65 .virtual = (u32)S3C24XX_VA_ISA_WORD, 65 .virtual = (u32)S3C24XX_VA_ISA_WORD,
66 .pfn = __phys_to_pfn(0x0), 66 .pfn = __phys_to_pfn(0x0),
67 .length = SZ_4M, MT_DEVICE 67 .length = SZ_4M,
68 .type = MT_DEVICE,
68 }, 69 },
69 70
70 /* we could possibly compress the next set down into a set of smaller tables 71 /* we could possibly compress the next set down into a set of smaller tables
@@ -78,36 +79,12 @@ static struct map_desc anubis_iodesc[] __initdata = {
78 .virtual = (u32)ANUBIS_VA_CTRL1, 79 .virtual = (u32)ANUBIS_VA_CTRL1,
79 .pfn = __phys_to_pfn(ANUBIS_PA_CTRL1), 80 .pfn = __phys_to_pfn(ANUBIS_PA_CTRL1),
80 .length = SZ_4K, 81 .length = SZ_4K,
81 .type = MT_DEVICE 82 .type = MT_DEVICE,
82 }, { 83 }, {
83 .virtual = (u32)ANUBIS_VA_CTRL2, 84 .virtual = (u32)ANUBIS_VA_CTRL2,
84 .pfn = __phys_to_pfn(ANUBIS_PA_CTRL2), 85 .pfn = __phys_to_pfn(ANUBIS_PA_CTRL2),
85 .length = SZ_4K, 86 .length = SZ_4K,
86 .type =MT_DEVICE 87 .type = MT_DEVICE,
87 },
88
89 /* IDE drives */
90
91 {
92 .virtual = (u32)ANUBIS_IDEPRI,
93 .pfn = __phys_to_pfn(S3C2410_CS3),
94 .length = SZ_1M,
95 .type = MT_DEVICE
96 }, {
97 .virtual = (u32)ANUBIS_IDEPRIAUX,
98 .pfn = __phys_to_pfn(S3C2410_CS3+(1<<26)),
99 .length = SZ_1M,
100 .type = MT_DEVICE
101 }, {
102 .virtual = (u32)ANUBIS_IDESEC,
103 .pfn = __phys_to_pfn(S3C2410_CS4),
104 .length = SZ_1M,
105 .type = MT_DEVICE
106 }, {
107 .virtual = (u32)ANUBIS_IDESECAUX,
108 .pfn = __phys_to_pfn(S3C2410_CS4+(1<<26)),
109 .length = SZ_1M,
110 .type = MT_DEVICE
111 }, 88 },
112}; 89};
113 90
@@ -126,7 +103,7 @@ static struct s3c24xx_uart_clksrc anubis_serial_clocks[] = {
126 .name = "pclk", 103 .name = "pclk",
127 .divisor = 1, 104 .divisor = 1,
128 .min_baud = 0, 105 .min_baud = 0,
129 .max_baud = 0. 106 .max_baud = 0,
130 } 107 }
131}; 108};
132 109
@@ -139,7 +116,7 @@ static struct s3c2410_uartcfg anubis_uartcfgs[] __initdata = {
139 .ulcon = ULCON, 116 .ulcon = ULCON,
140 .ufcon = UFCON, 117 .ufcon = UFCON,
141 .clocks = anubis_serial_clocks, 118 .clocks = anubis_serial_clocks,
142 .clocks_size = ARRAY_SIZE(anubis_serial_clocks) 119 .clocks_size = ARRAY_SIZE(anubis_serial_clocks),
143 }, 120 },
144 [1] = { 121 [1] = {
145 .hwport = 2, 122 .hwport = 2,
@@ -148,7 +125,7 @@ static struct s3c2410_uartcfg anubis_uartcfgs[] __initdata = {
148 .ulcon = ULCON, 125 .ulcon = ULCON,
149 .ufcon = UFCON, 126 .ufcon = UFCON,
150 .clocks = anubis_serial_clocks, 127 .clocks = anubis_serial_clocks,
151 .clocks_size = ARRAY_SIZE(anubis_serial_clocks) 128 .clocks_size = ARRAY_SIZE(anubis_serial_clocks),
152 }, 129 },
153}; 130};
154 131
@@ -162,7 +139,7 @@ static struct mtd_partition anubis_default_nand_part[] = {
162 [0] = { 139 [0] = {
163 .name = "Boot Agent", 140 .name = "Boot Agent",
164 .size = SZ_16K, 141 .size = SZ_16K,
165 .offset = 0 142 .offset = 0,
166 }, 143 },
167 [1] = { 144 [1] = {
168 .name = "/boot", 145 .name = "/boot",
@@ -194,21 +171,21 @@ static struct s3c2410_nand_set anubis_nand_sets[] = {
194 .nr_chips = 1, 171 .nr_chips = 1,
195 .nr_map = external_map, 172 .nr_map = external_map,
196 .nr_partitions = ARRAY_SIZE(anubis_default_nand_part), 173 .nr_partitions = ARRAY_SIZE(anubis_default_nand_part),
197 .partitions = anubis_default_nand_part 174 .partitions = anubis_default_nand_part,
198 }, 175 },
199 [0] = { 176 [0] = {
200 .name = "chip0", 177 .name = "chip0",
201 .nr_chips = 1, 178 .nr_chips = 1,
202 .nr_map = chip0_map, 179 .nr_map = chip0_map,
203 .nr_partitions = ARRAY_SIZE(anubis_default_nand_part), 180 .nr_partitions = ARRAY_SIZE(anubis_default_nand_part),
204 .partitions = anubis_default_nand_part 181 .partitions = anubis_default_nand_part,
205 }, 182 },
206 [2] = { 183 [2] = {
207 .name = "chip1", 184 .name = "chip1",
208 .nr_chips = 1, 185 .nr_chips = 1,
209 .nr_map = chip1_map, 186 .nr_map = chip1_map,
210 .nr_partitions = ARRAY_SIZE(anubis_default_nand_part), 187 .nr_partitions = ARRAY_SIZE(anubis_default_nand_part),
211 .partitions = anubis_default_nand_part 188 .partitions = anubis_default_nand_part,
212 }, 189 },
213}; 190};
214 191
@@ -313,7 +290,7 @@ static struct s3c24xx_board anubis_board __initdata = {
313 .devices = anubis_devices, 290 .devices = anubis_devices,
314 .devices_count = ARRAY_SIZE(anubis_devices), 291 .devices_count = ARRAY_SIZE(anubis_devices),
315 .clocks = anubis_clocks, 292 .clocks = anubis_clocks,
316 .clocks_count = ARRAY_SIZE(anubis_clocks) 293 .clocks_count = ARRAY_SIZE(anubis_clocks),
317}; 294};
318 295
319static void __init anubis_map_io(void) 296static void __init anubis_map_io(void)
diff --git a/arch/arm/mach-s3c2410/mach-osiris.c b/arch/arm/mach-s3c2410/mach-osiris.c
index 858fd03c6bc5..e193ba69e652 100644
--- a/arch/arm/mach-s3c2410/mach-osiris.c
+++ b/arch/arm/mach-s3c2410/mach-osiris.c
@@ -67,12 +67,12 @@ static struct map_desc osiris_iodesc[] __initdata = {
67 .virtual = (u32)OSIRIS_VA_CTRL1, 67 .virtual = (u32)OSIRIS_VA_CTRL1,
68 .pfn = __phys_to_pfn(OSIRIS_PA_CTRL1), 68 .pfn = __phys_to_pfn(OSIRIS_PA_CTRL1),
69 .length = SZ_16K, 69 .length = SZ_16K,
70 .type = MT_DEVICE 70 .type = MT_DEVICE,
71 }, { 71 }, {
72 .virtual = (u32)OSIRIS_VA_CTRL2, 72 .virtual = (u32)OSIRIS_VA_CTRL2,
73 .pfn = __phys_to_pfn(OSIRIS_PA_CTRL2), 73 .pfn = __phys_to_pfn(OSIRIS_PA_CTRL2),
74 .length = SZ_16K, 74 .length = SZ_16K,
75 .type = MT_DEVICE 75 .type = MT_DEVICE,
76 }, 76 },
77}; 77};
78 78
@@ -91,7 +91,7 @@ static struct s3c24xx_uart_clksrc osiris_serial_clocks[] = {
91 .name = "pclk", 91 .name = "pclk",
92 .divisor = 1, 92 .divisor = 1,
93 .min_baud = 0, 93 .min_baud = 0,
94 .max_baud = 0. 94 .max_baud = 0,
95 } 95 }
96}; 96};
97 97
@@ -103,7 +103,7 @@ static struct s3c2410_uartcfg osiris_uartcfgs[] __initdata = {
103 .ulcon = ULCON, 103 .ulcon = ULCON,
104 .ufcon = UFCON, 104 .ufcon = UFCON,
105 .clocks = osiris_serial_clocks, 105 .clocks = osiris_serial_clocks,
106 .clocks_size = ARRAY_SIZE(osiris_serial_clocks) 106 .clocks_size = ARRAY_SIZE(osiris_serial_clocks),
107 }, 107 },
108 [1] = { 108 [1] = {
109 .hwport = 1, 109 .hwport = 1,
@@ -112,7 +112,7 @@ static struct s3c2410_uartcfg osiris_uartcfgs[] __initdata = {
112 .ulcon = ULCON, 112 .ulcon = ULCON,
113 .ufcon = UFCON, 113 .ufcon = UFCON,
114 .clocks = osiris_serial_clocks, 114 .clocks = osiris_serial_clocks,
115 .clocks_size = ARRAY_SIZE(osiris_serial_clocks) 115 .clocks_size = ARRAY_SIZE(osiris_serial_clocks),
116 }, 116 },
117}; 117};
118 118
@@ -126,7 +126,7 @@ static struct mtd_partition osiris_default_nand_part[] = {
126 [0] = { 126 [0] = {
127 .name = "Boot Agent", 127 .name = "Boot Agent",
128 .size = SZ_16K, 128 .size = SZ_16K,
129 .offset = 0 129 .offset = 0,
130 }, 130 },
131 [1] = { 131 [1] = {
132 .name = "/boot", 132 .name = "/boot",
@@ -158,21 +158,21 @@ static struct s3c2410_nand_set osiris_nand_sets[] = {
158 .nr_chips = 1, 158 .nr_chips = 1,
159 .nr_map = external_map, 159 .nr_map = external_map,
160 .nr_partitions = ARRAY_SIZE(osiris_default_nand_part), 160 .nr_partitions = ARRAY_SIZE(osiris_default_nand_part),
161 .partitions = osiris_default_nand_part 161 .partitions = osiris_default_nand_part,
162 }, 162 },
163 [0] = { 163 [0] = {
164 .name = "chip0", 164 .name = "chip0",
165 .nr_chips = 1, 165 .nr_chips = 1,
166 .nr_map = chip0_map, 166 .nr_map = chip0_map,
167 .nr_partitions = ARRAY_SIZE(osiris_default_nand_part), 167 .nr_partitions = ARRAY_SIZE(osiris_default_nand_part),
168 .partitions = osiris_default_nand_part 168 .partitions = osiris_default_nand_part,
169 }, 169 },
170 [2] = { 170 [2] = {
171 .name = "chip1", 171 .name = "chip1",
172 .nr_chips = 1, 172 .nr_chips = 1,
173 .nr_map = chip1_map, 173 .nr_map = chip1_map,
174 .nr_partitions = ARRAY_SIZE(osiris_default_nand_part), 174 .nr_partitions = ARRAY_SIZE(osiris_default_nand_part),
175 .partitions = osiris_default_nand_part 175 .partitions = osiris_default_nand_part,
176 }, 176 },
177}; 177};
178 178
@@ -245,7 +245,7 @@ static struct s3c24xx_board osiris_board __initdata = {
245 .devices = osiris_devices, 245 .devices = osiris_devices,
246 .devices_count = ARRAY_SIZE(osiris_devices), 246 .devices_count = ARRAY_SIZE(osiris_devices),
247 .clocks = osiris_clocks, 247 .clocks = osiris_clocks,
248 .clocks_count = ARRAY_SIZE(osiris_clocks) 248 .clocks_count = ARRAY_SIZE(osiris_clocks),
249}; 249};
250 250
251static void __init osiris_map_io(void) 251static void __init osiris_map_io(void)
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index dba7dddfe57d..88a999df0ab3 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -363,7 +363,9 @@ EXPORT_SYMBOL(__ioremap);
363 363
364void __iounmap(void __iomem *addr) 364void __iounmap(void __iomem *addr)
365{ 365{
366#ifndef CONFIG_SMP
366 struct vm_struct **p, *tmp; 367 struct vm_struct **p, *tmp;
368#endif
367 unsigned int section_mapping = 0; 369 unsigned int section_mapping = 0;
368 370
369 addr = (void __iomem *)(PAGE_MASK & (unsigned long)addr); 371 addr = (void __iomem *)(PAGE_MASK & (unsigned long)addr);
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c
index 6c5f0fe578a5..ab143557e688 100644
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -13,6 +13,7 @@
13#include <asm/cacheflush.h> 13#include <asm/cacheflush.h>
14#include <asm/proc-fns.h> 14#include <asm/proc-fns.h>
15#include <asm/tlbflush.h> 15#include <asm/tlbflush.h>
16#include <asm/page.h>
16 17
17#ifndef MULTI_CPU 18#ifndef MULTI_CPU
18EXPORT_SYMBOL(cpu_dcache_clean_area); 19EXPORT_SYMBOL(cpu_dcache_clean_area);
@@ -30,6 +31,13 @@ EXPORT_SYMBOL(__cpuc_coherent_kern_range);
30EXPORT_SYMBOL(cpu_cache); 31EXPORT_SYMBOL(cpu_cache);
31#endif 32#endif
32 33
34#ifndef MULTI_USER
35EXPORT_SYMBOL(__cpu_clear_user_page);
36EXPORT_SYMBOL(__cpu_copy_user_page);
37#else
38EXPORT_SYMBOL(cpu_user);
39#endif
40
33/* 41/*
34 * No module should need to touch the TLB (and currently 42 * No module should need to touch the TLB (and currently
35 * no modules do. We export this for "loadkernel" support 43 * no modules do. We export this for "loadkernel" support
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 521538671f4c..561bff73a036 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -536,6 +536,11 @@ cpu_80200_name:
536 .asciz "XScale-80200" 536 .asciz "XScale-80200"
537 .size cpu_80200_name, . - cpu_80200_name 537 .size cpu_80200_name, . - cpu_80200_name
538 538
539 .type cpu_80219_name, #object
540cpu_80219_name:
541 .asciz "XScale-80219"
542 .size cpu_80219_name, . - cpu_80219_name
543
539 .type cpu_8032x_name, #object 544 .type cpu_8032x_name, #object
540cpu_8032x_name: 545cpu_8032x_name:
541 .asciz "XScale-IOP8032x Family" 546 .asciz "XScale-IOP8032x Family"
@@ -613,10 +618,33 @@ __80200_proc_info:
613 .long xscale_cache_fns 618 .long xscale_cache_fns
614 .size __80200_proc_info, . - __80200_proc_info 619 .size __80200_proc_info, . - __80200_proc_info
615 620
621 .type __80219_proc_info,#object
622__80219_proc_info:
623 .long 0x69052e20
624 .long 0xffffffe0
625 .long PMD_TYPE_SECT | \
626 PMD_SECT_BUFFERABLE | \
627 PMD_SECT_CACHEABLE | \
628 PMD_SECT_AP_WRITE | \
629 PMD_SECT_AP_READ
630 .long PMD_TYPE_SECT | \
631 PMD_SECT_AP_WRITE | \
632 PMD_SECT_AP_READ
633 b __xscale_setup
634 .long cpu_arch_name
635 .long cpu_elf_name
636 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
637 .long cpu_80219_name
638 .long xscale_processor_functions
639 .long v4wbi_tlb_fns
640 .long xscale_mc_user_fns
641 .long xscale_cache_fns
642 .size __80219_proc_info, . - __80219_proc_info
643
616 .type __8032x_proc_info,#object 644 .type __8032x_proc_info,#object
617__8032x_proc_info: 645__8032x_proc_info:
618 .long 0x69052420 646 .long 0x69052420
619 .long 0xfffff5e0 @ mask should accomodate IOP80219 also 647 .long 0xffffffe0
620 .long PMD_TYPE_SECT | \ 648 .long PMD_TYPE_SECT | \
621 PMD_SECT_BUFFERABLE | \ 649 PMD_SECT_BUFFERABLE | \
622 PMD_SECT_CACHEABLE | \ 650 PMD_SECT_CACHEABLE | \
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index daa75ce4b777..f71fb4a029cb 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -672,7 +672,7 @@ config MTRR
672 See <file:Documentation/mtrr.txt> for more information. 672 See <file:Documentation/mtrr.txt> for more information.
673 673
674config EFI 674config EFI
675 bool "Boot from EFI support (EXPERIMENTAL)" 675 bool "Boot from EFI support"
676 depends on ACPI 676 depends on ACPI
677 default n 677 default n
678 ---help--- 678 ---help---
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index 1b452a1665c4..ab98fc21a541 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -59,7 +59,8 @@ quiet_cmd_syscall = SYSCALL $@
59 59
60export CPPFLAGS_vsyscall.lds += -P -C -U$(ARCH) 60export CPPFLAGS_vsyscall.lds += -P -C -U$(ARCH)
61 61
62vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1 62vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1 \
63 $(call ld-option, -Wl$(comma)--hash-style=sysv)
63SYSCFLAGS_vsyscall-sysenter.so = $(vsyscall-flags) 64SYSCFLAGS_vsyscall-sysenter.so = $(vsyscall-flags)
64SYSCFLAGS_vsyscall-int80.so = $(vsyscall-flags) 65SYSCFLAGS_vsyscall-int80.so = $(vsyscall-flags)
65 66
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index e9f0b928b0a9..5c43be47587f 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -759,7 +759,7 @@ static int __cpuinit cache_sysfs_init(void)
759 if (num_cache_leaves == 0) 759 if (num_cache_leaves == 0)
760 return 0; 760 return 0;
761 761
762 register_cpu_notifier(&cacheinfo_cpu_notifier); 762 register_hotcpu_notifier(&cacheinfo_cpu_notifier);
763 763
764 for_each_online_cpu(i) { 764 for_each_online_cpu(i) {
765 cacheinfo_cpu_callback(&cacheinfo_cpu_notifier, CPU_ONLINE, 765 cacheinfo_cpu_callback(&cacheinfo_cpu_notifier, CPU_ONLINE,
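Several hunks in this series replace register_cpu_notifier() with register_hotcpu_notifier(). The point of the hotplug variant is that it can compile away when CONFIG_HOTPLUG_CPU is off, so the notifier block and its callback become dead code instead of needing ad-hoc #ifdefs around each CPU_DEAD case. A rough sketch of that pattern, not the literal <linux/cpu.h> definitions:

    #ifdef CONFIG_HOTPLUG_CPU
    #define register_hotcpu_notifier(nb)    register_cpu_notifier(nb)
    #define unregister_hotcpu_notifier(nb)  unregister_cpu_notifier(nb)
    #else
    /* CPUs can never be offlined, so CPU_DEAD handling is never needed */
    #define register_hotcpu_notifier(nb)    do { (void)(nb); } while (0)
    #define unregister_hotcpu_notifier(nb)  do { (void)(nb); } while (0)
    #endif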
diff --git a/arch/i386/kernel/cpu/mcheck/mce.h b/arch/i386/kernel/cpu/mcheck/mce.h
index dc2416dfef15..84fd4cf7d0fb 100644
--- a/arch/i386/kernel/cpu/mcheck/mce.h
+++ b/arch/i386/kernel/cpu/mcheck/mce.h
@@ -9,6 +9,6 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c);
9/* Call the installed machine check handler for this CPU setup. */ 9/* Call the installed machine check handler for this CPU setup. */
10extern fastcall void (*machine_check_vector)(struct pt_regs *, long error_code); 10extern fastcall void (*machine_check_vector)(struct pt_regs *, long error_code);
11 11
12extern int mce_disabled __initdata; 12extern int mce_disabled;
13extern int nr_mce_banks; 13extern int nr_mce_banks;
14 14
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index d9a260f2efb4..37a7d2eaf4a0 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -204,7 +204,7 @@ VM_MASK = 0x00020000
204ENTRY(ret_from_fork) 204ENTRY(ret_from_fork)
205 CFI_STARTPROC 205 CFI_STARTPROC
206 pushl %eax 206 pushl %eax
207 CFI_ADJUST_CFA_OFFSET -4 207 CFI_ADJUST_CFA_OFFSET 4
208 call schedule_tail 208 call schedule_tail
209 GET_THREAD_INFO(%ebp) 209 GET_THREAD_INFO(%ebp)
210 popl %eax 210 popl %eax
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index de2e16e561c0..afe6505ca0b3 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -256,11 +256,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
256 int ret = 0; 256 int ret = 0;
257 kprobe_opcode_t *addr; 257 kprobe_opcode_t *addr;
258 struct kprobe_ctlblk *kcb; 258 struct kprobe_ctlblk *kcb;
259#ifdef CONFIG_PREEMPT
260 unsigned pre_preempt_count = preempt_count();
261#else
262 unsigned pre_preempt_count = 1;
263#endif
264 259
265 addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t)); 260 addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t));
266 261
@@ -338,13 +333,15 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
338 return 1; 333 return 1;
339 334
340ss_probe: 335ss_probe:
341 if (pre_preempt_count && p->ainsn.boostable == 1 && !p->post_handler){ 336#ifndef CONFIG_PREEMPT
337 if (p->ainsn.boostable == 1 && !p->post_handler){
342 /* Boost up -- we can execute copied instructions directly */ 338 /* Boost up -- we can execute copied instructions directly */
343 reset_current_kprobe(); 339 reset_current_kprobe();
344 regs->eip = (unsigned long)p->ainsn.insn; 340 regs->eip = (unsigned long)p->ainsn.insn;
345 preempt_enable_no_resched(); 341 preempt_enable_no_resched();
346 return 1; 342 return 1;
347 } 343 }
344#endif
348 prepare_singlestep(p, regs); 345 prepare_singlestep(p, regs);
349 kcb->kprobe_status = KPROBE_HIT_SS; 346 kcb->kprobe_status = KPROBE_HIT_SS;
350 return 1; 347 return 1;
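For readers unfamiliar with the kprobe booster: boosting skips the single-step trap by resuming execution directly in the out-of-line copy of the probed instruction. With CONFIG_PREEMPT that copy could be reclaimed while a preempted task is still executing inside it, so this hunk compiles the optimisation out entirely rather than sampling preempt_count() at probe time. A condensed, illustrative rendering of the resulting decision:

    static int try_to_boost(struct kprobe *p, struct pt_regs *regs)  /* sketch */
    {
    #ifndef CONFIG_PREEMPT
            if (p->ainsn.boostable == 1 && !p->post_handler) {
                    reset_current_kprobe();
                    regs->eip = (unsigned long)p->ainsn.insn;
                    preempt_enable_no_resched();
                    return 1;               /* resume in the instruction copy */
            }
    #endif
            return 0;                       /* fall back to single-stepping */
    }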
diff --git a/arch/i386/kernel/machine_kexec.c b/arch/i386/kernel/machine_kexec.c
index 511abe52a94e..6b1ae6ba76f0 100644
--- a/arch/i386/kernel/machine_kexec.c
+++ b/arch/i386/kernel/machine_kexec.c
@@ -189,14 +189,11 @@ NORET_TYPE void machine_kexec(struct kimage *image)
189 memcpy((void *)reboot_code_buffer, relocate_new_kernel, 189 memcpy((void *)reboot_code_buffer, relocate_new_kernel,
190 relocate_new_kernel_size); 190 relocate_new_kernel_size);
191 191
192 /* The segment registers are funny things, they are 192 /* The segment registers are funny things, they have both a
193 * automatically loaded from a table, in memory wherever you 193 * visible and an invisible part. Whenever the visible part is
194 * set them to a specific selector, but this table is never 194 * set to a specific selector, the invisible part is loaded
195 * accessed again you set the segment to a different selector. 195 * with from a table in memory. At no other time is the
196 * 196 * descriptor table in memory accessed.
197 * The more common model is are caches where the behide
198 * the scenes work is done, but is also dropped at arbitrary
199 * times.
200 * 197 *
201 * I take advantage of this here by force loading the 198 * I take advantage of this here by force loading the
202 * segments, before I zap the gdt with an invalid value. 199 * segments, before I zap the gdt with an invalid value.
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 2dd928a84645..acb351478e42 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -575,6 +575,7 @@ void touch_nmi_watchdog (void)
575 */ 575 */
576 touch_softlockup_watchdog(); 576 touch_softlockup_watchdog();
577} 577}
578EXPORT_SYMBOL(touch_nmi_watchdog);
578 579
579extern void die_nmi(struct pt_regs *, const char *msg); 580extern void die_nmi(struct pt_regs *, const char *msg);
580 581
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 923bb292f47f..8657c739656a 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -690,8 +690,8 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
690 /* 690 /*
691 * Now maybe handle debug registers and/or IO bitmaps 691 * Now maybe handle debug registers and/or IO bitmaps
692 */ 692 */
693 if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW)) 693 if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW)
694 || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) 694 || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)))
695 __switch_to_xtra(next_p, tss); 695 __switch_to_xtra(next_p, tss);
696 696
697 disable_tsc(prev_p, next_p); 697 disable_tsc(prev_p, next_p);
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 6f5fea05f1d7..f948419c888a 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -212,14 +212,20 @@ valid_k7:
212 * then we print a warning if not, and always resync. 212 * then we print a warning if not, and always resync.
213 */ 213 */
214 214
215static atomic_t tsc_start_flag = ATOMIC_INIT(0); 215static struct {
216static atomic_t tsc_count_start = ATOMIC_INIT(0); 216 atomic_t start_flag;
217static atomic_t tsc_count_stop = ATOMIC_INIT(0); 217 atomic_t count_start;
218static unsigned long long tsc_values[NR_CPUS]; 218 atomic_t count_stop;
219 unsigned long long values[NR_CPUS];
220} tsc __initdata = {
221 .start_flag = ATOMIC_INIT(0),
222 .count_start = ATOMIC_INIT(0),
223 .count_stop = ATOMIC_INIT(0),
224};
219 225
220#define NR_LOOPS 5 226#define NR_LOOPS 5
221 227
222static void __init synchronize_tsc_bp (void) 228static void __init synchronize_tsc_bp(void)
223{ 229{
224 int i; 230 int i;
225 unsigned long long t0; 231 unsigned long long t0;
@@ -233,7 +239,7 @@ static void __init synchronize_tsc_bp (void)
233 /* convert from kcyc/sec to cyc/usec */ 239 /* convert from kcyc/sec to cyc/usec */
234 one_usec = cpu_khz / 1000; 240 one_usec = cpu_khz / 1000;
235 241
236 atomic_set(&tsc_start_flag, 1); 242 atomic_set(&tsc.start_flag, 1);
237 wmb(); 243 wmb();
238 244
239 /* 245 /*
@@ -250,16 +256,16 @@ static void __init synchronize_tsc_bp (void)
250 /* 256 /*
251 * all APs synchronize but they loop on '== num_cpus' 257 * all APs synchronize but they loop on '== num_cpus'
252 */ 258 */
253 while (atomic_read(&tsc_count_start) != num_booting_cpus()-1) 259 while (atomic_read(&tsc.count_start) != num_booting_cpus()-1)
254 cpu_relax(); 260 cpu_relax();
255 atomic_set(&tsc_count_stop, 0); 261 atomic_set(&tsc.count_stop, 0);
256 wmb(); 262 wmb();
257 /* 263 /*
258 * this lets the APs save their current TSC: 264 * this lets the APs save their current TSC:
259 */ 265 */
260 atomic_inc(&tsc_count_start); 266 atomic_inc(&tsc.count_start);
261 267
262 rdtscll(tsc_values[smp_processor_id()]); 268 rdtscll(tsc.values[smp_processor_id()]);
263 /* 269 /*
264 * We clear the TSC in the last loop: 270 * We clear the TSC in the last loop:
265 */ 271 */
@@ -269,56 +275,54 @@ static void __init synchronize_tsc_bp (void)
269 /* 275 /*
270 * Wait for all APs to leave the synchronization point: 276 * Wait for all APs to leave the synchronization point:
271 */ 277 */
272 while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1) 278 while (atomic_read(&tsc.count_stop) != num_booting_cpus()-1)
273 cpu_relax(); 279 cpu_relax();
274 atomic_set(&tsc_count_start, 0); 280 atomic_set(&tsc.count_start, 0);
275 wmb(); 281 wmb();
276 atomic_inc(&tsc_count_stop); 282 atomic_inc(&tsc.count_stop);
277 } 283 }
278 284
279 sum = 0; 285 sum = 0;
280 for (i = 0; i < NR_CPUS; i++) { 286 for (i = 0; i < NR_CPUS; i++) {
281 if (cpu_isset(i, cpu_callout_map)) { 287 if (cpu_isset(i, cpu_callout_map)) {
282 t0 = tsc_values[i]; 288 t0 = tsc.values[i];
283 sum += t0; 289 sum += t0;
284 } 290 }
285 } 291 }
286 avg = sum; 292 avg = sum;
287 do_div(avg, num_booting_cpus()); 293 do_div(avg, num_booting_cpus());
288 294
289 sum = 0;
290 for (i = 0; i < NR_CPUS; i++) { 295 for (i = 0; i < NR_CPUS; i++) {
291 if (!cpu_isset(i, cpu_callout_map)) 296 if (!cpu_isset(i, cpu_callout_map))
292 continue; 297 continue;
293 delta = tsc_values[i] - avg; 298 delta = tsc.values[i] - avg;
294 if (delta < 0) 299 if (delta < 0)
295 delta = -delta; 300 delta = -delta;
296 /* 301 /*
297 * We report bigger than 2 microseconds clock differences. 302 * We report bigger than 2 microseconds clock differences.
298 */ 303 */
299 if (delta > 2*one_usec) { 304 if (delta > 2*one_usec) {
300 long realdelta; 305 long long realdelta;
306
301 if (!buggy) { 307 if (!buggy) {
302 buggy = 1; 308 buggy = 1;
303 printk("\n"); 309 printk("\n");
304 } 310 }
305 realdelta = delta; 311 realdelta = delta;
306 do_div(realdelta, one_usec); 312 do_div(realdelta, one_usec);
307 if (tsc_values[i] < avg) 313 if (tsc.values[i] < avg)
308 realdelta = -realdelta; 314 realdelta = -realdelta;
309 315
310 if (realdelta > 0) 316 if (realdelta)
311 printk(KERN_INFO "CPU#%d had %ld usecs TSC " 317 printk(KERN_INFO "CPU#%d had %Ld usecs TSC "
312 "skew, fixed it up.\n", i, realdelta); 318 "skew, fixed it up.\n", i, realdelta);
313 } 319 }
314
315 sum += delta;
316 } 320 }
317 if (!buggy) 321 if (!buggy)
318 printk("passed.\n"); 322 printk("passed.\n");
319} 323}
320 324
321static void __init synchronize_tsc_ap (void) 325static void __init synchronize_tsc_ap(void)
322{ 326{
323 int i; 327 int i;
324 328
@@ -327,20 +331,20 @@ static void __init synchronize_tsc_ap (void)
327 * this gets called, so we first wait for the BP to 331 * this gets called, so we first wait for the BP to
328 * finish SMP initialization: 332 * finish SMP initialization:
329 */ 333 */
330 while (!atomic_read(&tsc_start_flag)) 334 while (!atomic_read(&tsc.start_flag))
331 cpu_relax(); 335 cpu_relax();
332 336
333 for (i = 0; i < NR_LOOPS; i++) { 337 for (i = 0; i < NR_LOOPS; i++) {
334 atomic_inc(&tsc_count_start); 338 atomic_inc(&tsc.count_start);
335 while (atomic_read(&tsc_count_start) != num_booting_cpus()) 339 while (atomic_read(&tsc.count_start) != num_booting_cpus())
336 cpu_relax(); 340 cpu_relax();
337 341
338 rdtscll(tsc_values[smp_processor_id()]); 342 rdtscll(tsc.values[smp_processor_id()]);
339 if (i == NR_LOOPS-1) 343 if (i == NR_LOOPS-1)
340 write_tsc(0, 0); 344 write_tsc(0, 0);
341 345
342 atomic_inc(&tsc_count_stop); 346 atomic_inc(&tsc.count_stop);
343 while (atomic_read(&tsc_count_stop) != num_booting_cpus()) 347 while (atomic_read(&tsc.count_stop) != num_booting_cpus())
344 cpu_relax(); 348 cpu_relax();
345 } 349 }
346} 350}
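The arithmetic behind the skew message, pulled out for clarity: the BP averages every sampled TSC, and each CPU's distance from that average is converted to microseconds with do_div(). Switching realdelta to long long keeps do_div() operating on a full 64-bit value rather than a truncated long. A small sketch, assuming the sampled value and a cpu_khz-style frequency are in hand (the helper name is made up):

    static long long my_tsc_skew_usecs(unsigned long long sample,
                                       unsigned long long avg,
                                       unsigned int khz)
    {
            unsigned int one_usec = khz / 1000;     /* TSC cycles per microsecond */
            unsigned long long delta = sample > avg ? sample - avg : avg - sample;

            do_div(delta, one_usec);                /* in-place 64-bit divide */
            return sample < avg ? -(long long)delta : (long long)delta;
    }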
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index 8705c0f05788..edd00f6cee37 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -135,7 +135,7 @@ unsigned long profile_pc(struct pt_regs *regs)
135{ 135{
136 unsigned long pc = instruction_pointer(regs); 136 unsigned long pc = instruction_pointer(regs);
137 137
138 if (in_lock_functions(pc)) 138 if (!user_mode_vm(regs) && in_lock_functions(pc))
139 return *(unsigned long *)(regs->ebp + 4); 139 return *(unsigned long *)(regs->ebp + 4);
140 140
141 return pc; 141 return pc;
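The new guard matters because regs->ebp is only a kernel frame pointer when the sampled context was in the kernel; for a user-mode or vm86 sample it holds whatever userspace left there, and dereferencing it could fault or feed garbage into the profile. After this hunk the function is effectively:

    unsigned long profile_pc(struct pt_regs *regs)
    {
            unsigned long pc = instruction_pointer(regs);

            /* only chase the frame pointer for kernel-mode samples */
            if (!user_mode_vm(regs) && in_lock_functions(pc))
                    return *(unsigned long *)(regs->ebp + 4);  /* caller's PC */

            return pc;
    }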
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 313ac1f7dc5a..0d4005dc06c5 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -187,10 +187,21 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
187 if (unwind_init_blocked(&info, task) == 0) 187 if (unwind_init_blocked(&info, task) == 0)
188 unw_ret = show_trace_unwind(&info, log_lvl); 188 unw_ret = show_trace_unwind(&info, log_lvl);
189 } 189 }
190 if (unw_ret > 0) { 190 if (unw_ret > 0 && !arch_unw_user_mode(&info)) {
191 if (call_trace > 0) 191#ifdef CONFIG_STACK_UNWIND
192 print_symbol("DWARF2 unwinder stuck at %s\n",
193 UNW_PC(&info));
194 if (call_trace == 1) {
195 printk("Leftover inexact backtrace:\n");
196 if (UNW_SP(&info))
197 stack = (void *)UNW_SP(&info);
198 } else if (call_trace > 1)
192 return; 199 return;
193 printk("%sLegacy call trace:\n", log_lvl); 200 else
201 printk("Full inexact backtrace again:\n");
202#else
203 printk("Inexact backtrace:\n");
204#endif
194 } 205 }
195 } 206 }
196 207
@@ -442,11 +453,9 @@ void die(const char * str, struct pt_regs * regs, long err)
442 if (in_interrupt()) 453 if (in_interrupt())
443 panic("Fatal exception in interrupt"); 454 panic("Fatal exception in interrupt");
444 455
445 if (panic_on_oops) { 456 if (panic_on_oops)
446 printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n"); 457 panic("Fatal exception: panic_on_oops");
447 ssleep(5); 458
448 panic("Fatal exception");
449 }
450 oops_exit(); 459 oops_exit();
451 do_exit(SIGSEGV); 460 do_exit(SIGSEGV);
452} 461}
@@ -1238,8 +1247,10 @@ static int __init call_trace_setup(char *s)
1238 call_trace = -1; 1247 call_trace = -1;
1239 else if (strcmp(s, "both") == 0) 1248 else if (strcmp(s, "both") == 0)
1240 call_trace = 0; 1249 call_trace = 0;
1241 else if (strcmp(s, "new") == 0) 1250 else if (strcmp(s, "newfallback") == 0)
1242 call_trace = 1; 1251 call_trace = 1;
 1252 else if (strcmp(s, "new") == 0)
1253 call_trace = 2;
1243 return 1; 1254 return 1;
1244} 1255}
1245__setup("call_trace=", call_trace_setup); 1256__setup("call_trace=", call_trace_setup);
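Taken together with the show_trace_log_lvl() hunk earlier in this file, the call_trace= boot parameter now selects one of four behaviours. The mapping below is inferred from these two hunks rather than from separate documentation:

    /*
     * call_trace=old          -> -1  legacy frame walk only
     * call_trace=both         ->  0  DWARF2 unwinder, then the full inexact
     *                                backtrace is printed as well
     * call_trace=newfallback  ->  1  unwinder first; inexact backtrace only
     *                                when the unwinder gets stuck
     * call_trace=new          ->  2  DWARF2 unwinder only
     */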
diff --git a/arch/i386/kernel/vsyscall.lds.S b/arch/i386/kernel/vsyscall.lds.S
index e26975fc68b6..f66cd11adb72 100644
--- a/arch/i386/kernel/vsyscall.lds.S
+++ b/arch/i386/kernel/vsyscall.lds.S
@@ -10,6 +10,7 @@ SECTIONS
10 . = VDSO_PRELINK + SIZEOF_HEADERS; 10 . = VDSO_PRELINK + SIZEOF_HEADERS;
11 11
12 .hash : { *(.hash) } :text 12 .hash : { *(.hash) } :text
13 .gnu.hash : { *(.gnu.hash) }
13 .dynsym : { *(.dynsym) } 14 .dynsym : { *(.dynsym) }
14 .dynstr : { *(.dynstr) } 15 .dynstr : { *(.dynstr) }
15 .gnu.version : { *(.gnu.version) } 16 .gnu.version : { *(.gnu.version) }
diff --git a/arch/ia64/configs/sn2_defconfig b/arch/ia64/configs/sn2_defconfig
index 9ea35398e10d..0f14a82b856e 100644
--- a/arch/ia64/configs/sn2_defconfig
+++ b/arch/ia64/configs/sn2_defconfig
@@ -363,7 +363,7 @@ CONFIG_BLK_DEV_IDECD=y
363# 363#
364CONFIG_IDE_GENERIC=y 364CONFIG_IDE_GENERIC=y
365CONFIG_BLK_DEV_IDEPCI=y 365CONFIG_BLK_DEV_IDEPCI=y
366# CONFIG_IDEPCI_SHARE_IRQ is not set 366CONFIG_IDEPCI_SHARE_IRQ=y
367# CONFIG_BLK_DEV_OFFBOARD is not set 367# CONFIG_BLK_DEV_OFFBOARD is not set
368# CONFIG_BLK_DEV_GENERIC is not set 368# CONFIG_BLK_DEV_GENERIC is not set
369# CONFIG_BLK_DEV_OPTI621 is not set 369# CONFIG_BLK_DEV_OPTI621 is not set
diff --git a/arch/ia64/defconfig b/arch/ia64/defconfig
index 6cba55da572a..9001b3fbaa32 100644
--- a/arch/ia64/defconfig
+++ b/arch/ia64/defconfig
@@ -366,7 +366,7 @@ CONFIG_BLK_DEV_IDESCSI=m
366# CONFIG_IDE_GENERIC is not set 366# CONFIG_IDE_GENERIC is not set
367# CONFIG_BLK_DEV_IDEPNP is not set 367# CONFIG_BLK_DEV_IDEPNP is not set
368CONFIG_BLK_DEV_IDEPCI=y 368CONFIG_BLK_DEV_IDEPCI=y
369# CONFIG_IDEPCI_SHARE_IRQ is not set 369CONFIG_IDEPCI_SHARE_IRQ=y
370# CONFIG_BLK_DEV_OFFBOARD is not set 370# CONFIG_BLK_DEV_OFFBOARD is not set
371CONFIG_BLK_DEV_GENERIC=y 371CONFIG_BLK_DEV_GENERIC=y
372# CONFIG_BLK_DEV_OPTI621 is not set 372# CONFIG_BLK_DEV_OPTI621 is not set
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 0e4553f320bf..ad8215a3c586 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -45,7 +45,8 @@ CPPFLAGS_gate.lds := -P -C -U$(ARCH)
45quiet_cmd_gate = GATE $@ 45quiet_cmd_gate = GATE $@
46 cmd_gate = $(CC) -nostdlib $(GATECFLAGS_$(@F)) -Wl,-T,$(filter-out FORCE,$^) -o $@ 46 cmd_gate = $(CC) -nostdlib $(GATECFLAGS_$(@F)) -Wl,-T,$(filter-out FORCE,$^) -o $@
47 47
48GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1 48GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1 \
49 $(call ld-option, -Wl$(comma)--hash-style=sysv)
49$(obj)/gate.so: $(obj)/gate.lds $(obj)/gate.o FORCE 50$(obj)/gate.so: $(obj)/gate.lds $(obj)/gate.o FORCE
50 $(call if_changed,gate) 51 $(call if_changed,gate)
51 52
diff --git a/arch/ia64/kernel/gate.lds.S b/arch/ia64/kernel/gate.lds.S
index cc35cddfd4cf..6d198339bf85 100644
--- a/arch/ia64/kernel/gate.lds.S
+++ b/arch/ia64/kernel/gate.lds.S
@@ -12,6 +12,7 @@ SECTIONS
12 . = GATE_ADDR + SIZEOF_HEADERS; 12 . = GATE_ADDR + SIZEOF_HEADERS;
13 13
14 .hash : { *(.hash) } :readable 14 .hash : { *(.hash) } :readable
15 .gnu.hash : { *(.gnu.hash) }
15 .dynsym : { *(.dynsym) } 16 .dynsym : { *(.dynsym) }
16 .dynstr : { *(.dynstr) } 17 .dynstr : { *(.dynstr) }
17 .gnu.version : { *(.gnu.version) } 18 .gnu.version : { *(.gnu.version) }
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 00d9c83b8020..781960f80b6f 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -448,11 +448,20 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
448 return 0; 448 return 0;
449} 449}
450 450
451void __kprobes flush_insn_slot(struct kprobe *p)
452{
453 unsigned long arm_addr;
454
455 arm_addr = ((unsigned long)&p->opcode.bundle) & ~0xFULL;
456 flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
457}
458
451void __kprobes arch_arm_kprobe(struct kprobe *p) 459void __kprobes arch_arm_kprobe(struct kprobe *p)
452{ 460{
453 unsigned long addr = (unsigned long)p->addr; 461 unsigned long addr = (unsigned long)p->addr;
454 unsigned long arm_addr = addr & ~0xFULL; 462 unsigned long arm_addr = addr & ~0xFULL;
455 463
464 flush_insn_slot(p);
456 memcpy((char *)arm_addr, &p->ainsn.insn.bundle, sizeof(bundle_t)); 465 memcpy((char *)arm_addr, &p->ainsn.insn.bundle, sizeof(bundle_t));
457 flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t)); 466 flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
458} 467}
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 3f5bac59209a..ab5b52413e91 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -958,9 +958,9 @@ remove_palinfo_proc_entries(unsigned int hcpu)
958 } 958 }
959} 959}
960 960
961static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb, 961#ifdef CONFIG_HOTPLUG_CPU
962 unsigned long action, 962static int palinfo_cpu_callback(struct notifier_block *nfb,
963 void *hcpu) 963 unsigned long action, void *hcpu)
964{ 964{
965 unsigned int hotcpu = (unsigned long)hcpu; 965 unsigned int hotcpu = (unsigned long)hcpu;
966 966
@@ -968,20 +968,19 @@ static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
968 case CPU_ONLINE: 968 case CPU_ONLINE:
969 create_palinfo_proc_entries(hotcpu); 969 create_palinfo_proc_entries(hotcpu);
970 break; 970 break;
971#ifdef CONFIG_HOTPLUG_CPU
972 case CPU_DEAD: 971 case CPU_DEAD:
973 remove_palinfo_proc_entries(hotcpu); 972 remove_palinfo_proc_entries(hotcpu);
974 break; 973 break;
975#endif
976 } 974 }
977 return NOTIFY_OK; 975 return NOTIFY_OK;
978} 976}
979 977
980static struct notifier_block __cpuinitdata palinfo_cpu_notifier = 978static struct notifier_block palinfo_cpu_notifier =
981{ 979{
982 .notifier_call = palinfo_cpu_callback, 980 .notifier_call = palinfo_cpu_callback,
983 .priority = 0, 981 .priority = 0,
984}; 982};
983#endif
985 984
986static int __init 985static int __init
987palinfo_init(void) 986palinfo_init(void)
@@ -1020,7 +1019,7 @@ palinfo_exit(void)
1020 /* 1019 /*
1021 * Unregister from cpu notifier callbacks 1020 * Unregister from cpu notifier callbacks
1022 */ 1021 */
1023 unregister_cpu_notifier(&palinfo_cpu_notifier); 1022 unregister_hotcpu_notifier(&palinfo_cpu_notifier);
1024} 1023}
1025 1024
1026module_init(palinfo_init); 1025module_init(palinfo_init);
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index b146f1cfad31..d24fa393b182 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -435,7 +435,7 @@ static int __cpuinit cache_sysfs_init(void)
435 (void *)(long)i); 435 (void *)(long)i);
436 } 436 }
437 437
438 register_cpu_notifier(&cache_cpu_notifier); 438 register_hotcpu_notifier(&cache_cpu_notifier);
439 439
440 return 0; 440 return 0;
441} 441}
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index e7bbb0f40aa2..5a0420464c6c 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -117,11 +117,8 @@ die (const char *str, struct pt_regs *regs, long err)
117 die.lock_owner = -1; 117 die.lock_owner = -1;
118 spin_unlock_irq(&die.lock); 118 spin_unlock_irq(&die.lock);
119 119
120 if (panic_on_oops) { 120 if (panic_on_oops)
121 printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n"); 121 panic("Fatal exception: panic_on_oops");
122 ssleep(5);
123 panic("Fatal exception");
124 }
125 122
126 do_exit(SIGSEGV); 123 do_exit(SIGSEGV);
127} 124}
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 9989495a51dd..b3677fc8eef5 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -204,6 +204,7 @@ SECTIONS
204 *(.dynstr) 204 *(.dynstr)
205 *(.dynamic) 205 *(.dynamic)
206 *(.hash) 206 *(.hash)
207 *(.gnu.hash)
207#endif 208#endif
208 } 209 }
209 210
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 010435095550..fec228cd0163 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -278,7 +278,7 @@ static void unregister_cpu_online(unsigned int cpu)
278} 278}
279#endif /* CONFIG_HOTPLUG_CPU */ 279#endif /* CONFIG_HOTPLUG_CPU */
280 280
281static int __devinit sysfs_cpu_notify(struct notifier_block *self, 281static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
282 unsigned long action, void *hcpu) 282 unsigned long action, void *hcpu)
283{ 283{
284 unsigned int cpu = (unsigned int)(long)hcpu; 284 unsigned int cpu = (unsigned int)(long)hcpu;
@@ -296,7 +296,7 @@ static int __devinit sysfs_cpu_notify(struct notifier_block *self,
296 return NOTIFY_OK; 296 return NOTIFY_OK;
297} 297}
298 298
299static struct notifier_block __devinitdata sysfs_cpu_nb = { 299static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
300 .notifier_call = sysfs_cpu_notify, 300 .notifier_call = sysfs_cpu_notify,
301}; 301};
302 302
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 3c668078e524..2105767fcc57 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -150,13 +150,9 @@ int die(const char *str, struct pt_regs *regs, long err)
150 if (in_interrupt()) 150 if (in_interrupt())
151 panic("Fatal exception in interrupt"); 151 panic("Fatal exception in interrupt");
152 152
153 if (panic_on_oops) { 153 if (panic_on_oops)
154#ifdef CONFIG_PPC64 154 panic("Fatal exception: panic_on_oops");
155 printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n"); 155
156 ssleep(5);
157#endif
158 panic("Fatal exception");
159 }
160 do_exit(err); 156 do_exit(err);
161 157
162 return 0; 158 return 0;
diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile
index 8a3bed5f143a..3726358faae8 100644
--- a/arch/powerpc/kernel/vdso32/Makefile
+++ b/arch/powerpc/kernel/vdso32/Makefile
@@ -14,7 +14,8 @@ obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
14 14
15 15
16EXTRA_CFLAGS := -shared -s -fno-common -fno-builtin 16EXTRA_CFLAGS := -shared -s -fno-common -fno-builtin
17EXTRA_CFLAGS += -nostdlib -Wl,-soname=linux-vdso32.so.1 17EXTRA_CFLAGS += -nostdlib -Wl,-soname=linux-vdso32.so.1 \
18 $(call ld-option, -Wl$(comma)--hash-style=sysv)
18EXTRA_AFLAGS := -D__VDSO32__ -s 19EXTRA_AFLAGS := -D__VDSO32__ -s
19 20
20obj-y += vdso32_wrapper.o 21obj-y += vdso32_wrapper.o
diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index f4bad720cb0a..6187af2d54c3 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -14,6 +14,7 @@ SECTIONS
14{ 14{
15 . = VDSO32_LBASE + SIZEOF_HEADERS; 15 . = VDSO32_LBASE + SIZEOF_HEADERS;
16 .hash : { *(.hash) } :text 16 .hash : { *(.hash) } :text
17 .gnu.hash : { *(.gnu.hash) }
17 .dynsym : { *(.dynsym) } 18 .dynsym : { *(.dynsym) }
18 .dynstr : { *(.dynstr) } 19 .dynstr : { *(.dynstr) }
19 .gnu.version : { *(.gnu.version) } 20 .gnu.version : { *(.gnu.version) }
diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile
index ab39988452cc..43af9b2a6f3b 100644
--- a/arch/powerpc/kernel/vdso64/Makefile
+++ b/arch/powerpc/kernel/vdso64/Makefile
@@ -8,7 +8,8 @@ targets := $(obj-vdso64) vdso64.so
8obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64)) 8obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
9 9
10EXTRA_CFLAGS := -shared -s -fno-common -fno-builtin 10EXTRA_CFLAGS := -shared -s -fno-common -fno-builtin
11EXTRA_CFLAGS += -nostdlib -Wl,-soname=linux-vdso64.so.1 11EXTRA_CFLAGS += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
12 $(call ld-option, -Wl$(comma)--hash-style=sysv)
12EXTRA_AFLAGS := -D__VDSO64__ -s 13EXTRA_AFLAGS := -D__VDSO64__ -s
13 14
14obj-y += vdso64_wrapper.o 15obj-y += vdso64_wrapper.o
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
index 4bdf224464ab..4a2b6dc0960c 100644
--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -12,6 +12,7 @@ SECTIONS
12{ 12{
13 . = VDSO64_LBASE + SIZEOF_HEADERS; 13 . = VDSO64_LBASE + SIZEOF_HEADERS;
14 .hash : { *(.hash) } :text 14 .hash : { *(.hash) } :text
15 .gnu.hash : { *(.gnu.hash) }
15 .dynsym : { *(.dynsym) } 16 .dynsym : { *(.dynsym) }
16 .dynstr : { *(.dynstr) } 17 .dynstr : { *(.dynstr) }
17 .gnu.version : { *(.gnu.version) } 18 .gnu.version : { *(.gnu.version) }
diff --git a/arch/powerpc/platforms/powermac/backlight.c b/arch/powerpc/platforms/powermac/backlight.c
index 74eed6b74cd6..d66415491055 100644
--- a/arch/powerpc/platforms/powermac/backlight.c
+++ b/arch/powerpc/platforms/powermac/backlight.c
@@ -10,19 +10,32 @@
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/fb.h> 11#include <linux/fb.h>
12#include <linux/backlight.h> 12#include <linux/backlight.h>
13#include <linux/adb.h>
14#include <linux/pmu.h>
15#include <asm/atomic.h>
13#include <asm/prom.h> 16#include <asm/prom.h>
14#include <asm/backlight.h> 17#include <asm/backlight.h>
15 18
16#define OLD_BACKLIGHT_MAX 15 19#define OLD_BACKLIGHT_MAX 15
17 20
18static void pmac_backlight_key_worker(void *data); 21static void pmac_backlight_key_worker(void *data);
22static void pmac_backlight_set_legacy_worker(void *data);
23
19static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker, NULL); 24static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker, NULL);
25static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker, NULL);
20 26
21/* Although this variable is used in interrupt context, it makes no sense to 27/* Although these variables are used in interrupt context, it makes no sense to
22 * protect it. No user is able to produce enough key events per second and 28 * protect them. No user is able to produce enough key events per second and
23 * notice the errors that might happen. 29 * notice the errors that might happen.
24 */ 30 */
25static int pmac_backlight_key_queued; 31static int pmac_backlight_key_queued;
32static int pmac_backlight_set_legacy_queued;
33
34/* The via-pmu code allows the backlight to be grabbed, in which case the
35 * in-kernel control of the brightness needs to be disabled. This should
36 * only be used by really old PowerBooks.
37 */
38static atomic_t kernel_backlight_disabled = ATOMIC_INIT(0);
26 39
27/* Protect the pmac_backlight variable */ 40/* Protect the pmac_backlight variable */
28DEFINE_MUTEX(pmac_backlight_mutex); 41DEFINE_MUTEX(pmac_backlight_mutex);
@@ -82,6 +95,9 @@ int pmac_backlight_curve_lookup(struct fb_info *info, int value)
82 95
83static void pmac_backlight_key_worker(void *data) 96static void pmac_backlight_key_worker(void *data)
84{ 97{
98 if (atomic_read(&kernel_backlight_disabled))
99 return;
100
85 mutex_lock(&pmac_backlight_mutex); 101 mutex_lock(&pmac_backlight_mutex);
86 if (pmac_backlight) { 102 if (pmac_backlight) {
87 struct backlight_properties *props; 103 struct backlight_properties *props;
@@ -107,8 +123,12 @@ static void pmac_backlight_key_worker(void *data)
107 mutex_unlock(&pmac_backlight_mutex); 123 mutex_unlock(&pmac_backlight_mutex);
108} 124}
109 125
126/* This function is called in interrupt context */
110void pmac_backlight_key(int direction) 127void pmac_backlight_key(int direction)
111{ 128{
129 if (atomic_read(&kernel_backlight_disabled))
130 return;
131
112 /* we can receive multiple interrupts here, but the scheduled work 132 /* we can receive multiple interrupts here, but the scheduled work
113 * will run only once, with the last value 133 * will run only once, with the last value
114 */ 134 */
@@ -116,7 +136,7 @@ void pmac_backlight_key(int direction)
116 schedule_work(&pmac_backlight_key_work); 136 schedule_work(&pmac_backlight_key_work);
117} 137}
118 138
119int pmac_backlight_set_legacy_brightness(int brightness) 139static int __pmac_backlight_set_legacy_brightness(int brightness)
120{ 140{
121 int error = -ENXIO; 141 int error = -ENXIO;
122 142
@@ -145,6 +165,28 @@ int pmac_backlight_set_legacy_brightness(int brightness)
145 return error; 165 return error;
146} 166}
147 167
168static void pmac_backlight_set_legacy_worker(void *data)
169{
170 if (atomic_read(&kernel_backlight_disabled))
171 return;
172
173 __pmac_backlight_set_legacy_brightness(pmac_backlight_set_legacy_queued);
174}
175
176/* This function is called in interrupt context */
177void pmac_backlight_set_legacy_brightness_pmu(int brightness) {
178 if (atomic_read(&kernel_backlight_disabled))
179 return;
180
181 pmac_backlight_set_legacy_queued = brightness;
182 schedule_work(&pmac_backlight_set_legacy_work);
183}
184
185int pmac_backlight_set_legacy_brightness(int brightness)
186{
187 return __pmac_backlight_set_legacy_brightness(brightness);
188}
189
148int pmac_backlight_get_legacy_brightness() 190int pmac_backlight_get_legacy_brightness()
149{ 191{
150 int result = -ENXIO; 192 int result = -ENXIO;
@@ -167,6 +209,16 @@ int pmac_backlight_get_legacy_brightness()
167 return result; 209 return result;
168} 210}
169 211
212void pmac_backlight_disable()
213{
214 atomic_inc(&kernel_backlight_disabled);
215}
216
217void pmac_backlight_enable()
218{
219 atomic_dec(&kernel_backlight_disabled);
220}
221
170EXPORT_SYMBOL_GPL(pmac_backlight); 222EXPORT_SYMBOL_GPL(pmac_backlight);
171EXPORT_SYMBOL_GPL(pmac_backlight_mutex); 223EXPORT_SYMBOL_GPL(pmac_backlight_mutex);
172EXPORT_SYMBOL_GPL(pmac_has_backlight_type); 224EXPORT_SYMBOL_GPL(pmac_has_backlight_type);
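The new _pmu entry point repeats the pattern already used for the brightness keys: the PMU interrupt path only records the requested level and schedules a work item, and the worker applies it in process context where pmac_backlight_mutex may be taken. A minimal sketch of that deferral pattern with made-up my_* names, using the same DECLARE_WORK(name, func, data) form as this file:

    #include <linux/workqueue.h>
    #include <asm/atomic.h>

    static int my_requested_level;                  /* last writer wins */
    static atomic_t my_disabled = ATOMIC_INIT(0);   /* like kernel_backlight_disabled */

    static void my_worker(void *data)
    {
            if (atomic_read(&my_disabled))
                    return;
            /* process context: apply my_requested_level, mutexes are fine here */
    }

    static DECLARE_WORK(my_work, my_worker, NULL);

    void my_set_level_from_irq(int level)           /* called in interrupt context */
    {
            if (atomic_read(&my_disabled))
                    return;
            my_requested_level = level;
            schedule_work(&my_work);
    }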
diff --git a/arch/ppc/kernel/vmlinux.lds.S b/arch/ppc/kernel/vmlinux.lds.S
index 09c6525cfa61..095fd3323323 100644
--- a/arch/ppc/kernel/vmlinux.lds.S
+++ b/arch/ppc/kernel/vmlinux.lds.S
@@ -8,6 +8,7 @@ SECTIONS
8 . = + SIZEOF_HEADERS; 8 . = + SIZEOF_HEADERS;
9 .interp : { *(.interp) } 9 .interp : { *(.interp) }
10 .hash : { *(.hash) } 10 .hash : { *(.hash) }
11 .gnu.hash : { *(.gnu.hash) }
11 .dynsym : { *(.dynsym) } 12 .dynsym : { *(.dynsym) }
12 .dynstr : { *(.dynstr) } 13 .dynstr : { *(.dynstr) }
13 .rel.text : { *(.rel.text) } 14 .rel.text : { *(.rel.text) }
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index d0f82c995af6..6a4b5f9715c9 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -617,7 +617,8 @@ appldata_offline_cpu(int cpu)
617 spin_unlock(&appldata_timer_lock); 617 spin_unlock(&appldata_timer_lock);
618} 618}
619 619
620static int __cpuinit 620#ifdef CONFIG_HOTPLUG_CPU
621static int
621appldata_cpu_notify(struct notifier_block *self, 622appldata_cpu_notify(struct notifier_block *self,
622 unsigned long action, void *hcpu) 623 unsigned long action, void *hcpu)
623{ 624{
@@ -625,20 +626,19 @@ appldata_cpu_notify(struct notifier_block *self,
625 case CPU_ONLINE: 626 case CPU_ONLINE:
626 appldata_online_cpu((long) hcpu); 627 appldata_online_cpu((long) hcpu);
627 break; 628 break;
628#ifdef CONFIG_HOTPLUG_CPU
629 case CPU_DEAD: 629 case CPU_DEAD:
630 appldata_offline_cpu((long) hcpu); 630 appldata_offline_cpu((long) hcpu);
631 break; 631 break;
632#endif
633 default: 632 default:
634 break; 633 break;
635 } 634 }
636 return NOTIFY_OK; 635 return NOTIFY_OK;
637} 636}
638 637
639static struct notifier_block __devinitdata appldata_nb = { 638static struct notifier_block appldata_nb = {
640 .notifier_call = appldata_cpu_notify, 639 .notifier_call = appldata_cpu_notify,
641}; 640};
641#endif
642 642
643/* 643/*
644 * appldata_init() 644 * appldata_init()
@@ -662,7 +662,7 @@ static int __init appldata_init(void)
662 appldata_online_cpu(i); 662 appldata_online_cpu(i);
663 663
664 /* Register cpu hotplug notifier */ 664 /* Register cpu hotplug notifier */
665 register_cpu_notifier(&appldata_nb); 665 register_hotcpu_notifier(&appldata_nb);
666 666
667 appldata_sysctl_header = register_sysctl_table(appldata_dir_table, 1); 667 appldata_sysctl_header = register_sysctl_table(appldata_dir_table, 1);
668#ifdef MODULE 668#ifdef MODULE
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index f4dfc10026d2..f1d4591eddbb 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,13 +1,16 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.17-rc1 3# Linux kernel version: 2.6.18-rc2
4# Mon Apr 3 14:34:15 2006 4# Thu Jul 27 13:51:07 2006
5# 5#
6CONFIG_MMU=y 6CONFIG_MMU=y
7CONFIG_LOCKDEP_SUPPORT=y
8CONFIG_STACKTRACE_SUPPORT=y
7CONFIG_RWSEM_XCHGADD_ALGORITHM=y 9CONFIG_RWSEM_XCHGADD_ALGORITHM=y
8CONFIG_GENERIC_HWEIGHT=y 10CONFIG_GENERIC_HWEIGHT=y
9CONFIG_GENERIC_CALIBRATE_DELAY=y 11CONFIG_GENERIC_CALIBRATE_DELAY=y
10CONFIG_S390=y 12CONFIG_S390=y
13CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
11 14
12# 15#
13# Code maturity level options 16# Code maturity level options
@@ -25,6 +28,7 @@ CONFIG_SWAP=y
25CONFIG_SYSVIPC=y 28CONFIG_SYSVIPC=y
26CONFIG_POSIX_MQUEUE=y 29CONFIG_POSIX_MQUEUE=y
27# CONFIG_BSD_PROCESS_ACCT is not set 30# CONFIG_BSD_PROCESS_ACCT is not set
31# CONFIG_TASKSTATS is not set
28CONFIG_SYSCTL=y 32CONFIG_SYSCTL=y
29CONFIG_AUDIT=y 33CONFIG_AUDIT=y
30# CONFIG_AUDITSYSCALL is not set 34# CONFIG_AUDITSYSCALL is not set
@@ -43,10 +47,12 @@ CONFIG_PRINTK=y
43CONFIG_BUG=y 47CONFIG_BUG=y
44CONFIG_ELF_CORE=y 48CONFIG_ELF_CORE=y
45CONFIG_BASE_FULL=y 49CONFIG_BASE_FULL=y
50CONFIG_RT_MUTEXES=y
46CONFIG_FUTEX=y 51CONFIG_FUTEX=y
47CONFIG_EPOLL=y 52CONFIG_EPOLL=y
48CONFIG_SHMEM=y 53CONFIG_SHMEM=y
49CONFIG_SLAB=y 54CONFIG_SLAB=y
55CONFIG_VM_EVENT_COUNTERS=y
50# CONFIG_TINY_SHMEM is not set 56# CONFIG_TINY_SHMEM is not set
51CONFIG_BASE_SMALL=0 57CONFIG_BASE_SMALL=0
52# CONFIG_SLOB is not set 58# CONFIG_SLOB is not set
@@ -94,7 +100,6 @@ CONFIG_HOTPLUG_CPU=y
94CONFIG_DEFAULT_MIGRATION_COST=1000000 100CONFIG_DEFAULT_MIGRATION_COST=1000000
95CONFIG_COMPAT=y 101CONFIG_COMPAT=y
96CONFIG_SYSVIPC_COMPAT=y 102CONFIG_SYSVIPC_COMPAT=y
97CONFIG_BINFMT_ELF32=y
98 103
99# 104#
100# Code generation options 105# Code generation options
@@ -115,6 +120,7 @@ CONFIG_FLATMEM=y
115CONFIG_FLAT_NODE_MEM_MAP=y 120CONFIG_FLAT_NODE_MEM_MAP=y
116# CONFIG_SPARSEMEM_STATIC is not set 121# CONFIG_SPARSEMEM_STATIC is not set
117CONFIG_SPLIT_PTLOCK_CPUS=4 122CONFIG_SPLIT_PTLOCK_CPUS=4
123CONFIG_RESOURCES_64BIT=y
118 124
119# 125#
120# I/O subsystem configuration 126# I/O subsystem configuration
@@ -142,6 +148,7 @@ CONFIG_VIRT_CPU_ACCOUNTING=y
142# CONFIG_APPLDATA_BASE is not set 148# CONFIG_APPLDATA_BASE is not set
143CONFIG_NO_IDLE_HZ=y 149CONFIG_NO_IDLE_HZ=y
144CONFIG_NO_IDLE_HZ_INIT=y 150CONFIG_NO_IDLE_HZ_INIT=y
151CONFIG_S390_HYPFS_FS=y
145CONFIG_KEXEC=y 152CONFIG_KEXEC=y
146 153
147# 154#
@@ -174,6 +181,8 @@ CONFIG_IP_FIB_HASH=y
174# CONFIG_INET_IPCOMP is not set 181# CONFIG_INET_IPCOMP is not set
175# CONFIG_INET_XFRM_TUNNEL is not set 182# CONFIG_INET_XFRM_TUNNEL is not set
176# CONFIG_INET_TUNNEL is not set 183# CONFIG_INET_TUNNEL is not set
184CONFIG_INET_XFRM_MODE_TRANSPORT=y
185CONFIG_INET_XFRM_MODE_TUNNEL=y
177CONFIG_INET_DIAG=y 186CONFIG_INET_DIAG=y
178CONFIG_INET_TCP_DIAG=y 187CONFIG_INET_TCP_DIAG=y
179# CONFIG_TCP_CONG_ADVANCED is not set 188# CONFIG_TCP_CONG_ADVANCED is not set
@@ -186,7 +195,10 @@ CONFIG_IPV6=y
186# CONFIG_INET6_IPCOMP is not set 195# CONFIG_INET6_IPCOMP is not set
187# CONFIG_INET6_XFRM_TUNNEL is not set 196# CONFIG_INET6_XFRM_TUNNEL is not set
188# CONFIG_INET6_TUNNEL is not set 197# CONFIG_INET6_TUNNEL is not set
198CONFIG_INET6_XFRM_MODE_TRANSPORT=y
199CONFIG_INET6_XFRM_MODE_TUNNEL=y
189# CONFIG_IPV6_TUNNEL is not set 200# CONFIG_IPV6_TUNNEL is not set
201# CONFIG_NETWORK_SECMARK is not set
190# CONFIG_NETFILTER is not set 202# CONFIG_NETFILTER is not set
191 203
192# 204#
@@ -263,6 +275,7 @@ CONFIG_NET_ESTIMATOR=y
263# Network testing 275# Network testing
264# 276#
265# CONFIG_NET_PKTGEN is not set 277# CONFIG_NET_PKTGEN is not set
278# CONFIG_NET_TCPPROBE is not set
266# CONFIG_HAMRADIO is not set 279# CONFIG_HAMRADIO is not set
267# CONFIG_IRDA is not set 280# CONFIG_IRDA is not set
268# CONFIG_BT is not set 281# CONFIG_BT is not set
@@ -276,6 +289,7 @@ CONFIG_STANDALONE=y
276CONFIG_PREVENT_FIRMWARE_BUILD=y 289CONFIG_PREVENT_FIRMWARE_BUILD=y
277# CONFIG_FW_LOADER is not set 290# CONFIG_FW_LOADER is not set
278# CONFIG_DEBUG_DRIVER is not set 291# CONFIG_DEBUG_DRIVER is not set
292CONFIG_SYS_HYPERVISOR=y
279 293
280# 294#
281# Connector - unified userspace <-> kernelspace linker 295# Connector - unified userspace <-> kernelspace linker
@@ -334,6 +348,7 @@ CONFIG_BLK_DEV_NBD=m
334CONFIG_BLK_DEV_RAM=y 348CONFIG_BLK_DEV_RAM=y
335CONFIG_BLK_DEV_RAM_COUNT=16 349CONFIG_BLK_DEV_RAM_COUNT=16
336CONFIG_BLK_DEV_RAM_SIZE=4096 350CONFIG_BLK_DEV_RAM_SIZE=4096
351CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
337CONFIG_BLK_DEV_INITRD=y 352CONFIG_BLK_DEV_INITRD=y
338# CONFIG_CDROM_PKTCDVD is not set 353# CONFIG_CDROM_PKTCDVD is not set
339 354
@@ -359,9 +374,7 @@ CONFIG_MD_LINEAR=m
359CONFIG_MD_RAID0=m 374CONFIG_MD_RAID0=m
360CONFIG_MD_RAID1=m 375CONFIG_MD_RAID1=m
361# CONFIG_MD_RAID10 is not set 376# CONFIG_MD_RAID10 is not set
362CONFIG_MD_RAID5=m 377# CONFIG_MD_RAID456 is not set
363# CONFIG_MD_RAID5_RESHAPE is not set
364# CONFIG_MD_RAID6 is not set
365CONFIG_MD_MULTIPATH=m 378CONFIG_MD_MULTIPATH=m
366# CONFIG_MD_FAULTY is not set 379# CONFIG_MD_FAULTY is not set
367CONFIG_BLK_DEV_DM=y 380CONFIG_BLK_DEV_DM=y
@@ -419,7 +432,8 @@ CONFIG_S390_TAPE_34XX=m
419# 432#
420# Cryptographic devices 433# Cryptographic devices
421# 434#
422CONFIG_Z90CRYPT=m 435CONFIG_ZCRYPT=m
436# CONFIG_ZCRYPT_MONOLITHIC is not set
423 437
424# 438#
425# Network device support 439# Network device support
@@ -509,6 +523,7 @@ CONFIG_FS_MBCACHE=y
509# CONFIG_MINIX_FS is not set 523# CONFIG_MINIX_FS is not set
510# CONFIG_ROMFS_FS is not set 524# CONFIG_ROMFS_FS is not set
511CONFIG_INOTIFY=y 525CONFIG_INOTIFY=y
526CONFIG_INOTIFY_USER=y
512# CONFIG_QUOTA is not set 527# CONFIG_QUOTA is not set
513CONFIG_DNOTIFY=y 528CONFIG_DNOTIFY=y
514# CONFIG_AUTOFS_FS is not set 529# CONFIG_AUTOFS_FS is not set
@@ -614,26 +629,36 @@ CONFIG_MSDOS_PARTITION=y
614# Instrumentation Support 629# Instrumentation Support
615# 630#
616# CONFIG_PROFILING is not set 631# CONFIG_PROFILING is not set
617# CONFIG_STATISTICS is not set 632CONFIG_STATISTICS=y
633CONFIG_KPROBES=y
618 634
619# 635#
620# Kernel hacking 636# Kernel hacking
621# 637#
638CONFIG_TRACE_IRQFLAGS_SUPPORT=y
622# CONFIG_PRINTK_TIME is not set 639# CONFIG_PRINTK_TIME is not set
623CONFIG_MAGIC_SYSRQ=y 640CONFIG_MAGIC_SYSRQ=y
641# CONFIG_UNUSED_SYMBOLS is not set
624CONFIG_DEBUG_KERNEL=y 642CONFIG_DEBUG_KERNEL=y
625CONFIG_LOG_BUF_SHIFT=17 643CONFIG_LOG_BUF_SHIFT=17
626# CONFIG_DETECT_SOFTLOCKUP is not set 644# CONFIG_DETECT_SOFTLOCKUP is not set
627# CONFIG_SCHEDSTATS is not set 645# CONFIG_SCHEDSTATS is not set
628# CONFIG_DEBUG_SLAB is not set 646# CONFIG_DEBUG_SLAB is not set
629CONFIG_DEBUG_PREEMPT=y 647CONFIG_DEBUG_PREEMPT=y
630CONFIG_DEBUG_MUTEXES=y 648# CONFIG_DEBUG_RT_MUTEXES is not set
649# CONFIG_RT_MUTEX_TESTER is not set
631CONFIG_DEBUG_SPINLOCK=y 650CONFIG_DEBUG_SPINLOCK=y
651CONFIG_DEBUG_MUTEXES=y
652# CONFIG_DEBUG_RWSEMS is not set
653# CONFIG_DEBUG_LOCK_ALLOC is not set
654# CONFIG_PROVE_LOCKING is not set
632CONFIG_DEBUG_SPINLOCK_SLEEP=y 655CONFIG_DEBUG_SPINLOCK_SLEEP=y
656# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
633# CONFIG_DEBUG_KOBJECT is not set 657# CONFIG_DEBUG_KOBJECT is not set
634# CONFIG_DEBUG_INFO is not set 658# CONFIG_DEBUG_INFO is not set
635CONFIG_DEBUG_FS=y 659CONFIG_DEBUG_FS=y
636# CONFIG_DEBUG_VM is not set 660# CONFIG_DEBUG_VM is not set
661# CONFIG_FRAME_POINTER is not set
637# CONFIG_UNWIND_INFO is not set 662# CONFIG_UNWIND_INFO is not set
638CONFIG_FORCED_INLINING=y 663CONFIG_FORCED_INLINING=y
639# CONFIG_RCU_TORTURE_TEST is not set 664# CONFIG_RCU_TORTURE_TEST is not set
@@ -688,3 +713,4 @@ CONFIG_CRYPTO=y
688# CONFIG_CRC16 is not set 713# CONFIG_CRC16 is not set
689CONFIG_CRC32=m 714CONFIG_CRC32=m
690# CONFIG_LIBCRC32C is not set 715# CONFIG_LIBCRC32C is not set
716CONFIG_PLIST=y
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
index 04eb1eab6e3e..845081b01267 100644
--- a/arch/sparc/kernel/time.c
+++ b/arch/sparc/kernel/time.c
@@ -225,6 +225,32 @@ static __inline__ int has_low_battery(void)
225 return (data1 == data2); /* Was the write blocked? */ 225 return (data1 == data2); /* Was the write blocked? */
226} 226}
227 227
228static void __init mostek_set_system_time(void)
229{
230 unsigned int year, mon, day, hour, min, sec;
231 struct mostek48t02 *mregs;
232
233 mregs = (struct mostek48t02 *)mstk48t02_regs;
234 if(!mregs) {
235 prom_printf("Something wrong, clock regs not mapped yet.\n");
236 prom_halt();
237 }
238 spin_lock_irq(&mostek_lock);
239 mregs->creg |= MSTK_CREG_READ;
240 sec = MSTK_REG_SEC(mregs);
241 min = MSTK_REG_MIN(mregs);
242 hour = MSTK_REG_HOUR(mregs);
243 day = MSTK_REG_DOM(mregs);
244 mon = MSTK_REG_MONTH(mregs);
245 year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) );
246 xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
247 xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
248 set_normalized_timespec(&wall_to_monotonic,
249 -xtime.tv_sec, -xtime.tv_nsec);
250 mregs->creg &= ~MSTK_CREG_READ;
251 spin_unlock_irq(&mostek_lock);
252}
253
228/* Probe for the real time clock chip on Sun4 */ 254/* Probe for the real time clock chip on Sun4 */
229static __inline__ void sun4_clock_probe(void) 255static __inline__ void sun4_clock_probe(void)
230{ 256{
@@ -273,6 +299,7 @@ static __inline__ void sun4_clock_probe(void)
273#endif 299#endif
274} 300}
275 301
302#ifndef CONFIG_SUN4
276static int __devinit clock_probe(struct of_device *op, const struct of_device_id *match) 303static int __devinit clock_probe(struct of_device *op, const struct of_device_id *match)
277{ 304{
278 struct device_node *dp = op->node; 305 struct device_node *dp = op->node;
@@ -307,6 +334,8 @@ static int __devinit clock_probe(struct of_device *op, const struct of_device_id
307 if (mostek_read(mstk48t02_regs + MOSTEK_SEC) & MSTK_STOP) 334 if (mostek_read(mstk48t02_regs + MOSTEK_SEC) & MSTK_STOP)
308 kick_start_clock(); 335 kick_start_clock();
309 336
337 mostek_set_system_time();
338
310 return 0; 339 return 0;
311} 340}
312 341
@@ -325,56 +354,37 @@ static struct of_platform_driver clock_driver = {
325 354
326 355
327/* Probe for the mostek real time clock chip. */ 356/* Probe for the mostek real time clock chip. */
328static void clock_init(void) 357static int __init clock_init(void)
329{ 358{
330 of_register_driver(&clock_driver, &of_bus_type); 359 return of_register_driver(&clock_driver, &of_bus_type);
331} 360}
332 361
362/* Must be after subsys_initcall() so that busses are probed. Must
363 * be before device_initcall() because things like the RTC driver
364 * need to see the clock registers.
365 */
366fs_initcall(clock_init);
367#endif /* !CONFIG_SUN4 */
368
333void __init sbus_time_init(void) 369void __init sbus_time_init(void)
334{ 370{
335 unsigned int year, mon, day, hour, min, sec;
336 struct mostek48t02 *mregs;
337
338#ifdef CONFIG_SUN4
339 int temp;
340 struct intersil *iregs;
341#endif
342 371
343 BTFIXUPSET_CALL(bus_do_settimeofday, sbus_do_settimeofday, BTFIXUPCALL_NORM); 372 BTFIXUPSET_CALL(bus_do_settimeofday, sbus_do_settimeofday, BTFIXUPCALL_NORM);
344 btfixup(); 373 btfixup();
345 374
346 if (ARCH_SUN4) 375 if (ARCH_SUN4)
347 sun4_clock_probe(); 376 sun4_clock_probe();
348 else
349 clock_init();
350 377
351 sparc_init_timers(timer_interrupt); 378 sparc_init_timers(timer_interrupt);
352 379
353#ifdef CONFIG_SUN4 380#ifdef CONFIG_SUN4
354 if(idprom->id_machtype == (SM_SUN4 | SM_4_330)) { 381 if(idprom->id_machtype == (SM_SUN4 | SM_4_330)) {
355#endif 382 mostek_set_system_time();
356 mregs = (struct mostek48t02 *)mstk48t02_regs;
357 if(!mregs) {
358 prom_printf("Something wrong, clock regs not mapped yet.\n");
359 prom_halt();
360 }
361 spin_lock_irq(&mostek_lock);
362 mregs->creg |= MSTK_CREG_READ;
363 sec = MSTK_REG_SEC(mregs);
364 min = MSTK_REG_MIN(mregs);
365 hour = MSTK_REG_HOUR(mregs);
366 day = MSTK_REG_DOM(mregs);
367 mon = MSTK_REG_MONTH(mregs);
368 year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) );
369 xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
370 xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
371 set_normalized_timespec(&wall_to_monotonic,
372 -xtime.tv_sec, -xtime.tv_nsec);
373 mregs->creg &= ~MSTK_CREG_READ;
374 spin_unlock_irq(&mostek_lock);
375#ifdef CONFIG_SUN4
376 } else if(idprom->id_machtype == (SM_SUN4 | SM_4_260) ) { 383 } else if(idprom->id_machtype == (SM_SUN4 | SM_4_260) ) {
377 /* initialise the intersil on sun4 */ 384 /* initialise the intersil on sun4 */
385 unsigned int year, mon, day, hour, min, sec;
386 int temp;
387 struct intersil *iregs;
378 388
379 iregs=intersil_clock; 389 iregs=intersil_clock;
380 if(!iregs) { 390 if(!iregs) {
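The comment above leans on initcall ordering: levels run roughly core, postcore, arch, subsys, fs, device, late, so an fs_initcall executes after the bus registration done around subsys_initcall() time but before ordinary device_initcall() drivers such as an RTC class driver. A stripped-down view of how the pieces line up (same registration as in the hunk, repeated only to anchor the ordering note):

    /* initcall order (approximate): core -> postcore -> arch -> subsys
     * -> fs -> device -> late.
     */
    static int __init clock_init(void)
    {
            return of_register_driver(&clock_driver, &of_bus_type);
    }
    fs_initcall(clock_init);        /* after bus probing, before device initcalls */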
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index 1605967cce91..55ae802dc0ad 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -19,6 +19,7 @@
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/kprobes.h> 21#include <linux/kprobes.h>
22#include <linux/kallsyms.h>
22 23
23#include <asm/page.h> 24#include <asm/page.h>
24#include <asm/pgtable.h> 25#include <asm/pgtable.h>
@@ -132,6 +133,8 @@ static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
132 133
133 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n", 134 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
134 regs->tpc); 135 regs->tpc);
136 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
137 print_symbol("RPC: <%s>\n", regs->u_regs[15]);
135 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr); 138 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
136 __asm__("mov %%sp, %0" : "=r" (ksp)); 139 __asm__("mov %%sp, %0" : "=r" (ksp));
137 show_stack(current, ksp); 140 show_stack(current, ksp);
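On sparc64, u_regs[15] is %o7, the register that holds the caller's return address, so printing it next to the bogus PC shows which call site jumped to the bad address; print_symbol(), hence the new <linux/kallsyms.h> include, resolves it to a symbol name at oops time. A small sketch with a made-up wrapper:

    #include <linux/kernel.h>
    #include <linux/kallsyms.h>

    static void my_report_caller(unsigned long rpc)    /* rpc = regs->u_regs[15] */
    {
            printk(KERN_CRIT "OOPS: RPC [%016lx]\n", rpc);
            print_symbol("RPC: <%s>\n", rpc);           /* symbol name via kallsyms */
    }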
diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
index 2517ecb8bf27..68ed24df5c8f 100644
--- a/arch/um/kernel/dyn.lds.S
+++ b/arch/um/kernel/dyn.lds.S
@@ -26,6 +26,7 @@ SECTIONS
26 26
27 /* Read-only sections, merged into text segment: */ 27 /* Read-only sections, merged into text segment: */
28 .hash : { *(.hash) } 28 .hash : { *(.hash) }
29 .gnu.hash : { *(.gnu.hash) }
29 .dynsym : { *(.dynsym) } 30 .dynsym : { *(.dynsym) }
30 .dynstr : { *(.dynstr) } 31 .dynstr : { *(.dynstr) }
31 .gnu.version : { *(.gnu.version) } 32 .gnu.version : { *(.gnu.version) }
diff --git a/arch/v850/kernel/setup.c b/arch/v850/kernel/setup.c
index 62bdb8d29fc0..1bf672a25692 100644
--- a/arch/v850/kernel/setup.c
+++ b/arch/v850/kernel/setup.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * arch/v850/kernel/setup.c -- Arch-dependent initialization functions 2 * arch/v850/kernel/setup.c -- Arch-dependent initialization functions
3 * 3 *
4 * Copyright (C) 2001,02,03,05 NEC Electronics Corporation 4 * Copyright (C) 2001,02,03,05,06 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03,05 Miles Bader <miles@gnu.org> 5 * Copyright (C) 2001,02,03,05,06 Miles Bader <miles@gnu.org>
6 * 6 *
7 * This file is subject to the terms and conditions of the GNU General 7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this 8 * Public License. See the file COPYING in the main directory of this
@@ -190,7 +190,7 @@ void free_initmem (void)
190 for (addr = start; addr < end; addr += PAGE_SIZE) { 190 for (addr = start; addr < end; addr += PAGE_SIZE) {
191 struct page *page = virt_to_page (addr); 191 struct page *page = virt_to_page (addr);
192 ClearPageReserved (page); 192 ClearPageReserved (page);
193 set_page_count (page, 1); 193 init_page_count (page);
194 __free_page (page); 194 __free_page (page);
195 total_ram_pages++; 195 total_ram_pages++;
196 } 196 }
diff --git a/arch/v850/kernel/v850_ksyms.c b/arch/v850/kernel/v850_ksyms.c
index c03ad6ed61cc..67bc48e57c60 100644
--- a/arch/v850/kernel/v850_ksyms.c
+++ b/arch/v850/kernel/v850_ksyms.c
@@ -21,9 +21,6 @@ EXPORT_SYMBOL (trap_table);
21 21
22/* platform dependent support */ 22/* platform dependent support */
23EXPORT_SYMBOL (kernel_thread); 23EXPORT_SYMBOL (kernel_thread);
24EXPORT_SYMBOL (enable_irq);
25EXPORT_SYMBOL (disable_irq);
26EXPORT_SYMBOL (disable_irq_nosync);
27EXPORT_SYMBOL (__bug); 24EXPORT_SYMBOL (__bug);
28 25
29/* Networking helper routines. */ 26/* Networking helper routines. */
@@ -33,22 +30,9 @@ EXPORT_SYMBOL (ip_compute_csum);
33EXPORT_SYMBOL (ip_fast_csum); 30EXPORT_SYMBOL (ip_fast_csum);
34 31
35/* string / mem functions */ 32/* string / mem functions */
36EXPORT_SYMBOL (strcpy);
37EXPORT_SYMBOL (strncpy);
38EXPORT_SYMBOL (strcat);
39EXPORT_SYMBOL (strncat);
40EXPORT_SYMBOL (strcmp);
41EXPORT_SYMBOL (strncmp);
42EXPORT_SYMBOL (strchr);
43EXPORT_SYMBOL (strlen);
44EXPORT_SYMBOL (strnlen);
45EXPORT_SYMBOL (strrchr);
46EXPORT_SYMBOL (strstr);
47EXPORT_SYMBOL (memset); 33EXPORT_SYMBOL (memset);
48EXPORT_SYMBOL (memcpy); 34EXPORT_SYMBOL (memcpy);
49EXPORT_SYMBOL (memmove); 35EXPORT_SYMBOL (memmove);
50EXPORT_SYMBOL (memcmp);
51EXPORT_SYMBOL (memscan);
52 36
53/* semaphores */ 37/* semaphores */
54EXPORT_SYMBOL (__down); 38EXPORT_SYMBOL (__down);
diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig
index 83d389b8ebd8..840d5d93d5cc 100644
--- a/arch/x86_64/defconfig
+++ b/arch/x86_64/defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.17-git22 3# Linux kernel version: 2.6.18-rc2
4# Tue Jul 4 14:24:40 2006 4# Tue Jul 18 17:13:20 2006
5# 5#
6CONFIG_X86_64=y 6CONFIG_X86_64=y
7CONFIG_64BIT=y 7CONFIG_64BIT=y
@@ -37,6 +37,7 @@ CONFIG_SWAP=y
37CONFIG_SYSVIPC=y 37CONFIG_SYSVIPC=y
38CONFIG_POSIX_MQUEUE=y 38CONFIG_POSIX_MQUEUE=y
39# CONFIG_BSD_PROCESS_ACCT is not set 39# CONFIG_BSD_PROCESS_ACCT is not set
40# CONFIG_TASKSTATS is not set
40CONFIG_SYSCTL=y 41CONFIG_SYSCTL=y
41# CONFIG_AUDIT is not set 42# CONFIG_AUDIT is not set
42CONFIG_IKCONFIG=y 43CONFIG_IKCONFIG=y
@@ -413,6 +414,7 @@ CONFIG_BLK_DEV_LOOP=y
413CONFIG_BLK_DEV_RAM=y 414CONFIG_BLK_DEV_RAM=y
414CONFIG_BLK_DEV_RAM_COUNT=16 415CONFIG_BLK_DEV_RAM_COUNT=16
415CONFIG_BLK_DEV_RAM_SIZE=4096 416CONFIG_BLK_DEV_RAM_SIZE=4096
417CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
416CONFIG_BLK_DEV_INITRD=y 418CONFIG_BLK_DEV_INITRD=y
417# CONFIG_CDROM_PKTCDVD is not set 419# CONFIG_CDROM_PKTCDVD is not set
418# CONFIG_ATA_OVER_ETH is not set 420# CONFIG_ATA_OVER_ETH is not set
@@ -1195,7 +1197,7 @@ CONFIG_USB_MON=y
1195# CONFIG_USB_LEGOTOWER is not set 1197# CONFIG_USB_LEGOTOWER is not set
1196# CONFIG_USB_LCD is not set 1198# CONFIG_USB_LCD is not set
1197# CONFIG_USB_LED is not set 1199# CONFIG_USB_LED is not set
1198# CONFIG_USB_CY7C63 is not set 1200# CONFIG_USB_CYPRESS_CY7C63 is not set
1199# CONFIG_USB_CYTHERM is not set 1201# CONFIG_USB_CYTHERM is not set
1200# CONFIG_USB_PHIDGETKIT is not set 1202# CONFIG_USB_PHIDGETKIT is not set
1201# CONFIG_USB_PHIDGETSERVO is not set 1203# CONFIG_USB_PHIDGETSERVO is not set
@@ -1373,7 +1375,6 @@ CONFIG_SUNRPC=y
1373# CONFIG_RPCSEC_GSS_SPKM3 is not set 1375# CONFIG_RPCSEC_GSS_SPKM3 is not set
1374# CONFIG_SMB_FS is not set 1376# CONFIG_SMB_FS is not set
1375# CONFIG_CIFS is not set 1377# CONFIG_CIFS is not set
1376# CONFIG_CIFS_DEBUG2 is not set
1377# CONFIG_NCP_FS is not set 1378# CONFIG_NCP_FS is not set
1378# CONFIG_CODA_FS is not set 1379# CONFIG_CODA_FS is not set
1379# CONFIG_AFS_FS is not set 1380# CONFIG_AFS_FS is not set
diff --git a/arch/x86_64/ia32/Makefile b/arch/x86_64/ia32/Makefile
index 62bc5f56da9e..cdae36435e21 100644
--- a/arch/x86_64/ia32/Makefile
+++ b/arch/x86_64/ia32/Makefile
@@ -23,6 +23,7 @@ targets := $(foreach F,sysenter syscall,vsyscall-$F.o vsyscall-$F.so)
23# The DSO images are built using a special linker script 23# The DSO images are built using a special linker script
24quiet_cmd_syscall = SYSCALL $@ 24quiet_cmd_syscall = SYSCALL $@
25 cmd_syscall = $(CC) -m32 -nostdlib -shared -s \ 25 cmd_syscall = $(CC) -m32 -nostdlib -shared -s \
26 $(call ld-option, -Wl$(comma)--hash-style=sysv) \
26 -Wl,-soname=linux-gate.so.1 -o $@ \ 27 -Wl,-soname=linux-gate.so.1 -o $@ \
27 -Wl,-T,$(filter-out FORCE,$^) 28 -Wl,-T,$(filter-out FORCE,$^)
28 29
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index 9b5bb413a6e9..5d4a7d125ed0 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -103,7 +103,7 @@ ENTRY(ia32_sysenter_target)
103 pushq %rax 103 pushq %rax
104 CFI_ADJUST_CFA_OFFSET 8 104 CFI_ADJUST_CFA_OFFSET 8
105 cld 105 cld
106 SAVE_ARGS 0,0,1 106 SAVE_ARGS 0,0,0
107 /* no need to do an access_ok check here because rbp has been 107 /* no need to do an access_ok check here because rbp has been
108 32bit zero extended */ 108 32bit zero extended */
1091: movl (%rbp),%r9d 1091: movl (%rbp),%r9d
diff --git a/arch/x86_64/ia32/vsyscall.lds b/arch/x86_64/ia32/vsyscall.lds
index f2e75ed4c6c7..1dc86ff5bcb9 100644
--- a/arch/x86_64/ia32/vsyscall.lds
+++ b/arch/x86_64/ia32/vsyscall.lds
@@ -11,6 +11,7 @@ SECTIONS
11 . = VSYSCALL_BASE + SIZEOF_HEADERS; 11 . = VSYSCALL_BASE + SIZEOF_HEADERS;
12 12
13 .hash : { *(.hash) } :text 13 .hash : { *(.hash) } :text
14 .gnu.hash : { *(.gnu.hash) }
14 .dynsym : { *(.dynsym) } 15 .dynsym : { *(.dynsym) }
15 .dynstr : { *(.dynstr) } 16 .dynstr : { *(.dynstr) }
16 .gnu.version : { *(.gnu.version) } 17 .gnu.version : { *(.gnu.version) }
diff --git a/arch/x86_64/kernel/machine_kexec.c b/arch/x86_64/kernel/machine_kexec.c
index 83fb24a02821..106076b370fc 100644
--- a/arch/x86_64/kernel/machine_kexec.c
+++ b/arch/x86_64/kernel/machine_kexec.c
@@ -207,14 +207,11 @@ NORET_TYPE void machine_kexec(struct kimage *image)
207 __flush_tlb(); 207 __flush_tlb();
208 208
209 209
210 /* The segment registers are funny things, they are 210 /* The segment registers are funny things, they have both a
211 * automatically loaded from a table, in memory wherever you 211 * visible and an invisible part. Whenever the visible part is
212 * set them to a specific selector, but this table is never 212 * set to a specific selector, the invisible part is loaded
213 * accessed again unless you set the segment to a different selector. 213 * with from a table in memory. At no other time is the
214 * 214 * descriptor table in memory accessed.
215 * The more common model are caches where the behide
216 * the scenes work is done, but is also dropped at arbitrary
217 * times.
218 * 215 *
219 * I take advantage of this here by force loading the 216 * I take advantage of this here by force loading the
220 * segments, before I zap the gdt with an invalid value. 217 * segments, before I zap the gdt with an invalid value.
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index 88845674c661..4e017fb30fb3 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -615,7 +615,7 @@ static __cpuinit int mce_create_device(unsigned int cpu)
615} 615}
616 616
617#ifdef CONFIG_HOTPLUG_CPU 617#ifdef CONFIG_HOTPLUG_CPU
618static __cpuinit void mce_remove_device(unsigned int cpu) 618static void mce_remove_device(unsigned int cpu)
619{ 619{
620 int i; 620 int i;
621 621
@@ -626,10 +626,9 @@ static __cpuinit void mce_remove_device(unsigned int cpu)
626 sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_check_interval); 626 sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_check_interval);
627 sysdev_unregister(&per_cpu(device_mce,cpu)); 627 sysdev_unregister(&per_cpu(device_mce,cpu));
628} 628}
629#endif
630 629
631/* Get notified when a cpu comes on/off. Be hotplug friendly. */ 630/* Get notified when a cpu comes on/off. Be hotplug friendly. */
632static __cpuinit int 631static int
633mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) 632mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
634{ 633{
635 unsigned int cpu = (unsigned long)hcpu; 634 unsigned int cpu = (unsigned long)hcpu;
@@ -638,18 +637,17 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
638 case CPU_ONLINE: 637 case CPU_ONLINE:
639 mce_create_device(cpu); 638 mce_create_device(cpu);
640 break; 639 break;
641#ifdef CONFIG_HOTPLUG_CPU
642 case CPU_DEAD: 640 case CPU_DEAD:
643 mce_remove_device(cpu); 641 mce_remove_device(cpu);
644 break; 642 break;
645#endif
646 } 643 }
647 return NOTIFY_OK; 644 return NOTIFY_OK;
648} 645}
649 646
650static struct notifier_block __cpuinitdata mce_cpu_notifier = { 647static struct notifier_block mce_cpu_notifier = {
651 .notifier_call = mce_cpu_callback, 648 .notifier_call = mce_cpu_callback,
652}; 649};
650#endif
653 651
654static __init int mce_init_device(void) 652static __init int mce_init_device(void)
655{ 653{
@@ -664,7 +662,7 @@ static __init int mce_init_device(void)
664 mce_create_device(i); 662 mce_create_device(i);
665 } 663 }
666 664
667 register_cpu_notifier(&mce_cpu_notifier); 665 register_hotcpu_notifier(&mce_cpu_notifier);
668 misc_register(&mce_log_device); 666 misc_register(&mce_log_device);
669 return err; 667 return err;
670} 668}
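
Editor's sketch (not part of the patch): the mce.c hunks drop the __cpuinit/__cpuinitdata annotations, fold the remove path, the notifier callback and the notifier block into a single #ifdef CONFIG_HOTPLUG_CPU region, and register it with register_hotcpu_notifier(), which compiles to nothing when hotplug is disabled. The standalone C below only models that shape; HOTPLUG_CPU, create_state() and remove_state() are made-up stand-ins, not kernel APIs.

#include <stdio.h>

#define HOTPLUG_CPU 1          /* set to 0 to model a !CONFIG_HOTPLUG_CPU build */
enum { CPU_ONLINE = 1, CPU_DEAD = 2 };

static void create_state(unsigned int cpu) { printf("create per-cpu state, cpu %u\n", cpu); }

#if HOTPLUG_CPU
/* the teardown path and the callback exist only when hotplug is compiled in */
static void remove_state(unsigned int cpu) { printf("remove per-cpu state, cpu %u\n", cpu); }

static int cpu_callback(unsigned long action, unsigned int cpu)
{
        switch (action) {
        case CPU_ONLINE:
                create_state(cpu);
                break;
        case CPU_DEAD:
                remove_state(cpu);
                break;
        }
        return 0;
}
#endif

int main(void)
{
#if HOTPLUG_CPU
        cpu_callback(CPU_ONLINE, 0);    /* what the registered notifier would see */
        cpu_callback(CPU_DEAD, 0);
#else
        create_state(0);                /* without hotplug, CPUs only ever come up */
#endif
        return 0;
}
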
diff --git a/arch/x86_64/kernel/mce_amd.c b/arch/x86_64/kernel/mce_amd.c
index db2acbf7ad28..883fe747f64c 100644
--- a/arch/x86_64/kernel/mce_amd.c
+++ b/arch/x86_64/kernel/mce_amd.c
@@ -558,7 +558,7 @@ out:
558 * of shared sysfs dir/files, and rest of the cores will be symlinked to it. 558 * of shared sysfs dir/files, and rest of the cores will be symlinked to it.
559 */ 559 */
560 560
561static __cpuinit void deallocate_threshold_block(unsigned int cpu, 561static void deallocate_threshold_block(unsigned int cpu,
562 unsigned int bank) 562 unsigned int bank)
563{ 563{
564 struct threshold_block *pos = NULL; 564 struct threshold_block *pos = NULL;
@@ -578,7 +578,7 @@ static __cpuinit void deallocate_threshold_block(unsigned int cpu,
578 per_cpu(threshold_banks, cpu)[bank]->blocks = NULL; 578 per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
579} 579}
580 580
581static __cpuinit void threshold_remove_bank(unsigned int cpu, int bank) 581static void threshold_remove_bank(unsigned int cpu, int bank)
582{ 582{
583 int i = 0; 583 int i = 0;
584 struct threshold_bank *b; 584 struct threshold_bank *b;
@@ -618,7 +618,7 @@ free_out:
618 per_cpu(threshold_banks, cpu)[bank] = NULL; 618 per_cpu(threshold_banks, cpu)[bank] = NULL;
619} 619}
620 620
621static __cpuinit void threshold_remove_device(unsigned int cpu) 621static void threshold_remove_device(unsigned int cpu)
622{ 622{
623 unsigned int bank; 623 unsigned int bank;
624 624
@@ -629,14 +629,8 @@ static __cpuinit void threshold_remove_device(unsigned int cpu)
629 } 629 }
630} 630}
631 631
632#else /* !CONFIG_HOTPLUG_CPU */
633static void threshold_remove_device(unsigned int cpu)
634{
635}
636#endif
637
638/* get notified when a cpu comes on/off */ 632/* get notified when a cpu comes on/off */
639static int __cpuinit threshold_cpu_callback(struct notifier_block *nfb, 633static int threshold_cpu_callback(struct notifier_block *nfb,
640 unsigned long action, void *hcpu) 634 unsigned long action, void *hcpu)
641{ 635{
642 /* cpu was unsigned int to begin with */ 636 /* cpu was unsigned int to begin with */
@@ -659,9 +653,10 @@ static int __cpuinit threshold_cpu_callback(struct notifier_block *nfb,
659 return NOTIFY_OK; 653 return NOTIFY_OK;
660} 654}
661 655
662static struct notifier_block threshold_cpu_notifier __cpuinitdata = { 656static struct notifier_block threshold_cpu_notifier = {
663 .notifier_call = threshold_cpu_callback, 657 .notifier_call = threshold_cpu_callback,
664}; 658};
659#endif /* CONFIG_HOTPLUG_CPU */
665 660
666static __init int threshold_init_device(void) 661static __init int threshold_init_device(void)
667{ 662{
@@ -673,7 +668,7 @@ static __init int threshold_init_device(void)
673 if (err) 668 if (err)
674 return err; 669 return err;
675 } 670 }
676 register_cpu_notifier(&threshold_cpu_notifier); 671 register_hotcpu_notifier(&threshold_cpu_notifier);
677 return 0; 672 return 0;
678} 673}
679 674
diff --git a/arch/x86_64/kernel/pci-calgary.c b/arch/x86_64/kernel/pci-calgary.c
index e71ed53b08fb..146924ba5df5 100644
--- a/arch/x86_64/kernel/pci-calgary.c
+++ b/arch/x86_64/kernel/pci-calgary.c
@@ -85,7 +85,8 @@
85#define CSR_AGENT_MASK 0xffe0ffff 85#define CSR_AGENT_MASK 0xffe0ffff
86 86
87#define MAX_NUM_OF_PHBS 8 /* how many PHBs in total? */ 87#define MAX_NUM_OF_PHBS 8 /* how many PHBs in total? */
88#define MAX_PHB_BUS_NUM (MAX_NUM_OF_PHBS * 2) /* max dev->bus->number */ 88#define MAX_NUM_CHASSIS 8 /* max number of chassis */
89#define MAX_PHB_BUS_NUM (MAX_NUM_OF_PHBS * MAX_NUM_CHASSIS * 2) /* max dev->bus->number */
89#define PHBS_PER_CALGARY 4 90#define PHBS_PER_CALGARY 4
90 91
91/* register offsets in Calgary's internal register space */ 92/* register offsets in Calgary's internal register space */
@@ -110,7 +111,8 @@ static const unsigned long phb_offsets[] = {
110 0xB000 /* PHB3 */ 111 0xB000 /* PHB3 */
111}; 112};
112 113
113void* tce_table_kva[MAX_NUM_OF_PHBS * MAX_NUMNODES]; 114static char bus_to_phb[MAX_PHB_BUS_NUM];
115void* tce_table_kva[MAX_PHB_BUS_NUM];
114unsigned int specified_table_size = TCE_TABLE_SIZE_UNSPECIFIED; 116unsigned int specified_table_size = TCE_TABLE_SIZE_UNSPECIFIED;
115static int translate_empty_slots __read_mostly = 0; 117static int translate_empty_slots __read_mostly = 0;
116static int calgary_detected __read_mostly = 0; 118static int calgary_detected __read_mostly = 0;
@@ -119,7 +121,7 @@ static int calgary_detected __read_mostly = 0;
119 * the bitmap of PHBs the user requested that we disable 121 * the bitmap of PHBs the user requested that we disable
120 * translation on. 122 * translation on.
121 */ 123 */
122static DECLARE_BITMAP(translation_disabled, MAX_NUMNODES * MAX_PHB_BUS_NUM); 124static DECLARE_BITMAP(translation_disabled, MAX_PHB_BUS_NUM);
123 125
124static void tce_cache_blast(struct iommu_table *tbl); 126static void tce_cache_blast(struct iommu_table *tbl);
125 127
@@ -452,7 +454,7 @@ static struct dma_mapping_ops calgary_dma_ops = {
452 454
453static inline int busno_to_phbid(unsigned char num) 455static inline int busno_to_phbid(unsigned char num)
454{ 456{
455 return bus_to_phb(num) % PHBS_PER_CALGARY; 457 return bus_to_phb[num];
456} 458}
457 459
458static inline unsigned long split_queue_offset(unsigned char num) 460static inline unsigned long split_queue_offset(unsigned char num)
@@ -812,7 +814,7 @@ static int __init calgary_init(void)
812 int i, ret = -ENODEV; 814 int i, ret = -ENODEV;
813 struct pci_dev *dev = NULL; 815 struct pci_dev *dev = NULL;
814 816
815 for (i = 0; i <= num_online_nodes() * MAX_NUM_OF_PHBS; i++) { 817 for (i = 0; i < MAX_PHB_BUS_NUM; i++) {
816 dev = pci_get_device(PCI_VENDOR_ID_IBM, 818 dev = pci_get_device(PCI_VENDOR_ID_IBM,
817 PCI_DEVICE_ID_IBM_CALGARY, 819 PCI_DEVICE_ID_IBM_CALGARY,
818 dev); 820 dev);
@@ -822,7 +824,7 @@ static int __init calgary_init(void)
822 calgary_init_one_nontraslated(dev); 824 calgary_init_one_nontraslated(dev);
823 continue; 825 continue;
824 } 826 }
825 if (!tce_table_kva[i] && !translate_empty_slots) { 827 if (!tce_table_kva[dev->bus->number] && !translate_empty_slots) {
826 pci_dev_put(dev); 828 pci_dev_put(dev);
827 continue; 829 continue;
828 } 830 }
@@ -842,7 +844,7 @@ error:
842 pci_dev_put(dev); 844 pci_dev_put(dev);
843 continue; 845 continue;
844 } 846 }
845 if (!tce_table_kva[i] && !translate_empty_slots) 847 if (!tce_table_kva[dev->bus->number] && !translate_empty_slots)
846 continue; 848 continue;
847 calgary_disable_translation(dev); 849 calgary_disable_translation(dev);
848 calgary_free_tar(dev); 850 calgary_free_tar(dev);
@@ -876,9 +878,10 @@ static inline int __init determine_tce_table_size(u64 ram)
876void __init detect_calgary(void) 878void __init detect_calgary(void)
877{ 879{
878 u32 val; 880 u32 val;
879 int bus, table_idx; 881 int bus;
880 void *tbl; 882 void *tbl;
881 int detected = 0; 883 int calgary_found = 0;
884 int phb = -1;
882 885
883 /* 886 /*
884 * if the user specified iommu=off or iommu=soft or we found 887 * if the user specified iommu=off or iommu=soft or we found
@@ -889,38 +892,46 @@ void __init detect_calgary(void)
889 892
890 specified_table_size = determine_tce_table_size(end_pfn * PAGE_SIZE); 893 specified_table_size = determine_tce_table_size(end_pfn * PAGE_SIZE);
891 894
892 for (bus = 0, table_idx = 0; 895 for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) {
893 bus <= num_online_nodes() * MAX_PHB_BUS_NUM; 896 int dev;
894 bus++) { 897
895 BUG_ON(bus > MAX_NUMNODES * MAX_PHB_BUS_NUM); 898 tce_table_kva[bus] = NULL;
899 bus_to_phb[bus] = -1;
900
896 if (read_pci_config(bus, 0, 0, 0) != PCI_VENDOR_DEVICE_ID_CALGARY) 901 if (read_pci_config(bus, 0, 0, 0) != PCI_VENDOR_DEVICE_ID_CALGARY)
897 continue; 902 continue;
903
904 /*
905 * There are 4 PHBs per Calgary chip. Set phb to which phb (0-3)
906 * it is connected to, relative to the Calgary chip.
907 */
908 phb = (phb + 1) % PHBS_PER_CALGARY;
909
898 if (test_bit(bus, translation_disabled)) { 910 if (test_bit(bus, translation_disabled)) {
899 printk(KERN_INFO "Calgary: translation is disabled for " 911 printk(KERN_INFO "Calgary: translation is disabled for "
900 "PHB 0x%x\n", bus); 912 "PHB 0x%x\n", bus);
901 /* skip this phb, don't allocate a tbl for it */ 913 /* skip this phb, don't allocate a tbl for it */
902 tce_table_kva[table_idx] = NULL;
903 table_idx++;
904 continue; 914 continue;
905 } 915 }
906 /* 916 /*
907 * scan the first slot of the PCI bus to see if there 917 * Scan the slots of the PCI bus to see if there is a device present.
908 * are any devices present 918 * The parent bus will be the zeroth device, so start at 1.
909 */ 919 */
910 val = read_pci_config(bus, 1, 0, 0); 920 for (dev = 1; dev < 8; dev++) {
911 if (val != 0xffffffff || translate_empty_slots) { 921 val = read_pci_config(bus, dev, 0, 0);
912 tbl = alloc_tce_table(); 922 if (val != 0xffffffff || translate_empty_slots) {
913 if (!tbl) 923 tbl = alloc_tce_table();
914 goto cleanup; 924 if (!tbl)
915 detected = 1; 925 goto cleanup;
916 } else 926 tce_table_kva[bus] = tbl;
917 tbl = NULL; 927 bus_to_phb[bus] = phb;
918 928 calgary_found = 1;
919 tce_table_kva[table_idx] = tbl; 929 break;
920 table_idx++; 930 }
931 }
921 } 932 }
922 933
923 if (detected) { 934 if (calgary_found) {
924 iommu_detected = 1; 935 iommu_detected = 1;
925 calgary_detected = 1; 936 calgary_detected = 1;
926 printk(KERN_INFO "PCI-DMA: Calgary IOMMU detected. " 937 printk(KERN_INFO "PCI-DMA: Calgary IOMMU detected. "
@@ -929,9 +940,9 @@ void __init detect_calgary(void)
929 return; 940 return;
930 941
931cleanup: 942cleanup:
932 for (--table_idx; table_idx >= 0; --table_idx) 943 for (--bus; bus >= 0; --bus)
933 if (tce_table_kva[table_idx]) 944 if (tce_table_kva[bus])
934 free_tce_table(tce_table_kva[table_idx]); 945 free_tce_table(tce_table_kva[bus]);
935} 946}
936 947
937int __init calgary_iommu_init(void) 948int __init calgary_iommu_init(void)
@@ -1002,7 +1013,7 @@ static int __init calgary_parse_options(char *p)
1002 if (p == endp) 1013 if (p == endp)
1003 break; 1014 break;
1004 1015
1005 if (bridge <= (num_online_nodes() * MAX_PHB_BUS_NUM)) { 1016 if (bridge < MAX_PHB_BUS_NUM) {
1006 printk(KERN_INFO "Calgary: disabling " 1017 printk(KERN_INFO "Calgary: disabling "
1007 "translation for PHB 0x%x\n", bridge); 1018 "translation for PHB 0x%x\n", bridge);
1008 set_bit(bridge, translation_disabled); 1019 set_bit(bridge, translation_disabled);
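
Editor's sketch (illustration only): the Calgary rework replaces the node-based table indexing with a flat, per-bus bus_to_phb[] map filled during detection, so busno_to_phbid() becomes a plain lookup. The standalone program below models how the detect loop assigns PHB numbers round-robin per chip; has_calgary() is a hypothetical stand-in for the read_pci_config() vendor/device probe, while the bus count and PHBs-per-chip constants mirror the patch.

#include <stdio.h>

#define MAX_PHB_BUS_NUM   (8 * 8 * 2)
#define PHBS_PER_CALGARY  4

static signed char bus_to_phb[MAX_PHB_BUS_NUM];

static int has_calgary(int bus)
{
        return bus % 3 == 0;            /* fake probe result, just for the demo */
}

int main(void)
{
        int bus, phb = -1;

        for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) {
                bus_to_phb[bus] = -1;
                if (!has_calgary(bus))
                        continue;
                /* next PHB (0-3) on this Calgary chip, round robin */
                phb = (phb + 1) % PHBS_PER_CALGARY;
                bus_to_phb[bus] = phb;
        }

        for (bus = 0; bus < 16; bus++)
                printf("bus %2d -> phb %d\n", bus, bus_to_phb[bus]);
        return 0;
}
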
diff --git a/arch/x86_64/kernel/pci-swiotlb.c b/arch/x86_64/kernel/pci-swiotlb.c
index ebdb77fe2057..6a55f87ba97f 100644
--- a/arch/x86_64/kernel/pci-swiotlb.c
+++ b/arch/x86_64/kernel/pci-swiotlb.c
@@ -31,9 +31,10 @@ struct dma_mapping_ops swiotlb_dma_ops = {
31void pci_swiotlb_init(void) 31void pci_swiotlb_init(void)
32{ 32{
33 /* don't initialize swiotlb if iommu=off (no_iommu=1) */ 33 /* don't initialize swiotlb if iommu=off (no_iommu=1) */
34 if (!iommu_detected && !no_iommu && 34 if (!iommu_detected && !no_iommu && end_pfn > MAX_DMA32_PFN)
35 (end_pfn > MAX_DMA32_PFN || force_iommu))
36 swiotlb = 1; 35 swiotlb = 1;
36 if (swiotlb_force)
37 swiotlb = 1;
37 if (swiotlb) { 38 if (swiotlb) {
38 printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n"); 39 printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
39 swiotlb_init(); 40 swiotlb_init();
diff --git a/arch/x86_64/kernel/tce.c b/arch/x86_64/kernel/tce.c
index d3a9e79e954c..5530dda3f27a 100644
--- a/arch/x86_64/kernel/tce.c
+++ b/arch/x86_64/kernel/tce.c
@@ -96,7 +96,6 @@ static inline unsigned int table_size_to_number_of_entries(unsigned char size)
96static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl) 96static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl)
97{ 97{
98 unsigned int bitmapsz; 98 unsigned int bitmapsz;
99 unsigned int tce_table_index;
100 unsigned long bmppages; 99 unsigned long bmppages;
101 int ret; 100 int ret;
102 101
@@ -105,8 +104,7 @@ static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl)
105 /* set the tce table size - measured in entries */ 104 /* set the tce table size - measured in entries */
106 tbl->it_size = table_size_to_number_of_entries(specified_table_size); 105 tbl->it_size = table_size_to_number_of_entries(specified_table_size);
107 106
108 tce_table_index = bus_to_phb(tbl->it_busno); 107 tbl->it_base = (unsigned long)tce_table_kva[dev->bus->number];
109 tbl->it_base = (unsigned long)tce_table_kva[tce_table_index];
110 if (!tbl->it_base) { 108 if (!tbl->it_base) {
111 printk(KERN_ERR "Calgary: iommu_table_setparms: " 109 printk(KERN_ERR "Calgary: iommu_table_setparms: "
112 "no table allocated?!\n"); 110 "no table allocated?!\n");
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index b9ff75992c16..7a9b18224182 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -28,6 +28,7 @@
28#include <linux/acpi.h> 28#include <linux/acpi.h>
29#ifdef CONFIG_ACPI 29#ifdef CONFIG_ACPI
30#include <acpi/achware.h> /* for PM timer frequency */ 30#include <acpi/achware.h> /* for PM timer frequency */
31#include <acpi/acpi_bus.h>
31#endif 32#endif
32#include <asm/8253pit.h> 33#include <asm/8253pit.h>
33#include <asm/pgtable.h> 34#include <asm/pgtable.h>
@@ -193,7 +194,7 @@ unsigned long profile_pc(struct pt_regs *regs)
193 is just accounted to the spinlock function. 194 is just accounted to the spinlock function.
194 Better would be to write these functions in assembler again 195 Better would be to write these functions in assembler again
195 and check exactly. */ 196 and check exactly. */
196 if (in_lock_functions(pc)) { 197 if (!user_mode(regs) && in_lock_functions(pc)) {
197 char *v = *(char **)regs->rsp; 198 char *v = *(char **)regs->rsp;
198 if ((v >= _stext && v <= _etext) || 199 if ((v >= _stext && v <= _etext) ||
199 (v >= _sinittext && v <= _einittext) || 200 (v >= _sinittext && v <= _einittext) ||
@@ -953,11 +954,18 @@ __cpuinit int unsynchronized_tsc(void)
953#ifdef CONFIG_SMP 954#ifdef CONFIG_SMP
954 if (apic_is_clustered_box()) 955 if (apic_is_clustered_box())
955 return 1; 956 return 1;
956 /* Intel systems are normally all synchronized. Exceptions
957 are handled in the check above. */
958 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
959 return 0;
960#endif 957#endif
958 /* Most Intel systems have synchronized TSCs except for
959 multi-node systems */
960 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
961#ifdef CONFIG_ACPI
962 /* But TSC doesn't tick in C3 so don't use it there */
963 if (acpi_fadt.length > 0 && acpi_fadt.plvl3_lat < 100)
964 return 1;
965#endif
966 return 0;
967 }
968
961 /* Assume multi socket systems are not synchronized */ 969 /* Assume multi socket systems are not synchronized */
962 return num_present_cpus() > 1; 970 return num_present_cpus() > 1;
963} 971}
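
Editor's sketch: the reworked unsynchronized_tsc() decides in three steps -- clustered APIC boxes are treated as unsynchronized, Intel CPUs are trusted unless ACPI reports a C3 latency below 100 (the TSC stops ticking in C3), and everything else falls back to "more than one present CPU means unsynchronized". The standalone model below takes those inputs as plain parameters; in the kernel they come from apic_is_clustered_box(), boot_cpu_data and acpi_fadt, and the clustered check is SMP-only.

#include <stdio.h>

static int tsc_unsynchronized(int clustered_box, int is_intel,
                              int c3_latency, int present_cpus)
{
        if (clustered_box)
                return 1;                       /* clustered APIC: assume unsynced */
        if (is_intel) {
                if (c3_latency > 0 && c3_latency < 100)
                        return 1;               /* TSC stops in C3, don't trust it */
                return 0;                       /* otherwise Intel TSCs stay in sync */
        }
        return present_cpus > 1;                /* multi-socket: assume unsynced */
}

int main(void)
{
        printf("%d\n", tsc_unsynchronized(0, 1, 50, 2));   /* Intel laptop with C3 -> 1 */
        printf("%d\n", tsc_unsynchronized(0, 1, 0, 4));    /* Intel, no C3 info    -> 0 */
        printf("%d\n", tsc_unsynchronized(0, 0, 0, 2));    /* non-Intel SMP        -> 1 */
        return 0;
}
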
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index eb39a2775236..4e9938dee060 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -254,7 +254,6 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
254{ 254{
255 const unsigned cpu = safe_smp_processor_id(); 255 const unsigned cpu = safe_smp_processor_id();
256 unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr; 256 unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
257 int i = 11;
258 unsigned used = 0; 257 unsigned used = 0;
259 258
260 printk("\nCall Trace:\n"); 259 printk("\nCall Trace:\n");
@@ -275,11 +274,20 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
275 if (unwind_init_blocked(&info, tsk) == 0) 274 if (unwind_init_blocked(&info, tsk) == 0)
276 unw_ret = show_trace_unwind(&info, NULL); 275 unw_ret = show_trace_unwind(&info, NULL);
277 } 276 }
278 if (unw_ret > 0) { 277 if (unw_ret > 0 && !arch_unw_user_mode(&info)) {
279 if (call_trace > 0) 278#ifdef CONFIG_STACK_UNWIND
279 unsigned long rip = info.regs.rip;
280 print_symbol("DWARF2 unwinder stuck at %s\n", rip);
281 if (call_trace == 1) {
282 printk("Leftover inexact backtrace:\n");
283 stack = (unsigned long *)info.regs.rsp;
284 } else if (call_trace > 1)
280 return; 285 return;
281 printk("Legacy call trace:"); 286 else
282 i = 18; 287 printk("Full inexact backtrace again:\n");
288#else
289 printk("Inexact backtrace:\n");
290#endif
283 } 291 }
284 } 292 }
285 293
@@ -521,7 +529,7 @@ void __kprobes oops_end(unsigned long flags)
521 /* Nest count reaches zero, release the lock. */ 529 /* Nest count reaches zero, release the lock. */
522 spin_unlock_irqrestore(&die_lock, flags); 530 spin_unlock_irqrestore(&die_lock, flags);
523 if (panic_on_oops) 531 if (panic_on_oops)
524 panic("Oops"); 532 panic("Fatal exception: panic_on_oops");
525} 533}
526 534
527void __kprobes __die(const char * str, struct pt_regs * regs, long err) 535void __kprobes __die(const char * str, struct pt_regs * regs, long err)
@@ -1118,8 +1126,10 @@ static int __init call_trace_setup(char *s)
1118 call_trace = -1; 1126 call_trace = -1;
1119 else if (strcmp(s, "both") == 0) 1127 else if (strcmp(s, "both") == 0)
1120 call_trace = 0; 1128 call_trace = 0;
1121 else if (strcmp(s, "new") == 0) 1129 else if (strcmp(s, "newfallback") == 0)
1122 call_trace = 1; 1130 call_trace = 1;
1131 else if (strcmp(s, "new") == 0)
1132 call_trace = 2;
1123 return 1; 1133 return 1;
1124} 1134}
1125__setup("call_trace=", call_trace_setup); 1135__setup("call_trace=", call_trace_setup);
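
Editor's sketch: the call_trace= boot parameter gains a fourth value, so the mapping becomes old=-1, both=0, newfallback=1, new=2, and show_trace() prints the leftover inexact backtrace only for the value 1. A tiny standalone parser of that mapping (the return value for unknown strings is a demo choice; the kernel simply leaves the variable untouched):

#include <stdio.h>
#include <string.h>

/* -1: old only, 0: both, 1: new with fallback, 2: new only */
static int parse_call_trace(const char *s)
{
        if (strcmp(s, "old") == 0)          return -1;
        if (strcmp(s, "both") == 0)         return 0;
        if (strcmp(s, "newfallback") == 0)  return 1;
        if (strcmp(s, "new") == 0)          return 2;
        return 0;                           /* unknown: demo default only */
}

int main(void)
{
        const char *opts[] = { "old", "both", "newfallback", "new", "bogus" };
        for (unsigned i = 0; i < sizeof(opts) / sizeof(opts[0]); i++)
                printf("call_trace=%s -> %d\n", opts[i], parse_call_trace(opts[i]));
        return 0;
}
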
diff --git a/arch/x86_64/pci/k8-bus.c b/arch/x86_64/pci/k8-bus.c
index b50a7c7c47f8..3acf60ded2a0 100644
--- a/arch/x86_64/pci/k8-bus.c
+++ b/arch/x86_64/pci/k8-bus.c
@@ -2,7 +2,6 @@
2#include <linux/pci.h> 2#include <linux/pci.h>
3#include <asm/mpspec.h> 3#include <asm/mpspec.h>
4#include <linux/cpumask.h> 4#include <linux/cpumask.h>
5#include <asm/k8.h>
6 5
7/* 6/*
8 * This discovers the pcibus <-> node mapping on AMD K8. 7 * This discovers the pcibus <-> node mapping on AMD K8.
@@ -19,6 +18,7 @@
19#define NR_LDT_BUS_NUMBER_REGISTERS 3 18#define NR_LDT_BUS_NUMBER_REGISTERS 3
20#define SECONDARY_LDT_BUS_NUMBER(dword) ((dword >> 8) & 0xFF) 19#define SECONDARY_LDT_BUS_NUMBER(dword) ((dword >> 8) & 0xFF)
21#define SUBORDINATE_LDT_BUS_NUMBER(dword) ((dword >> 16) & 0xFF) 20#define SUBORDINATE_LDT_BUS_NUMBER(dword) ((dword >> 16) & 0xFF)
21#define PCI_DEVICE_ID_K8HTCONFIG 0x1100
22 22
23/** 23/**
24 * fill_mp_bus_to_cpumask() 24 * fill_mp_bus_to_cpumask()
@@ -28,7 +28,8 @@
28__init static int 28__init static int
29fill_mp_bus_to_cpumask(void) 29fill_mp_bus_to_cpumask(void)
30{ 30{
31 int i, j, k; 31 struct pci_dev *nb_dev = NULL;
32 int i, j;
32 u32 ldtbus, nid; 33 u32 ldtbus, nid;
33 static int lbnr[3] = { 34 static int lbnr[3] = {
34 LDT_BUS_NUMBER_REGISTER_0, 35 LDT_BUS_NUMBER_REGISTER_0,
@@ -36,9 +37,8 @@ fill_mp_bus_to_cpumask(void)
36 LDT_BUS_NUMBER_REGISTER_2 37 LDT_BUS_NUMBER_REGISTER_2
37 }; 38 };
38 39
39 cache_k8_northbridges(); 40 while ((nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
40 for (k = 0; k < num_k8_northbridges; k++) { 41 PCI_DEVICE_ID_K8HTCONFIG, nb_dev))) {
41 struct pci_dev *nb_dev = k8_northbridges[k];
42 pci_read_config_dword(nb_dev, NODE_ID_REGISTER, &nid); 42 pci_read_config_dword(nb_dev, NODE_ID_REGISTER, &nid);
43 43
44 for (i = 0; i < NR_LDT_BUS_NUMBER_REGISTERS; i++) { 44 for (i = 0; i < NR_LDT_BUS_NUMBER_REGISTERS; i++) {
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 27e409089a7b..9734960a2451 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -487,11 +487,9 @@ void die(const char * str, struct pt_regs * regs, long err)
487 if (in_interrupt()) 487 if (in_interrupt())
488 panic("Fatal exception in interrupt"); 488 panic("Fatal exception in interrupt");
489 489
490 if (panic_on_oops) { 490 if (panic_on_oops)
491 printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n"); 491 panic("Fatal exception: panic_on_oops");
492 ssleep(5); 492
493 panic("Fatal exception");
494 }
495 do_exit(err); 493 do_exit(err);
496} 494}
497 495
diff --git a/block/blktrace.c b/block/blktrace.c
index b8c0702777ff..265f7a830619 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -80,7 +80,7 @@ static u32 bio_act[5] __read_mostly = { 0, BLK_TC_ACT(BLK_TC_BARRIER), BLK_TC_AC
80#define trace_sync_bit(rw) \ 80#define trace_sync_bit(rw) \
81 (((rw) & (1 << BIO_RW_SYNC)) >> (BIO_RW_SYNC - 1)) 81 (((rw) & (1 << BIO_RW_SYNC)) >> (BIO_RW_SYNC - 1))
82#define trace_ahead_bit(rw) \ 82#define trace_ahead_bit(rw) \
83 (((rw) & (1 << BIO_RW_AHEAD)) << (BIO_RW_AHEAD - 0)) 83 (((rw) & (1 << BIO_RW_AHEAD)) << (2 - BIO_RW_AHEAD))
84 84
85/* 85/*
86 * The worker for the various blk_add_trace*() types. Fills out a 86 * The worker for the various blk_add_trace*() types. Fills out a
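
Editor's sketch: trace_ahead_bit() isolates the readahead flag from the request flags and shifts it into a fixed bit of the bio_act[] index; the fix writes the shift amount relative to the destination bit instead of the source bit. The bit positions below are illustrative assumptions, not the kernel's definitions -- the point is only the (destination - source) form of the shift.

#include <stdio.h>

#define BIO_RW_AHEAD_BIT  1     /* assumed source position of the flag */
#define DEST_BIT          2     /* position wanted in the bio_act index */

static unsigned ahead_index_bit(unsigned rw)
{
        return (rw & (1u << BIO_RW_AHEAD_BIT)) << (DEST_BIT - BIO_RW_AHEAD_BIT);
}

int main(void)
{
        printf("rw=0x0 -> 0x%x\n", ahead_index_bit(0x0));
        printf("rw=0x2 -> 0x%x\n", ahead_index_bit(0x2));   /* flag set -> bit 2 (0x4) */
        return 0;
}
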
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 102ebc2c5c34..aae3123bf3ee 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -936,7 +936,7 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
936 * seeks. so allow a little bit of time for him to submit a new rq 936 * seeks. so allow a little bit of time for him to submit a new rq
937 */ 937 */
938 if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic)) 938 if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
939 sl = 2; 939 sl = min(sl, msecs_to_jiffies(2));
940 940
941 mod_timer(&cfqd->idle_slice_timer, jiffies + sl); 941 mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
942 return 1; 942 return 1;
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index c2d621632383..3ef9d514b916 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -139,7 +139,7 @@ static int __cpuinit topology_sysfs_init(void)
139 (void *)(long)i); 139 (void *)(long)i);
140 } 140 }
141 141
142 register_cpu_notifier(&topology_cpu_notifier); 142 register_hotcpu_notifier(&topology_cpu_notifier);
143 143
144 return 0; 144 return 0;
145} 145}
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 1c4df22dfd2a..7b0eca703a67 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1233,6 +1233,50 @@ static inline void complete_buffers(struct bio *bio, int status)
1233 } 1233 }
1234} 1234}
1235 1235
1236static void cciss_check_queues(ctlr_info_t *h)
1237{
1238 int start_queue = h->next_to_run;
1239 int i;
1240
1241 /* check to see if we have maxed out the number of commands that can
1242 * be placed on the queue. If so then exit. We do this check here
1243 * in case the interrupt we serviced was from an ioctl and did not
1244 * free any new commands.
1245 */
1246 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
1247 return;
1248
1249 /* We have room on the queue for more commands. Now we need to queue
1250 * them up. We will also keep track of the next queue to run so
1251 * that every queue gets a chance to be started first.
1252 */
1253 for (i = 0; i < h->highest_lun + 1; i++) {
1254 int curr_queue = (start_queue + i) % (h->highest_lun + 1);
1255 /* make sure the disk has been added and the drive is real
1256 * because this can be called from the middle of init_one.
1257 */
1258 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
1259 continue;
1260 blk_start_queue(h->gendisk[curr_queue]->queue);
1261
1262 /* check to see if we have maxed out the number of commands
1263 * that can be placed on the queue.
1264 */
1265 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
1266 if (curr_queue == start_queue) {
1267 h->next_to_run =
1268 (start_queue + 1) % (h->highest_lun + 1);
1269 break;
1270 } else {
1271 h->next_to_run = curr_queue;
1272 break;
1273 }
1274 } else {
1275 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
1276 }
1277 }
1278}
1279
1236static void cciss_softirq_done(struct request *rq) 1280static void cciss_softirq_done(struct request *rq)
1237{ 1281{
1238 CommandList_struct *cmd = rq->completion_data; 1282 CommandList_struct *cmd = rq->completion_data;
@@ -1264,6 +1308,7 @@ static void cciss_softirq_done(struct request *rq)
1264 spin_lock_irqsave(&h->lock, flags); 1308 spin_lock_irqsave(&h->lock, flags);
1265 end_that_request_last(rq, rq->errors); 1309 end_that_request_last(rq, rq->errors);
1266 cmd_free(h, cmd, 1); 1310 cmd_free(h, cmd, 1);
1311 cciss_check_queues(h);
1267 spin_unlock_irqrestore(&h->lock, flags); 1312 spin_unlock_irqrestore(&h->lock, flags);
1268} 1313}
1269 1314
@@ -2528,8 +2573,6 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
2528 CommandList_struct *c; 2573 CommandList_struct *c;
2529 unsigned long flags; 2574 unsigned long flags;
2530 __u32 a, a1, a2; 2575 __u32 a, a1, a2;
2531 int j;
2532 int start_queue = h->next_to_run;
2533 2576
2534 if (interrupt_not_for_us(h)) 2577 if (interrupt_not_for_us(h))
2535 return IRQ_NONE; 2578 return IRQ_NONE;
@@ -2588,45 +2631,6 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
2588 } 2631 }
2589 } 2632 }
2590 2633
2591 /* check to see if we have maxed out the number of commands that can
2592 * be placed on the queue. If so then exit. We do this check here
2593 * in case the interrupt we serviced was from an ioctl and did not
2594 * free any new commands.
2595 */
2596 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
2597 goto cleanup;
2598
2599 /* We have room on the queue for more commands. Now we need to queue
2600 * them up. We will also keep track of the next queue to run so
2601 * that every queue gets a chance to be started first.
2602 */
2603 for (j = 0; j < h->highest_lun + 1; j++) {
2604 int curr_queue = (start_queue + j) % (h->highest_lun + 1);
2605 /* make sure the disk has been added and the drive is real
2606 * because this can be called from the middle of init_one.
2607 */
2608 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
2609 continue;
2610 blk_start_queue(h->gendisk[curr_queue]->queue);
2611
2612 /* check to see if we have maxed out the number of commands
2613 * that can be placed on the queue.
2614 */
2615 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
2616 if (curr_queue == start_queue) {
2617 h->next_to_run =
2618 (start_queue + 1) % (h->highest_lun + 1);
2619 goto cleanup;
2620 } else {
2621 h->next_to_run = curr_queue;
2622 goto cleanup;
2623 }
2624 } else {
2625 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
2626 }
2627 }
2628
2629 cleanup:
2630 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 2634 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2631 return IRQ_HANDLED; 2635 return IRQ_HANDLED;
2632} 2636}
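
Editor's sketch: the queue-restart logic that used to sit at the tail of do_cciss_intr() moves into cciss_check_queues(), which the softirq completion path now also calls after freeing a command. The standalone model below keeps only the round-robin walk and the next_to_run bookkeeping; command-pool occupancy is a plain counter and the per-drive validity checks are omitted.

#include <stdio.h>

#define NQUEUES   4
#define POOL_SIZE 3

static int next_to_run;
static int commands_in_flight;

static int pool_full(void) { return commands_in_flight >= POOL_SIZE; }

static void start_queue(int q)
{
        printf("start queue %d\n", q);
        commands_in_flight++;           /* pretend each restart queues one command */
}

static void check_queues(void)
{
        int start = next_to_run, i;

        if (pool_full())
                return;

        for (i = 0; i < NQUEUES; i++) {
                int curr = (start + i) % NQUEUES;
                start_queue(curr);
                if (pool_full()) {
                        /* remember where to resume next time around */
                        next_to_run = (curr == start) ? (start + 1) % NQUEUES : curr;
                        break;
                }
        }
}

int main(void)
{
        check_queues();
        printf("next_to_run = %d\n", next_to_run);
        return 0;
}
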
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 0a1b1ea36ddc..bdbade9a5cf5 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -300,6 +300,15 @@ static struct request *nbd_read_stat(struct nbd_device *lo)
300 lo->disk->disk_name, result); 300 lo->disk->disk_name, result);
301 goto harderror; 301 goto harderror;
302 } 302 }
303
304 if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
305 printk(KERN_ERR "%s: Wrong magic (0x%lx)\n",
306 lo->disk->disk_name,
307 (unsigned long)ntohl(reply.magic));
308 result = -EPROTO;
309 goto harderror;
310 }
311
303 req = nbd_find_request(lo, reply.handle); 312 req = nbd_find_request(lo, reply.handle);
304 if (unlikely(IS_ERR(req))) { 313 if (unlikely(IS_ERR(req))) {
305 result = PTR_ERR(req); 314 result = PTR_ERR(req);
@@ -312,13 +321,6 @@ static struct request *nbd_read_stat(struct nbd_device *lo)
312 goto harderror; 321 goto harderror;
313 } 322 }
314 323
315 if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
316 printk(KERN_ERR "%s: Wrong magic (0x%lx)\n",
317 lo->disk->disk_name,
318 (unsigned long)ntohl(reply.magic));
319 result = -EPROTO;
320 goto harderror;
321 }
322 if (ntohl(reply.error)) { 324 if (ntohl(reply.error)) {
323 printk(KERN_ERR "%s: Other side returned error (%d)\n", 325 printk(KERN_ERR "%s: Other side returned error (%d)\n",
324 lo->disk->disk_name, ntohl(reply.error)); 326 lo->disk->disk_name, ntohl(reply.error));
@@ -339,7 +341,8 @@ static struct request *nbd_read_stat(struct nbd_device *lo)
339 printk(KERN_ERR "%s: Receive data failed (result %d)\n", 341 printk(KERN_ERR "%s: Receive data failed (result %d)\n",
340 lo->disk->disk_name, 342 lo->disk->disk_name,
341 result); 343 result);
342 goto harderror; 344 req->errors++;
345 return req;
343 } 346 }
344 dprintk(DBG_RX, "%s: request %p: got %d bytes data\n", 347 dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
345 lo->disk->disk_name, req, bvec->bv_len); 348 lo->disk->disk_name, req, bvec->bv_len);
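
Editor's sketch: the nbd change validates the reply magic before the handle in that reply is used to look up a request, and a failed data receive now just marks the request as errored instead of tearing the whole connection down. The standalone program below shows the validate-the-header-before-trusting-its-contents ordering; the struct is a simplified stand-in for struct nbd_reply, and 0x67446698 is the nbd reply magic.

#include <stdio.h>
#include <stdint.h>

#define NBD_REPLY_MAGIC 0x67446698u

struct reply { uint32_t magic; uint64_t handle; };

/* returns 0 on success, negative on protocol error */
static int handle_reply(const struct reply *r)
{
        /* check the magic before trusting the handle it carries */
        if (r->magic != NBD_REPLY_MAGIC) {
                fprintf(stderr, "wrong magic 0x%x\n", (unsigned)r->magic);
                return -1;
        }
        printf("look up request for handle %llu\n", (unsigned long long)r->handle);
        return 0;
}

int main(void)
{
        struct reply good = { NBD_REPLY_MAGIC, 42 };
        struct reply bad  = { 0xdeadbeef, 7 };

        handle_reply(&good);
        handle_reply(&bad);
        return 0;
}
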
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index bde2c64b6346..451b996bba91 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2577,19 +2577,19 @@ static int pkt_ctl_ioctl(struct inode *inode, struct file *file, unsigned int cm
2577 case PKT_CTRL_CMD_SETUP: 2577 case PKT_CTRL_CMD_SETUP:
2578 if (!capable(CAP_SYS_ADMIN)) 2578 if (!capable(CAP_SYS_ADMIN))
2579 return -EPERM; 2579 return -EPERM;
2580 mutex_lock(&ctl_mutex); 2580 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2581 ret = pkt_setup_dev(&ctrl_cmd); 2581 ret = pkt_setup_dev(&ctrl_cmd);
2582 mutex_unlock(&ctl_mutex); 2582 mutex_unlock(&ctl_mutex);
2583 break; 2583 break;
2584 case PKT_CTRL_CMD_TEARDOWN: 2584 case PKT_CTRL_CMD_TEARDOWN:
2585 if (!capable(CAP_SYS_ADMIN)) 2585 if (!capable(CAP_SYS_ADMIN))
2586 return -EPERM; 2586 return -EPERM;
2587 mutex_lock(&ctl_mutex); 2587 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2588 ret = pkt_remove_dev(&ctrl_cmd); 2588 ret = pkt_remove_dev(&ctrl_cmd);
2589 mutex_unlock(&ctl_mutex); 2589 mutex_unlock(&ctl_mutex);
2590 break; 2590 break;
2591 case PKT_CTRL_CMD_STATUS: 2591 case PKT_CTRL_CMD_STATUS:
2592 mutex_lock(&ctl_mutex); 2592 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2593 pkt_get_status(&ctrl_cmd); 2593 pkt_get_status(&ctrl_cmd);
2594 mutex_unlock(&ctl_mutex); 2594 mutex_unlock(&ctl_mutex);
2595 break; 2595 break;
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
index be61f22ee7bb..d37ced0d132b 100644
--- a/drivers/char/hw_random/geode-rng.c
+++ b/drivers/char/hw_random/geode-rng.c
@@ -107,10 +107,14 @@ found:
107 if (err) { 107 if (err) {
108 printk(KERN_ERR PFX "RNG registering failed (%d)\n", 108 printk(KERN_ERR PFX "RNG registering failed (%d)\n",
109 err); 109 err);
110 goto out; 110 goto err_unmap;
111 } 111 }
112out: 112out:
113 return err; 113 return err;
114
115err_unmap:
116 iounmap(mem);
117 goto out;
114} 118}
115 119
116static void __exit mod_exit(void) 120static void __exit mod_exit(void)
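
Editor's sketch: both RNG drivers gain an err_unmap label so that a failed hwrng registration unmaps the MMIO region instead of leaking it. The standalone program below mirrors that goto shape, with malloc()/free() standing in for ioremap()/iounmap() and a register_rng() stub forced to fail; none of these helpers are real driver APIs.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static void *map_region(void)       { return malloc(64); }
static void  unmap_region(void *p)  { free(p); }
static int   register_rng(void)     { return -EIO; }     /* force the failure path */

static int mod_init(void)
{
        int err = -ENOMEM;
        void *mem = map_region();

        if (!mem)
                goto out;

        err = register_rng();
        if (err) {
                fprintf(stderr, "registering failed (%d)\n", err);
                goto err_unmap;
        }
out:
        return err;

err_unmap:
        unmap_region(mem);      /* the fix: don't leak the mapping on failure */
        goto out;
}

int main(void)
{
        printf("mod_init() = %d\n", mod_init());
        return 0;
}
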
diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
index 6594bd5645f4..ccd7e7102234 100644
--- a/drivers/char/hw_random/intel-rng.c
+++ b/drivers/char/hw_random/intel-rng.c
@@ -164,7 +164,7 @@ static int __init mod_init(void)
164 if (err) { 164 if (err) {
165 printk(KERN_ERR PFX "RNG registering failed (%d)\n", 165 printk(KERN_ERR PFX "RNG registering failed (%d)\n",
166 err); 166 err);
167 goto out; 167 goto err_unmap;
168 } 168 }
169out: 169out:
170 return err; 170 return err;
diff --git a/drivers/char/pc8736x_gpio.c b/drivers/char/pc8736x_gpio.c
index 645eb81cb5a9..84e5a68635f1 100644
--- a/drivers/char/pc8736x_gpio.c
+++ b/drivers/char/pc8736x_gpio.c
@@ -221,7 +221,6 @@ static struct nsc_gpio_ops pc8736x_gpio_ops = {
221 .gpio_change = pc8736x_gpio_change, 221 .gpio_change = pc8736x_gpio_change,
222 .gpio_current = pc8736x_gpio_current 222 .gpio_current = pc8736x_gpio_current
223}; 223};
224EXPORT_SYMBOL(pc8736x_gpio_ops);
225 224
226static int pc8736x_gpio_open(struct inode *inode, struct file *file) 225static int pc8736x_gpio_open(struct inode *inode, struct file *file)
227{ 226{
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 498aa37bca22..3ece69231343 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -51,6 +51,7 @@ void proc_fork_connector(struct task_struct *task)
51 struct cn_msg *msg; 51 struct cn_msg *msg;
52 struct proc_event *ev; 52 struct proc_event *ev;
53 __u8 buffer[CN_PROC_MSG_SIZE]; 53 __u8 buffer[CN_PROC_MSG_SIZE];
54 struct timespec ts;
54 55
55 if (atomic_read(&proc_event_num_listeners) < 1) 56 if (atomic_read(&proc_event_num_listeners) < 1)
56 return; 57 return;
@@ -58,7 +59,8 @@ void proc_fork_connector(struct task_struct *task)
58 msg = (struct cn_msg*)buffer; 59 msg = (struct cn_msg*)buffer;
59 ev = (struct proc_event*)msg->data; 60 ev = (struct proc_event*)msg->data;
60 get_seq(&msg->seq, &ev->cpu); 61 get_seq(&msg->seq, &ev->cpu);
61 ktime_get_ts(&ev->timestamp); /* get high res monotonic timestamp */ 62 ktime_get_ts(&ts); /* get high res monotonic timestamp */
63 ev->timestamp_ns = timespec_to_ns(&ts);
62 ev->what = PROC_EVENT_FORK; 64 ev->what = PROC_EVENT_FORK;
63 ev->event_data.fork.parent_pid = task->real_parent->pid; 65 ev->event_data.fork.parent_pid = task->real_parent->pid;
64 ev->event_data.fork.parent_tgid = task->real_parent->tgid; 66 ev->event_data.fork.parent_tgid = task->real_parent->tgid;
@@ -76,6 +78,7 @@ void proc_exec_connector(struct task_struct *task)
76{ 78{
77 struct cn_msg *msg; 79 struct cn_msg *msg;
78 struct proc_event *ev; 80 struct proc_event *ev;
81 struct timespec ts;
79 __u8 buffer[CN_PROC_MSG_SIZE]; 82 __u8 buffer[CN_PROC_MSG_SIZE];
80 83
81 if (atomic_read(&proc_event_num_listeners) < 1) 84 if (atomic_read(&proc_event_num_listeners) < 1)
@@ -84,7 +87,8 @@ void proc_exec_connector(struct task_struct *task)
84 msg = (struct cn_msg*)buffer; 87 msg = (struct cn_msg*)buffer;
85 ev = (struct proc_event*)msg->data; 88 ev = (struct proc_event*)msg->data;
86 get_seq(&msg->seq, &ev->cpu); 89 get_seq(&msg->seq, &ev->cpu);
87 ktime_get_ts(&ev->timestamp); 90 ktime_get_ts(&ts); /* get high res monotonic timestamp */
91 ev->timestamp_ns = timespec_to_ns(&ts);
88 ev->what = PROC_EVENT_EXEC; 92 ev->what = PROC_EVENT_EXEC;
89 ev->event_data.exec.process_pid = task->pid; 93 ev->event_data.exec.process_pid = task->pid;
90 ev->event_data.exec.process_tgid = task->tgid; 94 ev->event_data.exec.process_tgid = task->tgid;
@@ -100,6 +104,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
100 struct cn_msg *msg; 104 struct cn_msg *msg;
101 struct proc_event *ev; 105 struct proc_event *ev;
102 __u8 buffer[CN_PROC_MSG_SIZE]; 106 __u8 buffer[CN_PROC_MSG_SIZE];
107 struct timespec ts;
103 108
104 if (atomic_read(&proc_event_num_listeners) < 1) 109 if (atomic_read(&proc_event_num_listeners) < 1)
105 return; 110 return;
@@ -118,7 +123,8 @@ void proc_id_connector(struct task_struct *task, int which_id)
118 } else 123 } else
119 return; 124 return;
120 get_seq(&msg->seq, &ev->cpu); 125 get_seq(&msg->seq, &ev->cpu);
121 ktime_get_ts(&ev->timestamp); 126 ktime_get_ts(&ts); /* get high res monotonic timestamp */
127 ev->timestamp_ns = timespec_to_ns(&ts);
122 128
123 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 129 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
124 msg->ack = 0; /* not used */ 130 msg->ack = 0; /* not used */
@@ -131,6 +137,7 @@ void proc_exit_connector(struct task_struct *task)
131 struct cn_msg *msg; 137 struct cn_msg *msg;
132 struct proc_event *ev; 138 struct proc_event *ev;
133 __u8 buffer[CN_PROC_MSG_SIZE]; 139 __u8 buffer[CN_PROC_MSG_SIZE];
140 struct timespec ts;
134 141
135 if (atomic_read(&proc_event_num_listeners) < 1) 142 if (atomic_read(&proc_event_num_listeners) < 1)
136 return; 143 return;
@@ -138,7 +145,8 @@ void proc_exit_connector(struct task_struct *task)
138 msg = (struct cn_msg*)buffer; 145 msg = (struct cn_msg*)buffer;
139 ev = (struct proc_event*)msg->data; 146 ev = (struct proc_event*)msg->data;
140 get_seq(&msg->seq, &ev->cpu); 147 get_seq(&msg->seq, &ev->cpu);
141 ktime_get_ts(&ev->timestamp); 148 ktime_get_ts(&ts); /* get high res monotonic timestamp */
149 ev->timestamp_ns = timespec_to_ns(&ts);
142 ev->what = PROC_EVENT_EXIT; 150 ev->what = PROC_EVENT_EXIT;
143 ev->event_data.exit.process_pid = task->pid; 151 ev->event_data.exit.process_pid = task->pid;
144 ev->event_data.exit.process_tgid = task->tgid; 152 ev->event_data.exit.process_tgid = task->tgid;
@@ -164,6 +172,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
164 struct cn_msg *msg; 172 struct cn_msg *msg;
165 struct proc_event *ev; 173 struct proc_event *ev;
166 __u8 buffer[CN_PROC_MSG_SIZE]; 174 __u8 buffer[CN_PROC_MSG_SIZE];
175 struct timespec ts;
167 176
168 if (atomic_read(&proc_event_num_listeners) < 1) 177 if (atomic_read(&proc_event_num_listeners) < 1)
169 return; 178 return;
@@ -171,7 +180,8 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
171 msg = (struct cn_msg*)buffer; 180 msg = (struct cn_msg*)buffer;
172 ev = (struct proc_event*)msg->data; 181 ev = (struct proc_event*)msg->data;
173 msg->seq = rcvd_seq; 182 msg->seq = rcvd_seq;
174 ktime_get_ts(&ev->timestamp); 183 ktime_get_ts(&ts); /* get high res monotonic timestamp */
184 ev->timestamp_ns = timespec_to_ns(&ts);
175 ev->cpu = -1; 185 ev->cpu = -1;
176 ev->what = PROC_EVENT_NONE; 186 ev->what = PROC_EVENT_NONE;
177 ev->event_data.ack.err = err; 187 ev->event_data.ack.err = err;
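
Editor's sketch: every connector event now carries its timestamp as a flat nanosecond count (timestamp_ns), computed with timespec_to_ns() from a local struct timespec, instead of embedding the timespec itself -- presumably so userspace sees a fixed-width field. A userspace analogue of that conversion, using clock_gettime(CLOCK_MONOTONIC) in place of ktime_get_ts():

#include <stdio.h>
#include <stdint.h>
#include <time.h>

/* same arithmetic as the kernel's timespec_to_ns() */
static int64_t timespec_to_ns(const struct timespec *ts)
{
        return (int64_t)ts->tv_sec * 1000000000LL + ts->tv_nsec;
}

int main(void)
{
        struct timespec ts;
        int64_t timestamp_ns;

        clock_gettime(CLOCK_MONOTONIC, &ts);    /* high-res monotonic timestamp */
        timestamp_ns = timespec_to_ns(&ts);

        printf("event timestamp: %lld ns\n", (long long)timestamp_ns);
        return 0;
}
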
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 8d328186f774..bc1088d9b379 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -364,10 +364,12 @@ static ssize_t store_##file_name \
364 if (ret != 1) \ 364 if (ret != 1) \
365 return -EINVAL; \ 365 return -EINVAL; \
366 \ 366 \
367 lock_cpu_hotplug(); \
367 mutex_lock(&policy->lock); \ 368 mutex_lock(&policy->lock); \
368 ret = __cpufreq_set_policy(policy, &new_policy); \ 369 ret = __cpufreq_set_policy(policy, &new_policy); \
369 policy->user_policy.object = policy->object; \ 370 policy->user_policy.object = policy->object; \
370 mutex_unlock(&policy->lock); \ 371 mutex_unlock(&policy->lock); \
372 unlock_cpu_hotplug(); \
371 \ 373 \
372 return ret ? ret : count; \ 374 return ret ? ret : count; \
373} 375}
@@ -1197,20 +1199,18 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
1197 *********************************************************************/ 1199 *********************************************************************/
1198 1200
1199 1201
1202/* Must be called with lock_cpu_hotplug held */
1200int __cpufreq_driver_target(struct cpufreq_policy *policy, 1203int __cpufreq_driver_target(struct cpufreq_policy *policy,
1201 unsigned int target_freq, 1204 unsigned int target_freq,
1202 unsigned int relation) 1205 unsigned int relation)
1203{ 1206{
1204 int retval = -EINVAL; 1207 int retval = -EINVAL;
1205 1208
1206 lock_cpu_hotplug();
1207 dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu, 1209 dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
1208 target_freq, relation); 1210 target_freq, relation);
1209 if (cpu_online(policy->cpu) && cpufreq_driver->target) 1211 if (cpu_online(policy->cpu) && cpufreq_driver->target)
1210 retval = cpufreq_driver->target(policy, target_freq, relation); 1212 retval = cpufreq_driver->target(policy, target_freq, relation);
1211 1213
1212 unlock_cpu_hotplug();
1213
1214 return retval; 1214 return retval;
1215} 1215}
1216EXPORT_SYMBOL_GPL(__cpufreq_driver_target); 1216EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
@@ -1225,17 +1225,23 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
1225 if (!policy) 1225 if (!policy)
1226 return -EINVAL; 1226 return -EINVAL;
1227 1227
1228 lock_cpu_hotplug();
1228 mutex_lock(&policy->lock); 1229 mutex_lock(&policy->lock);
1229 1230
1230 ret = __cpufreq_driver_target(policy, target_freq, relation); 1231 ret = __cpufreq_driver_target(policy, target_freq, relation);
1231 1232
1232 mutex_unlock(&policy->lock); 1233 mutex_unlock(&policy->lock);
1234 unlock_cpu_hotplug();
1233 1235
1234 cpufreq_cpu_put(policy); 1236 cpufreq_cpu_put(policy);
1235 return ret; 1237 return ret;
1236} 1238}
1237EXPORT_SYMBOL_GPL(cpufreq_driver_target); 1239EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1238 1240
1241/*
1242 * Locking: Must be called with the lock_cpu_hotplug() lock held
1243 * when "event" is CPUFREQ_GOV_LIMITS
1244 */
1239 1245
1240static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event) 1246static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
1241{ 1247{
@@ -1257,24 +1263,6 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
1257} 1263}
1258 1264
1259 1265
1260int cpufreq_governor(unsigned int cpu, unsigned int event)
1261{
1262 int ret = 0;
1263 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1264
1265 if (!policy)
1266 return -EINVAL;
1267
1268 mutex_lock(&policy->lock);
1269 ret = __cpufreq_governor(policy, event);
1270 mutex_unlock(&policy->lock);
1271
1272 cpufreq_cpu_put(policy);
1273 return ret;
1274}
1275EXPORT_SYMBOL_GPL(cpufreq_governor);
1276
1277
1278int cpufreq_register_governor(struct cpufreq_governor *governor) 1266int cpufreq_register_governor(struct cpufreq_governor *governor)
1279{ 1267{
1280 struct cpufreq_governor *t; 1268 struct cpufreq_governor *t;
@@ -1342,6 +1330,9 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1342EXPORT_SYMBOL(cpufreq_get_policy); 1330EXPORT_SYMBOL(cpufreq_get_policy);
1343 1331
1344 1332
1333/*
1334 * Locking: Must be called with the lock_cpu_hotplug() lock held
1335 */
1345static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy) 1336static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy)
1346{ 1337{
1347 int ret = 0; 1338 int ret = 0;
@@ -1436,6 +1427,8 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
1436 if (!data) 1427 if (!data)
1437 return -EINVAL; 1428 return -EINVAL;
1438 1429
1430 lock_cpu_hotplug();
1431
1439 /* lock this CPU */ 1432 /* lock this CPU */
1440 mutex_lock(&data->lock); 1433 mutex_lock(&data->lock);
1441 1434
@@ -1446,6 +1439,8 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
1446 data->user_policy.governor = data->governor; 1439 data->user_policy.governor = data->governor;
1447 1440
1448 mutex_unlock(&data->lock); 1441 mutex_unlock(&data->lock);
1442
1443 unlock_cpu_hotplug();
1449 cpufreq_cpu_put(data); 1444 cpufreq_cpu_put(data);
1450 1445
1451 return ret; 1446 return ret;
@@ -1469,6 +1464,7 @@ int cpufreq_update_policy(unsigned int cpu)
1469 if (!data) 1464 if (!data)
1470 return -ENODEV; 1465 return -ENODEV;
1471 1466
1467 lock_cpu_hotplug();
1472 mutex_lock(&data->lock); 1468 mutex_lock(&data->lock);
1473 1469
1474 dprintk("updating policy for CPU %u\n", cpu); 1470 dprintk("updating policy for CPU %u\n", cpu);
@@ -1494,7 +1490,7 @@ int cpufreq_update_policy(unsigned int cpu)
1494 ret = __cpufreq_set_policy(data, &policy); 1490 ret = __cpufreq_set_policy(data, &policy);
1495 1491
1496 mutex_unlock(&data->lock); 1492 mutex_unlock(&data->lock);
1497 1493 unlock_cpu_hotplug();
1498 cpufreq_cpu_put(data); 1494 cpufreq_cpu_put(data);
1499 return ret; 1495 return ret;
1500} 1496}
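
Editor's sketch: the cpufreq changes pull lock_cpu_hotplug() out of __cpufreq_driver_target() and take it at the outer entry points, always before the per-policy mutex, with comments documenting which helpers expect it held. The pthread model below (link with -lpthread) shows the resulting fixed lock order; the function names are illustrative, not the driver's API.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t policy_lock  = PTHREAD_MUTEX_INITIALIZER;

/* inner helper: caller must already hold hotplug_lock (and policy_lock) */
static int driver_target(int cpu, unsigned int freq)
{
        printf("cpu %d -> %u kHz\n", cpu, freq);
        return 0;
}

/* outer entry point: fixed order, hotplug lock first, then the policy lock */
static int set_target(int cpu, unsigned int freq)
{
        int ret;

        pthread_mutex_lock(&hotplug_lock);
        pthread_mutex_lock(&policy_lock);
        ret = driver_target(cpu, freq);
        pthread_mutex_unlock(&policy_lock);
        pthread_mutex_unlock(&hotplug_lock);
        return ret;
}

int main(void)
{
        return set_target(0, 1800000);
}
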
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index b3ebc8f01975..c4c578defabf 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -525,7 +525,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
525 break; 525 break;
526 526
527 case CPUFREQ_GOV_LIMITS: 527 case CPUFREQ_GOV_LIMITS:
528 lock_cpu_hotplug();
529 mutex_lock(&dbs_mutex); 528 mutex_lock(&dbs_mutex);
530 if (policy->max < this_dbs_info->cur_policy->cur) 529 if (policy->max < this_dbs_info->cur_policy->cur)
531 __cpufreq_driver_target( 530 __cpufreq_driver_target(
@@ -536,7 +535,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
536 this_dbs_info->cur_policy, 535 this_dbs_info->cur_policy,
537 policy->min, CPUFREQ_RELATION_L); 536 policy->min, CPUFREQ_RELATION_L);
538 mutex_unlock(&dbs_mutex); 537 mutex_unlock(&dbs_mutex);
539 unlock_cpu_hotplug();
540 break; 538 break;
541 } 539 }
542 return 0; 540 return 0;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 178f0c547eb7..52cf1f021825 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -309,7 +309,9 @@ static void do_dbs_timer(void *data)
309 if (!dbs_info->enable) 309 if (!dbs_info->enable)
310 return; 310 return;
311 311
312 lock_cpu_hotplug();
312 dbs_check_cpu(dbs_info); 313 dbs_check_cpu(dbs_info);
314 unlock_cpu_hotplug();
313 queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, 315 queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
314 usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); 316 usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
315} 317}
@@ -412,7 +414,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
412 break; 414 break;
413 415
414 case CPUFREQ_GOV_LIMITS: 416 case CPUFREQ_GOV_LIMITS:
415 lock_cpu_hotplug();
416 mutex_lock(&dbs_mutex); 417 mutex_lock(&dbs_mutex);
417 if (policy->max < this_dbs_info->cur_policy->cur) 418 if (policy->max < this_dbs_info->cur_policy->cur)
418 __cpufreq_driver_target(this_dbs_info->cur_policy, 419 __cpufreq_driver_target(this_dbs_info->cur_policy,
@@ -423,7 +424,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
423 policy->min, 424 policy->min,
424 CPUFREQ_RELATION_L); 425 CPUFREQ_RELATION_L);
425 mutex_unlock(&dbs_mutex); 426 mutex_unlock(&dbs_mutex);
426 unlock_cpu_hotplug();
427 break; 427 break;
428 } 428 }
429 return 0; 429 return 0;
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 44ae5e5b94cf..a06c204589cd 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -18,6 +18,7 @@
18#include <linux/spinlock.h> 18#include <linux/spinlock.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/cpufreq.h> 20#include <linux/cpufreq.h>
21#include <linux/cpu.h>
21#include <linux/types.h> 22#include <linux/types.h>
22#include <linux/fs.h> 23#include <linux/fs.h>
23#include <linux/sysfs.h> 24#include <linux/sysfs.h>
@@ -70,6 +71,7 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
70 71
71 dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); 72 dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
72 73
74 lock_cpu_hotplug();
73 mutex_lock(&userspace_mutex); 75 mutex_lock(&userspace_mutex);
74 if (!cpu_is_managed[policy->cpu]) 76 if (!cpu_is_managed[policy->cpu])
75 goto err; 77 goto err;
@@ -92,6 +94,7 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
92 94
93 err: 95 err:
94 mutex_unlock(&userspace_mutex); 96 mutex_unlock(&userspace_mutex);
97 unlock_cpu_hotplug();
95 return ret; 98 return ret;
96} 99}
97 100
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 53bba41f29bc..b6fb167e20f6 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -682,6 +682,7 @@ config BLK_DEV_SVWKS
682config BLK_DEV_SGIIOC4 682config BLK_DEV_SGIIOC4
683 tristate "Silicon Graphics IOC4 chipset ATA/ATAPI support" 683 tristate "Silicon Graphics IOC4 chipset ATA/ATAPI support"
684 depends on (IA64_SGI_SN2 || IA64_GENERIC) && SGI_IOC4 684 depends on (IA64_SGI_SN2 || IA64_GENERIC) && SGI_IOC4
685 select IDEPCI_SHARE_IRQ
685 help 686 help
686 This driver adds PIO & MultiMode DMA-2 support for the SGI IOC4 687 This driver adds PIO & MultiMode DMA-2 support for the SGI IOC4
687 chipset, which has one channel and can support two devices. 688 chipset, which has one channel and can support two devices.
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index f712e4cfd9dc..7cf3eb023521 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -776,7 +776,7 @@ static void update_ordered(ide_drive_t *drive)
776 * not available so we don't need to recheck that. 776 * not available so we don't need to recheck that.
777 */ 777 */
778 capacity = idedisk_capacity(drive); 778 capacity = idedisk_capacity(drive);
779 barrier = ide_id_has_flush_cache(id) && 779 barrier = ide_id_has_flush_cache(id) && !drive->noflush &&
780 (drive->addressing == 0 || capacity <= (1ULL << 28) || 780 (drive->addressing == 0 || capacity <= (1ULL << 28) ||
781 ide_id_has_flush_cache_ext(id)); 781 ide_id_has_flush_cache_ext(id));
782 782
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 98918fb6b2ce..7c3a13e1cf64 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -750,7 +750,7 @@ void ide_dma_verbose(ide_drive_t *drive)
750 goto bug_dma_off; 750 goto bug_dma_off;
751 printk(", DMA"); 751 printk(", DMA");
752 } else if (id->field_valid & 1) { 752 } else if (id->field_valid & 1) {
753 printk(", BUG"); 753 goto bug_dma_off;
754 } 754 }
755 return; 755 return;
756bug_dma_off: 756bug_dma_off:
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index 657165297dc7..77703acaec17 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -23,6 +23,7 @@
23#include <linux/hdreg.h> 23#include <linux/hdreg.h>
24#include <linux/ide.h> 24#include <linux/ide.h>
25#include <linux/bitops.h> 25#include <linux/bitops.h>
26#include <linux/nmi.h>
26 27
27#include <asm/byteorder.h> 28#include <asm/byteorder.h>
28#include <asm/irq.h> 29#include <asm/irq.h>
@@ -1243,6 +1244,7 @@ int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout)
1243 if (stat == 0xff) 1244 if (stat == 0xff)
1244 return -ENODEV; 1245 return -ENODEV;
1245 touch_softlockup_watchdog(); 1246 touch_softlockup_watchdog();
1247 touch_nmi_watchdog();
1246 } 1248 }
1247 return -EBUSY; 1249 return -EBUSY;
1248} 1250}
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 05fbd9298db7..defd4b4bd374 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -1539,7 +1539,7 @@ static int __init ide_setup(char *s)
1539 const char *hd_words[] = { 1539 const char *hd_words[] = {
1540 "none", "noprobe", "nowerr", "cdrom", "serialize", 1540 "none", "noprobe", "nowerr", "cdrom", "serialize",
1541 "autotune", "noautotune", "minus8", "swapdata", "bswap", 1541 "autotune", "noautotune", "minus8", "swapdata", "bswap",
1542 "minus11", "remap", "remap63", "scsi", NULL }; 1542 "noflush", "remap", "remap63", "scsi", NULL };
1543 unit = s[2] - 'a'; 1543 unit = s[2] - 'a';
1544 hw = unit / MAX_DRIVES; 1544 hw = unit / MAX_DRIVES;
1545 unit = unit % MAX_DRIVES; 1545 unit = unit % MAX_DRIVES;
@@ -1578,6 +1578,9 @@ static int __init ide_setup(char *s)
1578 case -10: /* "bswap" */ 1578 case -10: /* "bswap" */
1579 drive->bswap = 1; 1579 drive->bswap = 1;
1580 goto done; 1580 goto done;
1581 case -11: /* noflush */
1582 drive->noflush = 1;
1583 goto done;
1581 case -12: /* "remap" */ 1584 case -12: /* "remap" */
1582 drive->remap_0_to_1 = 1; 1585 drive->remap_0_to_1 = 1;
1583 goto done; 1586 goto done;
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
index 3cb04424d351..e9bad185968a 100644
--- a/drivers/ide/pci/it821x.c
+++ b/drivers/ide/pci/it821x.c
@@ -498,9 +498,14 @@ static int config_chipset_for_dma (ide_drive_t *drive)
498{ 498{
499 u8 speed = ide_dma_speed(drive, it821x_ratemask(drive)); 499 u8 speed = ide_dma_speed(drive, it821x_ratemask(drive));
500 500
501 config_it821x_chipset_for_pio(drive, !speed); 501 if (speed) {
502 it821x_tune_chipset(drive, speed); 502 config_it821x_chipset_for_pio(drive, 0);
503 return ide_dma_enable(drive); 503 it821x_tune_chipset(drive, speed);
504
505 return ide_dma_enable(drive);
506 }
507
508 return 0;
504} 509}
505 510
506/** 511/**
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index 44708b5e582d..d5d649f5ccdb 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -124,8 +124,6 @@ config PMAC_BACKLIGHT
124 bool "Backlight control for LCD screens" 124 bool "Backlight control for LCD screens"
125 depends on ADB_PMU && FB = y && (BROKEN || !PPC64) 125 depends on ADB_PMU && FB = y && (BROKEN || !PPC64)
126 select FB_BACKLIGHT 126 select FB_BACKLIGHT
127 select BACKLIGHT_CLASS_DEVICE
128 select BACKLIGHT_LCD_SUPPORT
129 help 127 help
130 Say Y here to enable Macintosh specific extensions of the generic 128 Say Y here to enable Macintosh specific extensions of the generic
131 backlight code. With this enabled, the brightness keys on older 129 backlight code. With this enabled, the brightness keys on older
diff --git a/drivers/macintosh/adbhid.c b/drivers/macintosh/adbhid.c
index 545be1ed6927..c69d23bb255e 100644
--- a/drivers/macintosh/adbhid.c
+++ b/drivers/macintosh/adbhid.c
@@ -45,14 +45,11 @@
45#include <linux/pmu.h> 45#include <linux/pmu.h>
46 46
47#include <asm/machdep.h> 47#include <asm/machdep.h>
48#include <asm/backlight.h>
48#ifdef CONFIG_PPC_PMAC 49#ifdef CONFIG_PPC_PMAC
49#include <asm/pmac_feature.h> 50#include <asm/pmac_feature.h>
50#endif 51#endif
51 52
52#ifdef CONFIG_PMAC_BACKLIGHT
53#include <asm/backlight.h>
54#endif
55
56MODULE_AUTHOR("Franz Sirl <Franz.Sirl-kernel@lauterbach.com>"); 53MODULE_AUTHOR("Franz Sirl <Franz.Sirl-kernel@lauterbach.com>");
57 54
58#define KEYB_KEYREG 0 /* register # for key up/down data */ 55#define KEYB_KEYREG 0 /* register # for key up/down data */
@@ -237,11 +234,6 @@ static struct adb_ids keyboard_ids;
237static struct adb_ids mouse_ids; 234static struct adb_ids mouse_ids;
238static struct adb_ids buttons_ids; 235static struct adb_ids buttons_ids;
239 236
240#ifdef CONFIG_PMAC_BACKLIGHT
241/* Exported to via-pmu.c */
242int disable_kernel_backlight = 0;
243#endif /* CONFIG_PMAC_BACKLIGHT */
244
245/* Kind of keyboard, see Apple technote 1152 */ 237/* Kind of keyboard, see Apple technote 1152 */
246#define ADB_KEYBOARD_UNKNOWN 0 238#define ADB_KEYBOARD_UNKNOWN 0
247#define ADB_KEYBOARD_ANSI 0x0100 239#define ADB_KEYBOARD_ANSI 0x0100
@@ -527,7 +519,7 @@ adbhid_buttons_input(unsigned char *data, int nb, struct pt_regs *regs, int auto
527 519
528 case 0xa: /* brightness decrease */ 520 case 0xa: /* brightness decrease */
529#ifdef CONFIG_PMAC_BACKLIGHT 521#ifdef CONFIG_PMAC_BACKLIGHT
530 if (!disable_kernel_backlight && down) 522 if (down)
531 pmac_backlight_key_down(); 523 pmac_backlight_key_down();
532#endif 524#endif
533 input_report_key(adbhid[id]->input, KEY_BRIGHTNESSDOWN, down); 525 input_report_key(adbhid[id]->input, KEY_BRIGHTNESSDOWN, down);
@@ -535,7 +527,7 @@ adbhid_buttons_input(unsigned char *data, int nb, struct pt_regs *regs, int auto
535 527
536 case 0x9: /* brightness increase */ 528 case 0x9: /* brightness increase */
537#ifdef CONFIG_PMAC_BACKLIGHT 529#ifdef CONFIG_PMAC_BACKLIGHT
538 if (!disable_kernel_backlight && down) 530 if (down)
539 pmac_backlight_key_up(); 531 pmac_backlight_key_up();
540#endif 532#endif
541 input_report_key(adbhid[id]->input, KEY_BRIGHTNESSUP, down); 533 input_report_key(adbhid[id]->input, KEY_BRIGHTNESSUP, down);
diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
index b42d05f2aaff..d3f8d75bcbb4 100644
--- a/drivers/macintosh/via-pmu-backlight.c
+++ b/drivers/macintosh/via-pmu-backlight.c
@@ -15,8 +15,9 @@
15 15
16#define MAX_PMU_LEVEL 0xFF 16#define MAX_PMU_LEVEL 0xFF
17 17
18static struct device_node *vias;
19static struct backlight_properties pmu_backlight_data; 18static struct backlight_properties pmu_backlight_data;
19static spinlock_t pmu_backlight_lock;
20static int sleeping;
20 21
21static int pmu_backlight_get_level_brightness(struct fb_info *info, 22static int pmu_backlight_get_level_brightness(struct fb_info *info,
22 int level) 23 int level)
@@ -40,23 +41,36 @@ static int pmu_backlight_update_status(struct backlight_device *bd)
40{ 41{
41 struct fb_info *info = class_get_devdata(&bd->class_dev); 42 struct fb_info *info = class_get_devdata(&bd->class_dev);
42 struct adb_request req; 43 struct adb_request req;
43 int pmulevel, level = bd->props->brightness; 44 unsigned long flags;
45 int level = bd->props->brightness;
44 46
45 if (vias == NULL) 47 spin_lock_irqsave(&pmu_backlight_lock, flags);
46 return -ENODEV; 48
49 /* Don't update brightness when sleeping */
50 if (sleeping)
51 goto out;
47 52
48 if (bd->props->power != FB_BLANK_UNBLANK || 53 if (bd->props->power != FB_BLANK_UNBLANK ||
49 bd->props->fb_blank != FB_BLANK_UNBLANK) 54 bd->props->fb_blank != FB_BLANK_UNBLANK)
50 level = 0; 55 level = 0;
51 56
52 pmulevel = pmu_backlight_get_level_brightness(info, level); 57 if (level > 0) {
58 int pmulevel = pmu_backlight_get_level_brightness(info, level);
53 59
54 pmu_request(&req, NULL, 2, PMU_BACKLIGHT_BRIGHT, pmulevel); 60 pmu_request(&req, NULL, 2, PMU_BACKLIGHT_BRIGHT, pmulevel);
55 pmu_wait_complete(&req); 61 pmu_wait_complete(&req);
56 62
57 pmu_request(&req, NULL, 2, PMU_POWER_CTRL, 63 pmu_request(&req, NULL, 2, PMU_POWER_CTRL,
58 PMU_POW_BACKLIGHT | (level > 0 ? PMU_POW_ON : PMU_POW_OFF)); 64 PMU_POW_BACKLIGHT | PMU_POW_ON);
59 pmu_wait_complete(&req); 65 pmu_wait_complete(&req);
66 } else {
67 pmu_request(&req, NULL, 2, PMU_POWER_CTRL,
68 PMU_POW_BACKLIGHT | PMU_POW_OFF);
69 pmu_wait_complete(&req);
70 }
71
72out:
73 spin_unlock_irqrestore(&pmu_backlight_lock, flags);
60 74
61 return 0; 75 return 0;
62} 76}
@@ -73,15 +87,39 @@ static struct backlight_properties pmu_backlight_data = {
73 .max_brightness = (FB_BACKLIGHT_LEVELS - 1), 87 .max_brightness = (FB_BACKLIGHT_LEVELS - 1),
74}; 88};
75 89
76void __init pmu_backlight_init(struct device_node *in_vias) 90#ifdef CONFIG_PM
91static int pmu_backlight_sleep_call(struct pmu_sleep_notifier *self, int when)
92{
93 unsigned long flags;
94
95 spin_lock_irqsave(&pmu_backlight_lock, flags);
96
97 switch (when) {
98 case PBOOK_SLEEP_REQUEST:
99 sleeping = 1;
100 break;
101 case PBOOK_WAKE:
102 sleeping = 0;
103 break;
104 }
105
106 spin_unlock_irqrestore(&pmu_backlight_lock, flags);
107
108 return PBOOK_SLEEP_OK;
109}
110
111static struct pmu_sleep_notifier pmu_backlight_sleep_notif = {
112 .notifier_call = pmu_backlight_sleep_call,
113};
114#endif
115
116void __init pmu_backlight_init()
77{ 117{
78 struct backlight_device *bd; 118 struct backlight_device *bd;
79 struct fb_info *info; 119 struct fb_info *info;
80 char name[10]; 120 char name[10];
81 int level, autosave; 121 int level, autosave;
82 122
83 vias = in_vias;
84
85 /* Special case for the old PowerBook since I can't test on it */ 123 /* Special case for the old PowerBook since I can't test on it */
86 autosave = 124 autosave =
87 machine_is_compatible("AAPL,3400/2400") || 125 machine_is_compatible("AAPL,3400/2400") ||
@@ -141,6 +179,10 @@ void __init pmu_backlight_init(struct device_node *in_vias)
141 pmac_backlight = bd; 179 pmac_backlight = bd;
142 mutex_unlock(&pmac_backlight_mutex); 180 mutex_unlock(&pmac_backlight_mutex);
143 181
182#ifdef CONFIG_PM
183 pmu_register_sleep_notifier(&pmu_backlight_sleep_notif);
184#endif
185
144 printk("pmubl: Backlight initialized (%s)\n", name); 186 printk("pmubl: Backlight initialized (%s)\n", name);
145 187
146 return; 188 return;
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 06ca80bfd6b9..ea386801e215 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -16,7 +16,6 @@
16 * a sleep or a freq. switch 16 * a sleep or a freq. switch
17 * - Move sleep code out of here to pmac_pm, merge into new 17 * - Move sleep code out of here to pmac_pm, merge into new
18 * common PM infrastructure 18 * common PM infrastructure
19 * - Move backlight code out as well
20 * - Save/Restore PCI space properly 19 * - Save/Restore PCI space properly
21 * 20 *
22 */ 21 */
@@ -60,9 +59,7 @@
60#include <asm/mmu_context.h> 59#include <asm/mmu_context.h>
61#include <asm/cputable.h> 60#include <asm/cputable.h>
62#include <asm/time.h> 61#include <asm/time.h>
63#ifdef CONFIG_PMAC_BACKLIGHT
64#include <asm/backlight.h> 62#include <asm/backlight.h>
65#endif
66 63
67#include "via-pmu-event.h" 64#include "via-pmu-event.h"
68 65
@@ -177,10 +174,6 @@ static int query_batt_timer = BATTERY_POLLING_COUNT;
177static struct adb_request batt_req; 174static struct adb_request batt_req;
178static struct proc_dir_entry *proc_pmu_batt[PMU_MAX_BATTERIES]; 175static struct proc_dir_entry *proc_pmu_batt[PMU_MAX_BATTERIES];
179 176
180#if defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_PMAC_BACKLIGHT)
181extern int disable_kernel_backlight;
182#endif /* defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_PMAC_BACKLIGHT) */
183
184int __fake_sleep; 177int __fake_sleep;
185int asleep; 178int asleep;
186BLOCKING_NOTIFIER_HEAD(sleep_notifier_list); 179BLOCKING_NOTIFIER_HEAD(sleep_notifier_list);
@@ -466,7 +459,7 @@ static int __init via_pmu_dev_init(void)
466 459
467#ifdef CONFIG_PMAC_BACKLIGHT 460#ifdef CONFIG_PMAC_BACKLIGHT
468 /* Initialize backlight */ 461 /* Initialize backlight */
469 pmu_backlight_init(vias); 462 pmu_backlight_init();
470#endif 463#endif
471 464
472#ifdef CONFIG_PPC32 465#ifdef CONFIG_PPC32
@@ -1403,11 +1396,8 @@ next:
1403 else if ((1 << pirq) & PMU_INT_SNDBRT) { 1396 else if ((1 << pirq) & PMU_INT_SNDBRT) {
1404#ifdef CONFIG_PMAC_BACKLIGHT 1397#ifdef CONFIG_PMAC_BACKLIGHT
1405 if (len == 3) 1398 if (len == 3)
1406#ifdef CONFIG_INPUT_ADBHID 1399 pmac_backlight_set_legacy_brightness_pmu(data[1] >> 4);
1407 if (!disable_kernel_backlight) 1400#endif
1408#endif /* CONFIG_INPUT_ADBHID */
1409 pmac_backlight_set_legacy_brightness(data[1] >> 4);
1410#endif /* CONFIG_PMAC_BACKLIGHT */
1411 } 1401 }
1412 /* Tick interrupt */ 1402 /* Tick interrupt */
1413 else if ((1 << pirq) & PMU_INT_TICK) { 1403 else if ((1 << pirq) & PMU_INT_TICK) {
@@ -2414,7 +2404,7 @@ struct pmu_private {
2414 spinlock_t lock; 2404 spinlock_t lock;
2415#if defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_PMAC_BACKLIGHT) 2405#if defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_PMAC_BACKLIGHT)
2416 int backlight_locker; 2406 int backlight_locker;
2417#endif /* defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_PMAC_BACKLIGHT) */ 2407#endif
2418}; 2408};
2419 2409
2420static LIST_HEAD(all_pmu_pvt); 2410static LIST_HEAD(all_pmu_pvt);
@@ -2464,7 +2454,7 @@ pmu_open(struct inode *inode, struct file *file)
2464 spin_lock_irqsave(&all_pvt_lock, flags); 2454 spin_lock_irqsave(&all_pvt_lock, flags);
2465#if defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_PMAC_BACKLIGHT) 2455#if defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_PMAC_BACKLIGHT)
2466 pp->backlight_locker = 0; 2456 pp->backlight_locker = 0;
2467#endif /* defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_PMAC_BACKLIGHT) */ 2457#endif
2468 list_add(&pp->list, &all_pmu_pvt); 2458 list_add(&pp->list, &all_pmu_pvt);
2469 spin_unlock_irqrestore(&all_pvt_lock, flags); 2459 spin_unlock_irqrestore(&all_pvt_lock, flags);
2470 file->private_data = pp; 2460 file->private_data = pp;
@@ -2559,13 +2549,12 @@ pmu_release(struct inode *inode, struct file *file)
2559 spin_lock_irqsave(&all_pvt_lock, flags); 2549 spin_lock_irqsave(&all_pvt_lock, flags);
2560 list_del(&pp->list); 2550 list_del(&pp->list);
2561 spin_unlock_irqrestore(&all_pvt_lock, flags); 2551 spin_unlock_irqrestore(&all_pvt_lock, flags);
2552
2562#if defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_PMAC_BACKLIGHT) 2553#if defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_PMAC_BACKLIGHT)
2563 if (pp->backlight_locker) { 2554 if (pp->backlight_locker)
2564 spin_lock_irqsave(&pmu_lock, flags); 2555 pmac_backlight_enable();
2565 disable_kernel_backlight--; 2556#endif
2566 spin_unlock_irqrestore(&pmu_lock, flags); 2557
2567 }
2568#endif /* defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_PMAC_BACKLIGHT) */
2569 kfree(pp); 2558 kfree(pp);
2570 } 2559 }
2571 unlock_kernel(); 2560 unlock_kernel();
@@ -2642,18 +2631,18 @@ pmu_ioctl(struct inode * inode, struct file *filp,
2642#ifdef CONFIG_INPUT_ADBHID 2631#ifdef CONFIG_INPUT_ADBHID
2643 case PMU_IOC_GRAB_BACKLIGHT: { 2632 case PMU_IOC_GRAB_BACKLIGHT: {
2644 struct pmu_private *pp = filp->private_data; 2633 struct pmu_private *pp = filp->private_data;
2645 unsigned long flags;
2646 2634
2647 if (pp->backlight_locker) 2635 if (pp->backlight_locker)
2648 return 0; 2636 return 0;
2637
2649 pp->backlight_locker = 1; 2638 pp->backlight_locker = 1;
2650 spin_lock_irqsave(&pmu_lock, flags); 2639 pmac_backlight_disable();
2651 disable_kernel_backlight++; 2640
2652 spin_unlock_irqrestore(&pmu_lock, flags);
2653 return 0; 2641 return 0;
2654 } 2642 }
2655#endif /* CONFIG_INPUT_ADBHID */ 2643#endif /* CONFIG_INPUT_ADBHID */
2656#endif /* CONFIG_PMAC_BACKLIGHT_LEGACY */ 2644#endif /* CONFIG_PMAC_BACKLIGHT_LEGACY */
2645
2657 case PMU_IOC_GET_MODEL: 2646 case PMU_IOC_GET_MODEL:
2658 return put_user(pmu_kind, argp); 2647 return put_user(pmu_kind, argp);
2659 case PMU_IOC_HAS_ADB: 2648 case PMU_IOC_HAS_ADB:
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 07ca9480a6fe..c3e52c806b13 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -620,7 +620,7 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
620 return -ENXIO; 620 return -ENXIO;
621 } 621 }
622 dev_info(&mgp->pdev->dev, "handoff confirmed\n"); 622 dev_info(&mgp->pdev->dev, "handoff confirmed\n");
623 myri10ge_dummy_rdma(mgp, mgp->tx.boundary != 4096); 623 myri10ge_dummy_rdma(mgp, 1);
624 624
625 return 0; 625 return 0;
626} 626}
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 82200bfaa8ed..7de9a07b2ac2 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -516,10 +516,7 @@ static int skge_set_pauseparam(struct net_device *dev,
516/* Chip internal frequency for clock calculations */ 516/* Chip internal frequency for clock calculations */
517static inline u32 hwkhz(const struct skge_hw *hw) 517static inline u32 hwkhz(const struct skge_hw *hw)
518{ 518{
519 if (hw->chip_id == CHIP_ID_GENESIS) 519 return (hw->chip_id == CHIP_ID_GENESIS) ? 53125 : 78125;
520 return 53215; /* or: 53.125 MHz */
521 else
522 return 78215; /* or: 78.125 MHz */
523} 520}
524 521
525/* Chip HZ to microseconds */ 522/* Chip HZ to microseconds */
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 1ef9fd39a79a..0e3fdf7c6dd3 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1537,7 +1537,7 @@ static int __init sparc_lance_init(void)
1537{ 1537{
1538 if ((idprom->id_machtype == (SM_SUN4|SM_4_330)) || 1538 if ((idprom->id_machtype == (SM_SUN4|SM_4_330)) ||
1539 (idprom->id_machtype == (SM_SUN4|SM_4_470))) { 1539 (idprom->id_machtype == (SM_SUN4|SM_4_470))) {
1540 memset(&sun4_sdev, 0, sizeof(sdev)); 1540 memset(&sun4_sdev, 0, sizeof(struct sbus_dev));
1541 sun4_sdev.reg_addrs[0].phys_addr = sun4_eth_physaddr; 1541 sun4_sdev.reg_addrs[0].phys_addr = sun4_eth_physaddr;
1542 sun4_sdev.irqs[0] = 6; 1542 sun4_sdev.irqs[0] = 6;
1543 return sparc_lance_probe_one(&sun4_sdev, NULL, NULL); 1543 return sparc_lance_probe_one(&sun4_sdev, NULL, NULL);
@@ -1547,16 +1547,16 @@ static int __init sparc_lance_init(void)
1547 1547
1548static int __exit sunlance_sun4_remove(void) 1548static int __exit sunlance_sun4_remove(void)
1549{ 1549{
1550 struct lance_private *lp = dev_get_drvdata(&sun4_sdev->dev); 1550 struct lance_private *lp = dev_get_drvdata(&sun4_sdev.ofdev.dev);
1551 struct net_device *net_dev = lp->dev; 1551 struct net_device *net_dev = lp->dev;
1552 1552
1553 unregister_netdevice(net_dev); 1553 unregister_netdevice(net_dev);
1554 1554
1555 lance_free_hwresources(root_lance_dev); 1555 lance_free_hwresources(lp);
1556 1556
1557 free_netdev(net_dev); 1557 free_netdev(net_dev);
1558 1558
1559 dev_set_drvdata(&sun4_sdev->dev, NULL); 1559 dev_set_drvdata(&sun4_sdev.ofdev.dev, NULL);
1560 1560
1561 return 0; 1561 return 0;
1562} 1562}
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index ce6f3be86da0..1b8138f641e3 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
68 68
69#define DRV_MODULE_NAME "tg3" 69#define DRV_MODULE_NAME "tg3"
70#define PFX DRV_MODULE_NAME ": " 70#define PFX DRV_MODULE_NAME ": "
71#define DRV_MODULE_VERSION "3.62" 71#define DRV_MODULE_VERSION "3.63"
72#define DRV_MODULE_RELDATE "June 30, 2006" 72#define DRV_MODULE_RELDATE "July 25, 2006"
73 73
74#define TG3_DEF_MAC_MODE 0 74#define TG3_DEF_MAC_MODE 0
75#define TG3_DEF_RX_MODE 0 75#define TG3_DEF_RX_MODE 0
@@ -3590,6 +3590,28 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3590static int tg3_init_hw(struct tg3 *, int); 3590static int tg3_init_hw(struct tg3 *, int);
3591static int tg3_halt(struct tg3 *, int, int); 3591static int tg3_halt(struct tg3 *, int, int);
3592 3592
3593/* Restart hardware after configuration changes, self-test, etc.
3594 * Invoked with tp->lock held.
3595 */
3596static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
3597{
3598 int err;
3599
3600 err = tg3_init_hw(tp, reset_phy);
3601 if (err) {
3602 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
3603 "aborting.\n", tp->dev->name);
3604 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3605 tg3_full_unlock(tp);
3606 del_timer_sync(&tp->timer);
3607 tp->irq_sync = 0;
3608 netif_poll_enable(tp->dev);
3609 dev_close(tp->dev);
3610 tg3_full_lock(tp, 0);
3611 }
3612 return err;
3613}
3614
3593#ifdef CONFIG_NET_POLL_CONTROLLER 3615#ifdef CONFIG_NET_POLL_CONTROLLER
3594static void tg3_poll_controller(struct net_device *dev) 3616static void tg3_poll_controller(struct net_device *dev)
3595{ 3617{
@@ -3630,13 +3652,15 @@ static void tg3_reset_task(void *_data)
3630 } 3652 }
3631 3653
3632 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); 3654 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3633 tg3_init_hw(tp, 1); 3655 if (tg3_init_hw(tp, 1))
3656 goto out;
3634 3657
3635 tg3_netif_start(tp); 3658 tg3_netif_start(tp);
3636 3659
3637 if (restart_timer) 3660 if (restart_timer)
3638 mod_timer(&tp->timer, jiffies + 1); 3661 mod_timer(&tp->timer, jiffies + 1);
3639 3662
3663out:
3640 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK; 3664 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3641 3665
3642 tg3_full_unlock(tp); 3666 tg3_full_unlock(tp);
@@ -4124,6 +4148,7 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4124static int tg3_change_mtu(struct net_device *dev, int new_mtu) 4148static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4125{ 4149{
4126 struct tg3 *tp = netdev_priv(dev); 4150 struct tg3 *tp = netdev_priv(dev);
4151 int err;
4127 4152
4128 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp)) 4153 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4129 return -EINVAL; 4154 return -EINVAL;
@@ -4144,13 +4169,14 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4144 4169
4145 tg3_set_mtu(dev, tp, new_mtu); 4170 tg3_set_mtu(dev, tp, new_mtu);
4146 4171
4147 tg3_init_hw(tp, 0); 4172 err = tg3_restart_hw(tp, 0);
4148 4173
4149 tg3_netif_start(tp); 4174 if (!err)
4175 tg3_netif_start(tp);
4150 4176
4151 tg3_full_unlock(tp); 4177 tg3_full_unlock(tp);
4152 4178
4153 return 0; 4179 return err;
4154} 4180}
4155 4181
4156/* Free up pending packets in all rx/tx rings. 4182/* Free up pending packets in all rx/tx rings.
@@ -4232,7 +4258,7 @@ static void tg3_free_rings(struct tg3 *tp)
4232 * end up in the driver. tp->{tx,}lock are held and thus 4258 * end up in the driver. tp->{tx,}lock are held and thus
4233 * we may not sleep. 4259 * we may not sleep.
4234 */ 4260 */
4235static void tg3_init_rings(struct tg3 *tp) 4261static int tg3_init_rings(struct tg3 *tp)
4236{ 4262{
4237 u32 i; 4263 u32 i;
4238 4264
@@ -4281,18 +4307,38 @@ static void tg3_init_rings(struct tg3 *tp)
4281 4307
4282 /* Now allocate fresh SKBs for each rx ring. */ 4308 /* Now allocate fresh SKBs for each rx ring. */
4283 for (i = 0; i < tp->rx_pending; i++) { 4309 for (i = 0; i < tp->rx_pending; i++) {
4284 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, 4310 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4285 -1, i) < 0) 4311 printk(KERN_WARNING PFX
4312 "%s: Using a smaller RX standard ring, "
4313 "only %d out of %d buffers were allocated "
4314 "successfully.\n",
4315 tp->dev->name, i, tp->rx_pending);
4316 if (i == 0)
4317 return -ENOMEM;
4318 tp->rx_pending = i;
4286 break; 4319 break;
4320 }
4287 } 4321 }
4288 4322
4289 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { 4323 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4290 for (i = 0; i < tp->rx_jumbo_pending; i++) { 4324 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4291 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO, 4325 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4292 -1, i) < 0) 4326 -1, i) < 0) {
4327 printk(KERN_WARNING PFX
4328 "%s: Using a smaller RX jumbo ring, "
4329 "only %d out of %d buffers were "
4330 "allocated successfully.\n",
4331 tp->dev->name, i, tp->rx_jumbo_pending);
4332 if (i == 0) {
4333 tg3_free_rings(tp);
4334 return -ENOMEM;
4335 }
4336 tp->rx_jumbo_pending = i;
4293 break; 4337 break;
4338 }
4294 } 4339 }
4295 } 4340 }
4341 return 0;
4296} 4342}
4297 4343
4298/* 4344/*
@@ -5815,6 +5861,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
5815{ 5861{
5816 struct tg3 *tp = netdev_priv(dev); 5862 struct tg3 *tp = netdev_priv(dev);
5817 struct sockaddr *addr = p; 5863 struct sockaddr *addr = p;
5864 int err = 0;
5818 5865
5819 if (!is_valid_ether_addr(addr->sa_data)) 5866 if (!is_valid_ether_addr(addr->sa_data))
5820 return -EINVAL; 5867 return -EINVAL;
@@ -5832,9 +5879,9 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
5832 tg3_full_lock(tp, 1); 5879 tg3_full_lock(tp, 1);
5833 5880
5834 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 5881 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5835 tg3_init_hw(tp, 0); 5882 err = tg3_restart_hw(tp, 0);
5836 5883 if (!err)
5837 tg3_netif_start(tp); 5884 tg3_netif_start(tp);
5838 tg3_full_unlock(tp); 5885 tg3_full_unlock(tp);
5839 } else { 5886 } else {
5840 spin_lock_bh(&tp->lock); 5887 spin_lock_bh(&tp->lock);
@@ -5842,7 +5889,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
5842 spin_unlock_bh(&tp->lock); 5889 spin_unlock_bh(&tp->lock);
5843 } 5890 }
5844 5891
5845 return 0; 5892 return err;
5846} 5893}
5847 5894
5848/* tp->lock is held. */ 5895/* tp->lock is held. */
@@ -5942,7 +5989,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
5942 * can only do this after the hardware has been 5989 * can only do this after the hardware has been
5943 * successfully reset. 5990 * successfully reset.
5944 */ 5991 */
5945 tg3_init_rings(tp); 5992 err = tg3_init_rings(tp);
5993 if (err)
5994 return err;
5946 5995
5947 /* This value is determined during the probe time DMA 5996 /* This value is determined during the probe time DMA
5948 * engine test, tg3_test_dma. 5997 * engine test, tg3_test_dma.
@@ -7956,7 +8005,7 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *
7956static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) 8005static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7957{ 8006{
7958 struct tg3 *tp = netdev_priv(dev); 8007 struct tg3 *tp = netdev_priv(dev);
7959 int irq_sync = 0; 8008 int irq_sync = 0, err = 0;
7960 8009
7961 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) || 8010 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7962 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) || 8011 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
@@ -7980,13 +8029,14 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
7980 8029
7981 if (netif_running(dev)) { 8030 if (netif_running(dev)) {
7982 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 8031 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7983 tg3_init_hw(tp, 1); 8032 err = tg3_restart_hw(tp, 1);
7984 tg3_netif_start(tp); 8033 if (!err)
8034 tg3_netif_start(tp);
7985 } 8035 }
7986 8036
7987 tg3_full_unlock(tp); 8037 tg3_full_unlock(tp);
7988 8038
7989 return 0; 8039 return err;
7990} 8040}
7991 8041
7992static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 8042static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
@@ -8001,7 +8051,7 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam
8001static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 8051static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8002{ 8052{
8003 struct tg3 *tp = netdev_priv(dev); 8053 struct tg3 *tp = netdev_priv(dev);
8004 int irq_sync = 0; 8054 int irq_sync = 0, err = 0;
8005 8055
8006 if (netif_running(dev)) { 8056 if (netif_running(dev)) {
8007 tg3_netif_stop(tp); 8057 tg3_netif_stop(tp);
@@ -8025,13 +8075,14 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
8025 8075
8026 if (netif_running(dev)) { 8076 if (netif_running(dev)) {
8027 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 8077 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8028 tg3_init_hw(tp, 1); 8078 err = tg3_restart_hw(tp, 1);
8029 tg3_netif_start(tp); 8079 if (!err)
8080 tg3_netif_start(tp);
8030 } 8081 }
8031 8082
8032 tg3_full_unlock(tp); 8083 tg3_full_unlock(tp);
8033 8084
8034 return 0; 8085 return err;
8035} 8086}
8036 8087
8037static u32 tg3_get_rx_csum(struct net_device *dev) 8088static u32 tg3_get_rx_csum(struct net_device *dev)
@@ -8666,7 +8717,9 @@ static int tg3_test_loopback(struct tg3 *tp)
8666 if (!netif_running(tp->dev)) 8717 if (!netif_running(tp->dev))
8667 return TG3_LOOPBACK_FAILED; 8718 return TG3_LOOPBACK_FAILED;
8668 8719
8669 tg3_reset_hw(tp, 1); 8720 err = tg3_reset_hw(tp, 1);
8721 if (err)
8722 return TG3_LOOPBACK_FAILED;
8670 8723
8671 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) 8724 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8672 err |= TG3_MAC_LOOPBACK_FAILED; 8725 err |= TG3_MAC_LOOPBACK_FAILED;
@@ -8740,8 +8793,8 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8740 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 8793 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8741 if (netif_running(dev)) { 8794 if (netif_running(dev)) {
8742 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 8795 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8743 tg3_init_hw(tp, 1); 8796 if (!tg3_restart_hw(tp, 1))
8744 tg3_netif_start(tp); 8797 tg3_netif_start(tp);
8745 } 8798 }
8746 8799
8747 tg3_full_unlock(tp); 8800 tg3_full_unlock(tp);
@@ -11699,7 +11752,8 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11699 tg3_full_lock(tp, 0); 11752 tg3_full_lock(tp, 0);
11700 11753
11701 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 11754 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11702 tg3_init_hw(tp, 1); 11755 if (tg3_restart_hw(tp, 1))
11756 goto out;
11703 11757
11704 tp->timer.expires = jiffies + tp->timer_offset; 11758 tp->timer.expires = jiffies + tp->timer_offset;
11705 add_timer(&tp->timer); 11759 add_timer(&tp->timer);
@@ -11707,6 +11761,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11707 netif_device_attach(dev); 11761 netif_device_attach(dev);
11708 tg3_netif_start(tp); 11762 tg3_netif_start(tp);
11709 11763
11764out:
11710 tg3_full_unlock(tp); 11765 tg3_full_unlock(tp);
11711 } 11766 }
11712 11767
@@ -11733,16 +11788,19 @@ static int tg3_resume(struct pci_dev *pdev)
11733 tg3_full_lock(tp, 0); 11788 tg3_full_lock(tp, 0);
11734 11789
11735 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 11790 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11736 tg3_init_hw(tp, 1); 11791 err = tg3_restart_hw(tp, 1);
11792 if (err)
11793 goto out;
11737 11794
11738 tp->timer.expires = jiffies + tp->timer_offset; 11795 tp->timer.expires = jiffies + tp->timer_offset;
11739 add_timer(&tp->timer); 11796 add_timer(&tp->timer);
11740 11797
11741 tg3_netif_start(tp); 11798 tg3_netif_start(tp);
11742 11799
11800out:
11743 tg3_full_unlock(tp); 11801 tg3_full_unlock(tp);
11744 11802
11745 return 0; 11803 return err;
11746} 11804}
11747 11805
11748static struct pci_driver tg3_driver = { 11806static struct pci_driver tg3_driver = {
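Annotation: the tg3 changes above route every "halt, then re-initialize" sequence through the new tg3_restart_hw() so that a failure in ring setup is propagated instead of being ignored, and the transmit path is only restarted on success. A hedged model of that wrapper-and-caller shape, using hypothetical names rather than the driver's own:

struct nic;				/* opaque driver state, hypothetical */
int nic_init_hw(struct nic *np);	/* may fail, e.g. -ENOMEM from ring setup */
void nic_halt(struct nic *np);
void nic_start_queue(struct nic *np);

static int nic_restart_hw(struct nic *np)
{
	int err = nic_init_hw(np);

	if (err) {
		nic_halt(np);		/* leave the device cleanly stopped */
		return err;
	}
	return 0;
}

/* Callers then all follow the same shape the tg3 hunks switch to: */
static int nic_reconfigure(struct nic *np)
{
	int err;

	nic_halt(np);
	err = nic_restart_hw(np);
	if (!err)
		nic_start_queue(np);	/* only restart TX/RX on success */
	return err;
}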
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index f5b0078eb4ad..aa9cd92f46b2 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -2742,7 +2742,7 @@ static u32 check_connection_type(struct mac_regs __iomem * regs)
2742 2742
2743 if (PHYSR0 & PHYSR0_SPDG) 2743 if (PHYSR0 & PHYSR0_SPDG)
2744 status |= VELOCITY_SPEED_1000; 2744 status |= VELOCITY_SPEED_1000;
2745 if (PHYSR0 & PHYSR0_SPD10) 2745 else if (PHYSR0 & PHYSR0_SPD10)
2746 status |= VELOCITY_SPEED_10; 2746 status |= VELOCITY_SPEED_10;
2747 else 2747 else
2748 status |= VELOCITY_SPEED_100; 2748 status |= VELOCITY_SPEED_100;
@@ -2851,8 +2851,17 @@ static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd
2851 u32 status; 2851 u32 status;
2852 status = check_connection_type(vptr->mac_regs); 2852 status = check_connection_type(vptr->mac_regs);
2853 2853
2854 cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; 2854 cmd->supported = SUPPORTED_TP |
2855 if (status & VELOCITY_SPEED_100) 2855 SUPPORTED_Autoneg |
2856 SUPPORTED_10baseT_Half |
2857 SUPPORTED_10baseT_Full |
2858 SUPPORTED_100baseT_Half |
2859 SUPPORTED_100baseT_Full |
2860 SUPPORTED_1000baseT_Half |
2861 SUPPORTED_1000baseT_Full;
2862 if (status & VELOCITY_SPEED_1000)
2863 cmd->speed = SPEED_1000;
2864 else if (status & VELOCITY_SPEED_100)
2856 cmd->speed = SPEED_100; 2865 cmd->speed = SPEED_100;
2857 else 2866 else
2858 cmd->speed = SPEED_10; 2867 cmd->speed = SPEED_10;
@@ -2896,7 +2905,7 @@ static u32 velocity_get_link(struct net_device *dev)
2896{ 2905{
2897 struct velocity_info *vptr = netdev_priv(dev); 2906 struct velocity_info *vptr = netdev_priv(dev);
2898 struct mac_regs __iomem * regs = vptr->mac_regs; 2907 struct mac_regs __iomem * regs = vptr->mac_regs;
2899 return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 0 : 1; 2908 return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
2900} 2909}
2901 2910
2902static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 2911static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index fa9d2c4edc93..2e8ac995d56f 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -447,6 +447,7 @@ config AIRO_CS
447 tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" 447 tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards"
448 depends on NET_RADIO && PCMCIA && (BROKEN || !M32R) 448 depends on NET_RADIO && PCMCIA && (BROKEN || !M32R)
449 select CRYPTO 449 select CRYPTO
450 select CRYPTO_AES
450 ---help--- 451 ---help---
451 This is the standard Linux driver to support Cisco/Aironet PCMCIA 452 This is the standard Linux driver to support Cisco/Aironet PCMCIA
452 802.11 wireless cards. This driver is the same as the Aironet 453 802.11 wireless cards. This driver is the same as the Aironet
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index 3889f79e7128..df317c1e12a8 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -3701,7 +3701,7 @@ static void bcm43xx_ieee80211_set_security(struct net_device *net_dev,
3701 } 3701 }
3702 if (sec->flags & SEC_AUTH_MODE) { 3702 if (sec->flags & SEC_AUTH_MODE) {
3703 secinfo->auth_mode = sec->auth_mode; 3703 secinfo->auth_mode = sec->auth_mode;
3704 dprintk(", .auth_mode = %d\n", sec->auth_mode); 3704 dprintk(", .auth_mode = %d", sec->auth_mode);
3705 } 3705 }
3706 dprintk("\n"); 3706 dprintk("\n");
3707 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED && 3707 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED &&
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index d6ed5781b93a..317ace7f9aae 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -2875,7 +2875,7 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev,
2875 if (orinoco_lock(priv, &flags) != 0) 2875 if (orinoco_lock(priv, &flags) != 0)
2876 return -EBUSY; 2876 return -EBUSY;
2877 2877
2878 if (erq->pointer) { 2878 if (erq->length > 0) {
2879 if ((index < 0) || (index >= ORINOCO_MAX_KEYS)) 2879 if ((index < 0) || (index >= ORINOCO_MAX_KEYS))
2880 index = priv->tx_key; 2880 index = priv->tx_key;
2881 2881
@@ -2918,7 +2918,7 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev,
2918 if (erq->flags & IW_ENCODE_RESTRICTED) 2918 if (erq->flags & IW_ENCODE_RESTRICTED)
2919 restricted = 1; 2919 restricted = 1;
2920 2920
2921 if (erq->pointer) { 2921 if (erq->pointer && erq->length > 0) {
2922 priv->keys[index].len = cpu_to_le16(xlen); 2922 priv->keys[index].len = cpu_to_le16(xlen);
2923 memset(priv->keys[index].data, 0, 2923 memset(priv->keys[index].data, 0,
2924 sizeof(priv->keys[index].data)); 2924 sizeof(priv->keys[index].data));
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 662ecc8a33ff..c52e9bcf8d02 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -1820,6 +1820,8 @@ static int zd1201_probe(struct usb_interface *interface,
1820 zd->dev->name); 1820 zd->dev->name);
1821 1821
1822 usb_set_intfdata(interface, zd); 1822 usb_set_intfdata(interface, zd);
1823 zd1201_enable(zd); /* zd1201 likes to startup enabled, */
1824 zd1201_disable(zd); /* interfering with all the wifis in range */
1823 return 0; 1825 return 0;
1824 1826
1825err_net: 1827err_net:
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 622b3f8ba820..f8ae2b7db0a7 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -61,7 +61,7 @@ struct pci_bus * __devinit pci_find_bus(int domain, int busnr)
61 * @from: Previous PCI bus found, or %NULL for new search. 61 * @from: Previous PCI bus found, or %NULL for new search.
62 * 62 *
63 * Iterates through the list of known PCI busses. A new search is 63 * Iterates through the list of known PCI busses. A new search is
64 * initiated by passing %NULL to the @from argument. Otherwise if 64 * initiated by passing %NULL as the @from argument. Otherwise if
65 * @from is not %NULL, searches continue from next device on the 65 * @from is not %NULL, searches continue from next device on the
66 * global list. 66 * global list.
67 */ 67 */
@@ -148,13 +148,14 @@ struct pci_dev * pci_get_slot(struct pci_bus *bus, unsigned int devfn)
148 * @from: Previous PCI device found in search, or %NULL for new search. 148 * @from: Previous PCI device found in search, or %NULL for new search.
149 * 149 *
150 * Iterates through the list of known PCI devices. If a PCI device is 150 * Iterates through the list of known PCI devices. If a PCI device is
151 * found with a matching @vendor, @device, @ss_vendor and @ss_device, a pointer to its 151 * found with a matching @vendor, @device, @ss_vendor and @ss_device, a
152 * device structure is returned. Otherwise, %NULL is returned. 152 * pointer to its device structure is returned. Otherwise, %NULL is returned.
153 * A new search is initiated by passing %NULL to the @from argument. 153 * A new search is initiated by passing %NULL as the @from argument.
154 * Otherwise if @from is not %NULL, searches continue from next device on the global list. 154 * Otherwise if @from is not %NULL, searches continue from next device
155 * on the global list.
155 * 156 *
156 * NOTE: Do not use this function anymore, use pci_get_subsys() instead, as 157 * NOTE: Do not use this function any more; use pci_get_subsys() instead, as
157 * the pci device returned by this function can disappear at any moment in 158 * the PCI device returned by this function can disappear at any moment in
158 * time. 159 * time.
159 */ 160 */
160static struct pci_dev * pci_find_subsys(unsigned int vendor, 161static struct pci_dev * pci_find_subsys(unsigned int vendor,
@@ -191,14 +192,15 @@ exit:
191 * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids 192 * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
192 * @from: Previous PCI device found in search, or %NULL for new search. 193 * @from: Previous PCI device found in search, or %NULL for new search.
193 * 194 *
194 * Iterates through the list of known PCI devices. If a PCI device is 195 * Iterates through the list of known PCI devices. If a PCI device is found
195 * found with a matching @vendor and @device, a pointer to its device structure is 196 * with a matching @vendor and @device, a pointer to its device structure is
196 * returned. Otherwise, %NULL is returned. 197 * returned. Otherwise, %NULL is returned.
197 * A new search is initiated by passing %NULL to the @from argument. 198 * A new search is initiated by passing %NULL as the @from argument.
198 * Otherwise if @from is not %NULL, searches continue from next device on the global list. 199 * Otherwise if @from is not %NULL, searches continue from next device
200 * on the global list.
199 * 201 *
200 * NOTE: Do not use this function anymore, use pci_get_device() instead, as 202 * NOTE: Do not use this function any more; use pci_get_device() instead, as
201 * the pci device returned by this function can disappear at any moment in 203 * the PCI device returned by this function can disappear at any moment in
202 * time. 204 * time.
203 */ 205 */
204struct pci_dev * 206struct pci_dev *
@@ -215,11 +217,11 @@ pci_find_device(unsigned int vendor, unsigned int device, const struct pci_dev *
215 * @ss_device: PCI subsystem device id to match, or %PCI_ANY_ID to match all device ids 217 * @ss_device: PCI subsystem device id to match, or %PCI_ANY_ID to match all device ids
216 * @from: Previous PCI device found in search, or %NULL for new search. 218 * @from: Previous PCI device found in search, or %NULL for new search.
217 * 219 *
218 * Iterates through the list of known PCI devices. If a PCI device is 220 * Iterates through the list of known PCI devices. If a PCI device is found
219 * found with a matching @vendor, @device, @ss_vendor and @ss_device, a pointer to its 221 * with a matching @vendor, @device, @ss_vendor and @ss_device, a pointer to its
220 * device structure is returned, and the reference count to the device is 222 * device structure is returned, and the reference count to the device is
221 * incremented. Otherwise, %NULL is returned. A new search is initiated by 223 * incremented. Otherwise, %NULL is returned. A new search is initiated by
222 * passing %NULL to the @from argument. Otherwise if @from is not %NULL, 224 * passing %NULL as the @from argument. Otherwise if @from is not %NULL,
223 * searches continue from next device on the global list. 225 * searches continue from next device on the global list.
224 * The reference count for @from is always decremented if it is not %NULL. 226 * The reference count for @from is always decremented if it is not %NULL.
225 */ 227 */
@@ -262,7 +264,7 @@ exit:
262 * found with a matching @vendor and @device, the reference count to the 264 * found with a matching @vendor and @device, the reference count to the
263 * device is incremented and a pointer to its device structure is returned. 265 * device is incremented and a pointer to its device structure is returned.
264 * Otherwise, %NULL is returned. A new search is initiated by passing %NULL 266 * Otherwise, %NULL is returned. A new search is initiated by passing %NULL
265 * to the @from argument. Otherwise if @from is not %NULL, searches continue 267 * as the @from argument. Otherwise if @from is not %NULL, searches continue
266 * from next device on the global list. The reference count for @from is 268 * from next device on the global list. The reference count for @from is
267 * always decremented if it is not %NULL. 269 * always decremented if it is not %NULL.
268 */ 270 */
@@ -279,11 +281,13 @@ pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from)
279 * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids 281 * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
280 * @from: Previous PCI device found in search, or %NULL for new search. 282 * @from: Previous PCI device found in search, or %NULL for new search.
281 * 283 *
282 * Iterates through the list of known PCI devices in the reverse order of pci_find_device(). 284 * Iterates through the list of known PCI devices in the reverse order of
285 * pci_find_device().
283 * If a PCI device is found with a matching @vendor and @device, a pointer to 286 * If a PCI device is found with a matching @vendor and @device, a pointer to
284 * its device structure is returned. Otherwise, %NULL is returned. 287 * its device structure is returned. Otherwise, %NULL is returned.
285 * A new search is initiated by passing %NULL to the @from argument. 288 * A new search is initiated by passing %NULL as the @from argument.
286 * Otherwise if @from is not %NULL, searches continue from previous device on the global list. 289 * Otherwise if @from is not %NULL, searches continue from previous device
290 * on the global list.
287 */ 291 */
288struct pci_dev * 292struct pci_dev *
289pci_find_device_reverse(unsigned int vendor, unsigned int device, const struct pci_dev *from) 293pci_find_device_reverse(unsigned int vendor, unsigned int device, const struct pci_dev *from)
@@ -317,7 +321,7 @@ exit:
317 * found with a matching @class, the reference count to the device is 321 * found with a matching @class, the reference count to the device is
318 * incremented and a pointer to its device structure is returned. 322 * incremented and a pointer to its device structure is returned.
319 * Otherwise, %NULL is returned. 323 * Otherwise, %NULL is returned.
320 * A new search is initiated by passing %NULL to the @from argument. 324 * A new search is initiated by passing %NULL as the @from argument.
321 * Otherwise if @from is not %NULL, searches continue from next device 325 * Otherwise if @from is not %NULL, searches continue from next device
322 * on the global list. The reference count for @from is always decremented 326 * on the global list. The reference count for @from is always decremented
323 * if it is not %NULL. 327 * if it is not %NULL.
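Annotation: the reworded kernel-doc above keeps describing the same iteration convention — pass %NULL as @from to start a search, pass the previous hit to continue, and prefer the refcounted pci_get_*() helpers over the deprecated pci_find_*() ones. A typical, illustrative caller (generic kernel idiom, not code from this patch) looks like this:

#include <linux/pci.h>

static struct pci_dev *find_first_wanted(unsigned int vendor)
{
	struct pci_dev *pdev = NULL;

	/* NULL 'from' starts a new search; the previous hit continues it.
	 * pci_get_device() drops the reference on 'from' and takes one on
	 * the device it returns. */
	while ((pdev = pci_get_device(vendor, PCI_ANY_ID, pdev)) != NULL) {
		if (pdev->subsystem_vendor == vendor)
			return pdev;	/* caller now owns this reference */
	}
	return NULL;			/* loop exit already dropped the last ref */
}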
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
index 738b1ef595a3..9ad18e62658d 100644
--- a/drivers/pcmcia/pcmcia_ioctl.c
+++ b/drivers/pcmcia/pcmcia_ioctl.c
@@ -601,12 +601,8 @@ static int ds_ioctl(struct inode * inode, struct file * file,
601 ret = CS_BAD_ARGS; 601 ret = CS_BAD_ARGS;
602 else { 602 else {
603 struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->config.Function); 603 struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->config.Function);
604 if (p_dev == NULL) 604 ret = pccard_get_configuration_info(s, p_dev, &buf->config);
605 ret = CS_BAD_ARGS; 605 pcmcia_put_dev(p_dev);
606 else {
607 ret = pccard_get_configuration_info(s, p_dev, &buf->config);
608 pcmcia_put_dev(p_dev);
609 }
610 } 606 }
611 break; 607 break;
612 case DS_GET_FIRST_TUPLE: 608 case DS_GET_FIRST_TUPLE:
@@ -636,12 +632,8 @@ static int ds_ioctl(struct inode * inode, struct file * file,
636 ret = CS_BAD_ARGS; 632 ret = CS_BAD_ARGS;
637 else { 633 else {
638 struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->status.Function); 634 struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->status.Function);
639 if (p_dev == NULL) 635 ret = pccard_get_status(s, p_dev, &buf->status);
640 ret = CS_BAD_ARGS; 636 pcmcia_put_dev(p_dev);
641 else {
642 ret = pccard_get_status(s, p_dev, &buf->status);
643 pcmcia_put_dev(p_dev);
644 }
645 } 637 }
646 break; 638 break;
647 case DS_VALIDATE_CIS: 639 case DS_VALIDATE_CIS:
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index 7bf25b88ea31..c8323399e9e4 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -245,10 +245,17 @@ int pccard_get_configuration_info(struct pcmcia_socket *s,
245 return CS_SUCCESS; 245 return CS_SUCCESS;
246 } 246 }
247 247
248 /* !!! This is a hack !!! */ 248 config->Attributes = c->Attributes | CONF_VALID_CLIENT;
249 memcpy(&config->Attributes, &c->Attributes, sizeof(config_t)); 249 config->Vcc = s->socket.Vcc;
250 config->Attributes |= CONF_VALID_CLIENT; 250 config->Vpp1 = config->Vpp2 = s->socket.Vpp;
251 config->CardValues = c->CardValues; 251 config->IntType = c->IntType;
252 config->ConfigBase = c->ConfigBase;
253 config->Status = c->Status;
254 config->Pin = c->Pin;
255 config->Copy = c->Copy;
256 config->Option = c->Option;
257 config->ExtStatus = c->ExtStatus;
258 config->Present = config->CardValues = c->CardValues;
252 config->IRQAttributes = c->irq.Attributes; 259 config->IRQAttributes = c->irq.Attributes;
253 config->AssignedIRQ = s->irq.AssignedIRQ; 260 config->AssignedIRQ = s->irq.AssignedIRQ;
254 config->BasePort1 = c->io.BasePort1; 261 config->BasePort1 = c->io.BasePort1;
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index b154b3f52cbe..551f58e29810 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -346,7 +346,7 @@ static int insert_device(struct pnp_dev *dev, struct pnp_bios_node * node)
346 dev->flags = node->flags; 346 dev->flags = node->flags;
347 if (!(dev->flags & PNPBIOS_NO_CONFIG)) 347 if (!(dev->flags & PNPBIOS_NO_CONFIG))
348 dev->capabilities |= PNP_CONFIGURABLE; 348 dev->capabilities |= PNP_CONFIGURABLE;
349 if (!(dev->flags & PNPBIOS_NO_DISABLE)) 349 if (!(dev->flags & PNPBIOS_NO_DISABLE) && pnpbios_is_dynamic(dev))
350 dev->capabilities |= PNP_DISABLE; 350 dev->capabilities |= PNP_DISABLE;
351 dev->capabilities |= PNP_READ; 351 dev->capabilities |= PNP_READ;
352 if (pnpbios_is_dynamic(dev)) 352 if (pnpbios_is_dynamic(dev))
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index f26a2ee3aad8..3cba6c9fab11 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -152,7 +152,6 @@ ccwgroup_create(struct device *root,
152 struct ccwgroup_device *gdev; 152 struct ccwgroup_device *gdev;
153 int i; 153 int i;
154 int rc; 154 int rc;
155 int del_drvdata;
156 155
157 if (argc > 256) /* disallow dumb users */ 156 if (argc > 256) /* disallow dumb users */
158 return -EINVAL; 157 return -EINVAL;
@@ -163,7 +162,6 @@ ccwgroup_create(struct device *root,
163 162
164 atomic_set(&gdev->onoff, 0); 163 atomic_set(&gdev->onoff, 0);
165 164
166 del_drvdata = 0;
167 for (i = 0; i < argc; i++) { 165 for (i = 0; i < argc; i++) {
168 gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]); 166 gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]);
169 167
@@ -180,10 +178,8 @@ ccwgroup_create(struct device *root,
180 rc = -EINVAL; 178 rc = -EINVAL;
181 goto free_dev; 179 goto free_dev;
182 } 180 }
183 }
184 for (i = 0; i < argc; i++)
185 gdev->cdev[i]->dev.driver_data = gdev; 181 gdev->cdev[i]->dev.driver_data = gdev;
186 del_drvdata = 1; 182 }
187 183
188 gdev->creator_id = creator_id; 184 gdev->creator_id = creator_id;
189 gdev->count = argc; 185 gdev->count = argc;
@@ -226,9 +222,9 @@ error:
226free_dev: 222free_dev:
227 for (i = 0; i < argc; i++) 223 for (i = 0; i < argc; i++)
228 if (gdev->cdev[i]) { 224 if (gdev->cdev[i]) {
229 put_device(&gdev->cdev[i]->dev); 225 if (gdev->cdev[i]->dev.driver_data == gdev)
230 if (del_drvdata)
231 gdev->cdev[i]->dev.driver_data = NULL; 226 gdev->cdev[i]->dev.driver_data = NULL;
227 put_device(&gdev->cdev[i]->dev);
232 } 228 }
233 kfree(gdev); 229 kfree(gdev);
234 return rc; 230 return rc;
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index ac6e0c7e43d9..7a39e0b0386c 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -152,7 +152,8 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
152 if (cdev->private->iretry) { 152 if (cdev->private->iretry) {
153 cdev->private->iretry--; 153 cdev->private->iretry--;
154 ret = cio_halt(sch); 154 ret = cio_halt(sch);
155 return (ret == 0) ? -EBUSY : ret; 155 if (ret != -EBUSY)
156 return (ret == 0) ? -EBUSY : ret;
156 } 157 }
157 /* halt io unsuccessful. */ 158 /* halt io unsuccessful. */
158 cdev->private->iretry = 255; /* 255 clear retries. */ 159 cdev->private->iretry = 255; /* 255 clear retries. */
diff --git a/drivers/scsi/NCR53C9x.c b/drivers/scsi/NCR53C9x.c
index 085db4826e0e..bdc6bb262bce 100644
--- a/drivers/scsi/NCR53C9x.c
+++ b/drivers/scsi/NCR53C9x.c
@@ -2152,29 +2152,23 @@ static int esp_do_data_finale(struct NCR_ESP *esp,
2152 */ 2152 */
2153static int esp_should_clear_sync(Scsi_Cmnd *sp) 2153static int esp_should_clear_sync(Scsi_Cmnd *sp)
2154{ 2154{
2155 unchar cmd1 = sp->cmnd[0]; 2155 unchar cmd = sp->cmnd[0];
2156 unchar cmd2 = sp->data_cmnd[0];
2157 2156
2158 /* These cases are for spinning up a disk and 2157 /* These cases are for spinning up a disk and
2159 * waiting for that spinup to complete. 2158 * waiting for that spinup to complete.
2160 */ 2159 */
2161 if(cmd1 == START_STOP || 2160 if(cmd == START_STOP)
2162 cmd2 == START_STOP)
2163 return 0; 2161 return 0;
2164 2162
2165 if(cmd1 == TEST_UNIT_READY || 2163 if(cmd == TEST_UNIT_READY)
2166 cmd2 == TEST_UNIT_READY)
2167 return 0; 2164 return 0;
2168 2165
2169 /* One more special case for SCSI tape drives, 2166 /* One more special case for SCSI tape drives,
2170 * this is what is used to probe the device for 2167 * this is what is used to probe the device for
2171 * completion of a rewind or tape load operation. 2168 * completion of a rewind or tape load operation.
2172 */ 2169 */
2173 if(sp->device->type == TYPE_TAPE) { 2170 if(sp->device->type == TYPE_TAPE && cmd == MODE_SENSE)
2174 if(cmd1 == MODE_SENSE || 2171 return 0;
2175 cmd2 == MODE_SENSE)
2176 return 0;
2177 }
2178 2172
2179 return 1; 2173 return 1;
2180} 2174}
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 3e1053f111dc..4cf7afc31cc7 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2427,7 +2427,7 @@ int fas216_eh_abort(Scsi_Cmnd *SCpnt)
2427 info->stats.aborts += 1; 2427 info->stats.aborts += 1;
2428 2428
2429 printk(KERN_WARNING "scsi%d: abort command ", info->host->host_no); 2429 printk(KERN_WARNING "scsi%d: abort command ", info->host->host_no);
2430 __scsi_print_command(SCpnt->data_cmnd); 2430 __scsi_print_command(SCpnt->cmnd);
2431 2431
2432 print_debug_list(); 2432 print_debug_list();
2433 fas216_dumpstate(info); 2433 fas216_dumpstate(info);
diff --git a/drivers/scsi/esp.c b/drivers/scsi/esp.c
index eaf64c7e54e7..98bd22714d0d 100644
--- a/drivers/scsi/esp.c
+++ b/drivers/scsi/esp.c
@@ -2754,18 +2754,15 @@ static int esp_do_data_finale(struct esp *esp)
2754 */ 2754 */
2755static int esp_should_clear_sync(struct scsi_cmnd *sp) 2755static int esp_should_clear_sync(struct scsi_cmnd *sp)
2756{ 2756{
2757 u8 cmd1 = sp->cmnd[0]; 2757 u8 cmd = sp->cmnd[0];
2758 u8 cmd2 = sp->data_cmnd[0];
2759 2758
2760 /* These cases are for spinning up a disk and 2759 /* These cases are for spinning up a disk and
2761 * waiting for that spinup to complete. 2760 * waiting for that spinup to complete.
2762 */ 2761 */
2763 if (cmd1 == START_STOP || 2762 if (cmd == START_STOP)
2764 cmd2 == START_STOP)
2765 return 0; 2763 return 0;
2766 2764
2767 if (cmd1 == TEST_UNIT_READY || 2765 if (cmd == TEST_UNIT_READY)
2768 cmd2 == TEST_UNIT_READY)
2769 return 0; 2766 return 0;
2770 2767
2771 /* One more special case for SCSI tape drives, 2768 /* One more special case for SCSI tape drives,
@@ -2773,8 +2770,7 @@ static int esp_should_clear_sync(struct scsi_cmnd *sp)
2773 * completion of a rewind or tape load operation. 2770 * completion of a rewind or tape load operation.
2774 */ 2771 */
2775 if (sp->device->type == TYPE_TAPE) { 2772 if (sp->device->type == TYPE_TAPE) {
2776 if (cmd1 == MODE_SENSE || 2773 if (cmd == MODE_SENSE)
2777 cmd2 == MODE_SENSE)
2778 return 0; 2774 return 0;
2779 } 2775 }
2780 2776
diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c
index 4b6aa30f4d68..29f59345305d 100644
--- a/drivers/scsi/libata-eh.c
+++ b/drivers/scsi/libata-eh.c
@@ -764,12 +764,27 @@ static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
764 unsigned int action) 764 unsigned int action)
765{ 765{
766 unsigned long flags; 766 unsigned long flags;
767 struct ata_eh_info *ehi = &ap->eh_info;
768 struct ata_eh_context *ehc = &ap->eh_context;
767 769
768 spin_lock_irqsave(ap->lock, flags); 770 spin_lock_irqsave(ap->lock, flags);
769 771
770 ata_eh_clear_action(dev, &ap->eh_info, action); 772 /* Reset is represented by combination of actions and EHI
773 * flags. Suck in all related bits before clearing eh_info to
774 * avoid losing requested action.
775 */
776 if (action & ATA_EH_RESET_MASK) {
777 ehc->i.action |= ehi->action & ATA_EH_RESET_MASK;
778 ehc->i.flags |= ehi->flags & ATA_EHI_RESET_MODIFIER_MASK;
779
780 /* make sure all reset actions are cleared & clear EHI flags */
781 action |= ATA_EH_RESET_MASK;
782 ehi->flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
783 }
784
785 ata_eh_clear_action(dev, ehi, action);
771 786
772 if (!(ap->eh_context.i.flags & ATA_EHI_QUIET)) 787 if (!(ehc->i.flags & ATA_EHI_QUIET))
773 ap->pflags |= ATA_PFLAG_RECOVERED; 788 ap->pflags |= ATA_PFLAG_RECOVERED;
774 789
775 spin_unlock_irqrestore(ap->lock, flags); 790 spin_unlock_irqrestore(ap->lock, flags);
@@ -790,6 +805,12 @@ static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
790static void ata_eh_done(struct ata_port *ap, struct ata_device *dev, 805static void ata_eh_done(struct ata_port *ap, struct ata_device *dev,
791 unsigned int action) 806 unsigned int action)
792{ 807{
808 /* if reset is complete, clear all reset actions & reset modifier */
809 if (action & ATA_EH_RESET_MASK) {
810 action |= ATA_EH_RESET_MASK;
811 ap->eh_context.i.flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
812 }
813
793 ata_eh_clear_action(dev, &ap->eh_context.i, action); 814 ata_eh_clear_action(dev, &ap->eh_context.i, action);
794} 815}
795 816
@@ -1276,8 +1297,6 @@ static int ata_eh_speed_down(struct ata_device *dev, int is_io,
1276static void ata_eh_autopsy(struct ata_port *ap) 1297static void ata_eh_autopsy(struct ata_port *ap)
1277{ 1298{
1278 struct ata_eh_context *ehc = &ap->eh_context; 1299 struct ata_eh_context *ehc = &ap->eh_context;
1279 unsigned int action = ehc->i.action;
1280 struct ata_device *failed_dev = NULL;
1281 unsigned int all_err_mask = 0; 1300 unsigned int all_err_mask = 0;
1282 int tag, is_io = 0; 1301 int tag, is_io = 0;
1283 u32 serror; 1302 u32 serror;
@@ -1294,7 +1313,7 @@ static void ata_eh_autopsy(struct ata_port *ap)
1294 ehc->i.serror |= serror; 1313 ehc->i.serror |= serror;
1295 ata_eh_analyze_serror(ap); 1314 ata_eh_analyze_serror(ap);
1296 } else if (rc != -EOPNOTSUPP) 1315 } else if (rc != -EOPNOTSUPP)
1297 action |= ATA_EH_HARDRESET; 1316 ehc->i.action |= ATA_EH_HARDRESET;
1298 1317
1299 /* analyze NCQ failure */ 1318 /* analyze NCQ failure */
1300 ata_eh_analyze_ncq_error(ap); 1319 ata_eh_analyze_ncq_error(ap);
@@ -1315,7 +1334,7 @@ static void ata_eh_autopsy(struct ata_port *ap)
1315 qc->err_mask |= ehc->i.err_mask; 1334 qc->err_mask |= ehc->i.err_mask;
1316 1335
1317 /* analyze TF */ 1336 /* analyze TF */
1318 action |= ata_eh_analyze_tf(qc, &qc->result_tf); 1337 ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
1319 1338
1320 /* DEV errors are probably spurious in case of ATA_BUS error */ 1339 /* DEV errors are probably spurious in case of ATA_BUS error */
1321 if (qc->err_mask & AC_ERR_ATA_BUS) 1340 if (qc->err_mask & AC_ERR_ATA_BUS)
@@ -1329,11 +1348,11 @@ static void ata_eh_autopsy(struct ata_port *ap)
1329 /* SENSE_VALID trumps dev/unknown error and revalidation */ 1348 /* SENSE_VALID trumps dev/unknown error and revalidation */
1330 if (qc->flags & ATA_QCFLAG_SENSE_VALID) { 1349 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
1331 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); 1350 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
1332 action &= ~ATA_EH_REVALIDATE; 1351 ehc->i.action &= ~ATA_EH_REVALIDATE;
1333 } 1352 }
1334 1353
1335 /* accumulate error info */ 1354 /* accumulate error info */
1336 failed_dev = qc->dev; 1355 ehc->i.dev = qc->dev;
1337 all_err_mask |= qc->err_mask; 1356 all_err_mask |= qc->err_mask;
1338 if (qc->flags & ATA_QCFLAG_IO) 1357 if (qc->flags & ATA_QCFLAG_IO)
1339 is_io = 1; 1358 is_io = 1;
@@ -1342,25 +1361,22 @@ static void ata_eh_autopsy(struct ata_port *ap)
1342 /* enforce default EH actions */ 1361 /* enforce default EH actions */
1343 if (ap->pflags & ATA_PFLAG_FROZEN || 1362 if (ap->pflags & ATA_PFLAG_FROZEN ||
1344 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) 1363 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
1345 action |= ATA_EH_SOFTRESET; 1364 ehc->i.action |= ATA_EH_SOFTRESET;
1346 else if (all_err_mask) 1365 else if (all_err_mask)
1347 action |= ATA_EH_REVALIDATE; 1366 ehc->i.action |= ATA_EH_REVALIDATE;
1348 1367
1349 /* if we have offending qcs and the associated failed device */ 1368 /* if we have offending qcs and the associated failed device */
1350 if (failed_dev) { 1369 if (ehc->i.dev) {
1351 /* speed down */ 1370 /* speed down */
1352 action |= ata_eh_speed_down(failed_dev, is_io, all_err_mask); 1371 ehc->i.action |= ata_eh_speed_down(ehc->i.dev, is_io,
1372 all_err_mask);
1353 1373
1354 /* perform per-dev EH action only on the offending device */ 1374 /* perform per-dev EH action only on the offending device */
1355 ehc->i.dev_action[failed_dev->devno] |= 1375 ehc->i.dev_action[ehc->i.dev->devno] |=
1356 action & ATA_EH_PERDEV_MASK; 1376 ehc->i.action & ATA_EH_PERDEV_MASK;
1357 action &= ~ATA_EH_PERDEV_MASK; 1377 ehc->i.action &= ~ATA_EH_PERDEV_MASK;
1358 } 1378 }
1359 1379
1360 /* record autopsy result */
1361 ehc->i.dev = failed_dev;
1362 ehc->i.action |= action;
1363
1364 DPRINTK("EXIT\n"); 1380 DPRINTK("EXIT\n");
1365} 1381}
1366 1382
@@ -1483,6 +1499,9 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
1483 ata_reset_fn_t reset; 1499 ata_reset_fn_t reset;
1484 int i, did_followup_srst, rc; 1500 int i, did_followup_srst, rc;
1485 1501
1502 /* about to reset */
1503 ata_eh_about_to_do(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK);
1504
1486 /* Determine which reset to use and record in ehc->i.action. 1505 /* Determine which reset to use and record in ehc->i.action.
1487 * prereset() may examine and modify it. 1506 * prereset() may examine and modify it.
1488 */ 1507 */
@@ -1531,8 +1550,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
1531 ata_port_printk(ap, KERN_INFO, "%s resetting port\n", 1550 ata_port_printk(ap, KERN_INFO, "%s resetting port\n",
1532 reset == softreset ? "soft" : "hard"); 1551 reset == softreset ? "soft" : "hard");
1533 1552
1534 /* reset */ 1553 /* mark that this EH session started with reset */
1535 ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK);
1536 ehc->i.flags |= ATA_EHI_DID_RESET; 1554 ehc->i.flags |= ATA_EHI_DID_RESET;
1537 1555
1538 rc = ata_do_reset(ap, reset, classes); 1556 rc = ata_do_reset(ap, reset, classes);
@@ -1595,7 +1613,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
1595 postreset(ap, classes); 1613 postreset(ap, classes);
1596 1614
1597 /* reset successful, schedule revalidation */ 1615 /* reset successful, schedule revalidation */
1598 ata_eh_done(ap, NULL, ATA_EH_RESET_MASK); 1616 ata_eh_done(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK);
1599 ehc->i.action |= ATA_EH_REVALIDATE; 1617 ehc->i.action |= ATA_EH_REVALIDATE;
1600 } 1618 }
1601 1619
@@ -1848,15 +1866,16 @@ static int ata_eh_skip_recovery(struct ata_port *ap)
1848 for (i = 0; i < ata_port_max_devices(ap); i++) { 1866 for (i = 0; i < ata_port_max_devices(ap); i++) {
1849 struct ata_device *dev = &ap->device[i]; 1867 struct ata_device *dev = &ap->device[i];
1850 1868
1851 if (ata_dev_absent(dev) || ata_dev_ready(dev)) 1869 if (!(dev->flags & ATA_DFLAG_SUSPENDED))
1852 break; 1870 break;
1853 } 1871 }
1854 1872
1855 if (i == ata_port_max_devices(ap)) 1873 if (i == ata_port_max_devices(ap))
1856 return 1; 1874 return 1;
1857 1875
1858 /* always thaw frozen port and recover failed devices */ 1876 /* thaw frozen port, resume link and recover failed devices */
1859 if (ap->pflags & ATA_PFLAG_FROZEN || ata_port_nr_enabled(ap)) 1877 if ((ap->pflags & ATA_PFLAG_FROZEN) ||
1878 (ehc->i.flags & ATA_EHI_RESUME_LINK) || ata_port_nr_enabled(ap))
1860 return 0; 1879 return 0;
1861 1880
1862 /* skip if class codes for all vacant slots are ATA_DEV_NONE */ 1881 /* skip if class codes for all vacant slots are ATA_DEV_NONE */
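The libata hunks above move the error-handler bookkeeping from function-local variables into ehc->i and bracket the reset step with ata_eh_about_to_do()/ata_eh_done() on the same ATA_EH_RESET_MASK slice of ehc->i.action. A minimal sketch of that pairing, with the reset selection, classification and postreset details elided (all names are taken from the hunks above):

	/* announce the reset actions this EH pass intends to take ... */
	ata_eh_about_to_do(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK);

	/* ... perform the chosen soft/hard reset ... */
	rc = ata_do_reset(ap, reset, classes);

	if (rc == 0) {
		/* ... and retire exactly the actions that were carried out */
		ata_eh_done(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK);
		ehc->i.action |= ATA_EH_REVALIDATE;
	}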
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index 64631bd38952..4776f4e55839 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -269,8 +269,15 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = {
269 { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 269 { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
270 board_20619 }, 270 board_20619 },
271 271
272/* TODO: remove all associated board_20771 code, as it completely
273 * duplicates board_2037x code, unless reason for separation can be
274 * divined.
275 */
276#if 0
272 { PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 277 { PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
273 board_20771 }, 278 board_20771 },
279#endif
280
274 { } /* terminate list */ 281 { } /* terminate list */
275}; 282};
276 283
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index a89c4115cfba..32293f451669 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -110,11 +110,8 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
110 sshdr.asc, sshdr.ascq); 110 sshdr.asc, sshdr.ascq);
111 break; 111 break;
112 case NOT_READY: /* This happens if there is no disc in drive */ 112 case NOT_READY: /* This happens if there is no disc in drive */
113 if (sdev->removable && (cmd[0] != TEST_UNIT_READY)) { 113 if (sdev->removable)
114 printk(KERN_INFO "Device not ready. Make sure"
115 " there is a disc in the drive.\n");
116 break; 114 break;
117 }
118 case UNIT_ATTENTION: 115 case UNIT_ATTENTION:
119 if (sdev->removable) { 116 if (sdev->removable) {
120 sdev->changed = 1; 117 sdev->changed = 1;
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 6533b0f39231..c40b9b8b1e7e 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -86,9 +86,11 @@ config FB_MACMODES
86 default n 86 default n
87 87
88config FB_BACKLIGHT 88config FB_BACKLIGHT
89 bool 89 bool
90 depends on FB 90 depends on FB
91 default n 91 select BACKLIGHT_LCD_SUPPORT
92 select BACKLIGHT_CLASS_DEVICE
93 default n
92 94
93config FB_MODE_HELPERS 95config FB_MODE_HELPERS
94 bool "Enable Video Mode Handling Helpers" 96 bool "Enable Video Mode Handling Helpers"
@@ -420,7 +422,7 @@ config FB_OF
420 422
421config FB_CONTROL 423config FB_CONTROL
422 bool "Apple \"control\" display support" 424 bool "Apple \"control\" display support"
423 depends on (FB = y) && PPC_PMAC 425 depends on (FB = y) && PPC_PMAC && PPC32
424 select FB_CFB_FILLRECT 426 select FB_CFB_FILLRECT
425 select FB_CFB_COPYAREA 427 select FB_CFB_COPYAREA
426 select FB_CFB_IMAGEBLIT 428 select FB_CFB_IMAGEBLIT
@@ -431,7 +433,7 @@ config FB_CONTROL
431 433
432config FB_PLATINUM 434config FB_PLATINUM
433 bool "Apple \"platinum\" display support" 435 bool "Apple \"platinum\" display support"
434 depends on (FB = y) && PPC_PMAC 436 depends on (FB = y) && PPC_PMAC && PPC32
435 select FB_CFB_FILLRECT 437 select FB_CFB_FILLRECT
436 select FB_CFB_COPYAREA 438 select FB_CFB_COPYAREA
437 select FB_CFB_IMAGEBLIT 439 select FB_CFB_IMAGEBLIT
@@ -442,7 +444,7 @@ config FB_PLATINUM
442 444
443config FB_VALKYRIE 445config FB_VALKYRIE
444 bool "Apple \"valkyrie\" display support" 446 bool "Apple \"valkyrie\" display support"
445 depends on (FB = y) && (MAC || PPC_PMAC) 447 depends on (FB = y) && (MAC || (PPC_PMAC && PPC32))
446 select FB_CFB_FILLRECT 448 select FB_CFB_FILLRECT
447 select FB_CFB_COPYAREA 449 select FB_CFB_COPYAREA
448 select FB_CFB_IMAGEBLIT 450 select FB_CFB_IMAGEBLIT
@@ -453,7 +455,7 @@ config FB_VALKYRIE
453 455
454config FB_CT65550 456config FB_CT65550
455 bool "Chips 65550 display support" 457 bool "Chips 65550 display support"
456 depends on (FB = y) && PPC 458 depends on (FB = y) && PPC32
457 select FB_CFB_FILLRECT 459 select FB_CFB_FILLRECT
458 select FB_CFB_COPYAREA 460 select FB_CFB_COPYAREA
459 select FB_CFB_IMAGEBLIT 461 select FB_CFB_IMAGEBLIT
@@ -721,10 +723,8 @@ config FB_NVIDIA_I2C
721 723
722config FB_NVIDIA_BACKLIGHT 724config FB_NVIDIA_BACKLIGHT
723 bool "Support for backlight control" 725 bool "Support for backlight control"
724 depends on FB_NVIDIA && PPC_PMAC 726 depends on FB_NVIDIA && PMAC_BACKLIGHT
725 select FB_BACKLIGHT 727 select FB_BACKLIGHT
726 select BACKLIGHT_LCD_SUPPORT
727 select BACKLIGHT_CLASS_DEVICE
728 default y 728 default y
729 help 729 help
730 Say Y here if you want to control the backlight of your display. 730 Say Y here if you want to control the backlight of your display.
@@ -769,10 +769,8 @@ config FB_RIVA_DEBUG
769 769
770config FB_RIVA_BACKLIGHT 770config FB_RIVA_BACKLIGHT
771 bool "Support for backlight control" 771 bool "Support for backlight control"
772 depends on FB_RIVA && PPC_PMAC 772 depends on FB_RIVA && PMAC_BACKLIGHT
773 select FB_BACKLIGHT 773 select FB_BACKLIGHT
774 select BACKLIGHT_LCD_SUPPORT
775 select BACKLIGHT_CLASS_DEVICE
776 default y 774 default y
777 help 775 help
778 Say Y here if you want to control the backlight of your display. 776 Say Y here if you want to control the backlight of your display.
@@ -1025,10 +1023,8 @@ config FB_RADEON_I2C
1025 1023
1026config FB_RADEON_BACKLIGHT 1024config FB_RADEON_BACKLIGHT
1027 bool "Support for backlight control" 1025 bool "Support for backlight control"
1028 depends on FB_RADEON && PPC_PMAC 1026 depends on FB_RADEON && PMAC_BACKLIGHT
1029 select FB_BACKLIGHT 1027 select FB_BACKLIGHT
1030 select BACKLIGHT_LCD_SUPPORT
1031 select BACKLIGHT_CLASS_DEVICE
1032 default y 1028 default y
1033 help 1029 help
1034 Say Y here if you want to control the backlight of your display. 1030 Say Y here if you want to control the backlight of your display.
@@ -1059,10 +1055,8 @@ config FB_ATY128
1059 1055
1060config FB_ATY128_BACKLIGHT 1056config FB_ATY128_BACKLIGHT
1061 bool "Support for backlight control" 1057 bool "Support for backlight control"
1062 depends on FB_ATY128 && PPC_PMAC 1058 depends on FB_ATY128 && PMAC_BACKLIGHT
1063 select FB_BACKLIGHT 1059 select FB_BACKLIGHT
1064 select BACKLIGHT_LCD_SUPPORT
1065 select BACKLIGHT_CLASS_DEVICE
1066 default y 1060 default y
1067 help 1061 help
1068 Say Y here if you want to control the backlight of your display. 1062 Say Y here if you want to control the backlight of your display.
@@ -1111,10 +1105,8 @@ config FB_ATY_GX
1111 1105
1112config FB_ATY_BACKLIGHT 1106config FB_ATY_BACKLIGHT
1113 bool "Support for backlight control" 1107 bool "Support for backlight control"
1114 depends on FB_ATY && PPC_PMAC 1108 depends on FB_ATY && PMAC_BACKLIGHT
1115 select FB_BACKLIGHT 1109 select FB_BACKLIGHT
1116 select BACKLIGHT_LCD_SUPPORT
1117 select BACKLIGHT_CLASS_DEVICE
1118 default y 1110 default y
1119 help 1111 help
1120 Say Y here if you want to control the backlight of your display. 1112 Say Y here if you want to control the backlight of your display.
@@ -1620,7 +1612,7 @@ if FB || SGI_NEWPORT_CONSOLE
1620 source "drivers/video/logo/Kconfig" 1612 source "drivers/video/logo/Kconfig"
1621endif 1613endif
1622 1614
1623if FB && SYSFS 1615if SYSFS
1624 source "drivers/video/backlight/Kconfig" 1616 source "drivers/video/backlight/Kconfig"
1625endif 1617endif
1626 1618
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 95563c9c6b9c..481c6c9695f8 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -4,6 +4,7 @@
4 4
5# Each configuration option enables a list of files. 5# Each configuration option enables a list of files.
6 6
7obj-y += fb_notify.o
7obj-$(CONFIG_FB) += fb.o 8obj-$(CONFIG_FB) += fb.o
8fb-y := fbmem.o fbmon.o fbcmap.o fbsysfs.o \ 9fb-y := fbmem.o fbmon.o fbcmap.o fbsysfs.o \
9 modedb.o fbcvt.o 10 modedb.o fbcvt.o
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index c64a717e2d4b..8b08121b390b 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -455,7 +455,10 @@ static void do_wait_for_fifo(u16 entries, struct aty128fb_par *par);
455static void wait_for_fifo(u16 entries, struct aty128fb_par *par); 455static void wait_for_fifo(u16 entries, struct aty128fb_par *par);
456static void wait_for_idle(struct aty128fb_par *par); 456static void wait_for_idle(struct aty128fb_par *par);
457static u32 depth_to_dst(u32 depth); 457static u32 depth_to_dst(u32 depth);
458
459#ifdef CONFIG_FB_ATY128_BACKLIGHT
458static void aty128_bl_set_power(struct fb_info *info, int power); 460static void aty128_bl_set_power(struct fb_info *info, int power);
461#endif
459 462
460#define BIOS_IN8(v) (readb(bios + (v))) 463#define BIOS_IN8(v) (readb(bios + (v)))
461#define BIOS_IN16(v) (readb(bios + (v)) | \ 464#define BIOS_IN16(v) (readb(bios + (v)) | \
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 1507d19f481f..053ff63365b7 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -2812,7 +2812,7 @@ static int atyfb_blank(int blank, struct fb_info *info)
2812 if (par->lock_blank || par->asleep) 2812 if (par->lock_blank || par->asleep)
2813 return 0; 2813 return 0;
2814 2814
2815#ifdef CONFIG_PMAC_BACKLIGHT 2815#ifdef CONFIG_FB_ATY_BACKLIGHT
2816 if (machine_is(powermac) && blank > FB_BLANK_NORMAL) 2816 if (machine_is(powermac) && blank > FB_BLANK_NORMAL)
2817 aty_bl_set_power(info, FB_BLANK_POWERDOWN); 2817 aty_bl_set_power(info, FB_BLANK_POWERDOWN);
2818#elif defined(CONFIG_FB_ATY_GENERIC_LCD) 2818#elif defined(CONFIG_FB_ATY_GENERIC_LCD)
@@ -2844,7 +2844,7 @@ static int atyfb_blank(int blank, struct fb_info *info)
2844 } 2844 }
2845 aty_st_le32(CRTC_GEN_CNTL, gen_cntl, par); 2845 aty_st_le32(CRTC_GEN_CNTL, gen_cntl, par);
2846 2846
2847#ifdef CONFIG_PMAC_BACKLIGHT 2847#ifdef CONFIG_FB_ATY_BACKLIGHT
2848 if (machine_is(powermac) && blank <= FB_BLANK_NORMAL) 2848 if (machine_is(powermac) && blank <= FB_BLANK_NORMAL)
2849 aty_bl_set_power(info, FB_BLANK_UNBLANK); 2849 aty_bl_set_power(info, FB_BLANK_UNBLANK);
2850#elif defined(CONFIG_FB_ATY_GENERIC_LCD) 2850#elif defined(CONFIG_FB_ATY_GENERIC_LCD)
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 8d85fc58142e..8e3400d5dd21 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -266,6 +266,8 @@ static int force_measure_pll = 0;
266#ifdef CONFIG_MTRR 266#ifdef CONFIG_MTRR
267static int nomtrr = 0; 267static int nomtrr = 0;
268#endif 268#endif
269static int force_sleep;
270static int ignore_devlist;
269 271
270/* 272/*
271 * prototypes 273 * prototypes
@@ -2327,9 +2329,9 @@ static int __devinit radeonfb_pci_register (struct pci_dev *pdev,
2327 /* -2 is special: means ON on mobility chips and do not 2329 /* -2 is special: means ON on mobility chips and do not
2328 * change on others 2330 * change on others
2329 */ 2331 */
2330 radeonfb_pm_init(rinfo, rinfo->is_mobility ? 1 : -1); 2332 radeonfb_pm_init(rinfo, rinfo->is_mobility ? 1 : -1, ignore_devlist, force_sleep);
2331 } else 2333 } else
2332 radeonfb_pm_init(rinfo, default_dynclk); 2334 radeonfb_pm_init(rinfo, default_dynclk, ignore_devlist, force_sleep);
2333 2335
2334 pci_set_drvdata(pdev, info); 2336 pci_set_drvdata(pdev, info);
2335 2337
@@ -2477,6 +2479,12 @@ static int __init radeonfb_setup (char *options)
2477 force_measure_pll = 1; 2479 force_measure_pll = 1;
2478 } else if (!strncmp(this_opt, "ignore_edid", 11)) { 2480 } else if (!strncmp(this_opt, "ignore_edid", 11)) {
2479 ignore_edid = 1; 2481 ignore_edid = 1;
2482#if defined(CONFIG_PM) && defined(CONFIG_X86)
2483 } else if (!strncmp(this_opt, "force_sleep", 11)) {
2484 force_sleep = 1;
2485 } else if (!strncmp(this_opt, "ignore_devlist", 14)) {
2486 ignore_devlist = 1;
2487#endif
2480 } else 2488 } else
2481 mode_option = this_opt; 2489 mode_option = this_opt;
2482 } 2490 }
@@ -2532,3 +2540,9 @@ module_param(panel_yres, int, 0);
2532MODULE_PARM_DESC(panel_yres, "int: set panel yres"); 2540MODULE_PARM_DESC(panel_yres, "int: set panel yres");
2533module_param(mode_option, charp, 0); 2541module_param(mode_option, charp, 0);
2534MODULE_PARM_DESC(mode_option, "Specify resolution as \"<xres>x<yres>[-<bpp>][@<refresh>]\" "); 2542MODULE_PARM_DESC(mode_option, "Specify resolution as \"<xres>x<yres>[-<bpp>][@<refresh>]\" ");
2543#if defined(CONFIG_PM) && defined(CONFIG_X86)
2544module_param(force_sleep, bool, 0);
2545MODULE_PARM_DESC(force_sleep, "bool: force D2 sleep mode on all hardware");
2546module_param(ignore_devlist, bool, 0);
2547MODULE_PARM_DESC(ignore_devlist, "bool: ignore workarounds for bugs in specific laptops");
2548#endif
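The two options added above are compiled in only when both CONFIG_PM and CONFIG_X86 are set. Assuming the usual fbdev option plumbing for radeonfb (which this patch does not touch), they would be exercised either as module parameters or via the option string handed to radeonfb_setup(); the lines below are illustrative only:

	modprobe radeonfb force_sleep=1 ignore_devlist=1	(modular build)
	video=radeonfb:force_sleep,ignore_devlist		(built-in driver, kernel command line)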
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c
index c7091761cef4..f31e606a2ded 100644
--- a/drivers/video/aty/radeon_pm.c
+++ b/drivers/video/aty/radeon_pm.c
@@ -27,6 +27,99 @@
27 27
28#include "ati_ids.h" 28#include "ati_ids.h"
29 29
30static void radeon_reinitialize_M10(struct radeonfb_info *rinfo);
31
32/*
33 * Workarounds for bugs in PC laptops:
34 * - enable D2 sleep in some IBM Thinkpads
35 * - special case for Samsung P35
36 *
37 * Whitelist by subsystem vendor/device because
38 * its the subsystem vendor's fault!
39 */
40
41#if defined(CONFIG_PM) && defined(CONFIG_X86)
42struct radeon_device_id {
43 const char *ident; /* (arbitrary) Name */
44 const unsigned short subsystem_vendor; /* Subsystem Vendor ID */
45 const unsigned short subsystem_device; /* Subsystem Device ID */
46 const enum radeon_pm_mode pm_mode_modifier; /* modify pm_mode */
47 const reinit_function_ptr new_reinit_func; /* changed reinit_func */
48};
49
50#define BUGFIX(model, sv, sd, pm, fn) { \
51 .ident = model, \
52 .subsystem_vendor = sv, \
53 .subsystem_device = sd, \
54 .pm_mode_modifier = pm, \
55 .new_reinit_func = fn \
56}
57
58static struct radeon_device_id radeon_workaround_list[] = {
59 BUGFIX("IBM Thinkpad R32",
60 PCI_VENDOR_ID_IBM, 0x1905,
61 radeon_pm_d2, NULL),
62 BUGFIX("IBM Thinkpad R40",
63 PCI_VENDOR_ID_IBM, 0x0526,
64 radeon_pm_d2, NULL),
65 BUGFIX("IBM Thinkpad R40",
66 PCI_VENDOR_ID_IBM, 0x0527,
67 radeon_pm_d2, NULL),
68 BUGFIX("IBM Thinkpad R50/R51/T40/T41",
69 PCI_VENDOR_ID_IBM, 0x0531,
70 radeon_pm_d2, NULL),
71 BUGFIX("IBM Thinkpad R51/T40/T41/T42",
72 PCI_VENDOR_ID_IBM, 0x0530,
73 radeon_pm_d2, NULL),
74 BUGFIX("IBM Thinkpad T30",
75 PCI_VENDOR_ID_IBM, 0x0517,
76 radeon_pm_d2, NULL),
77 BUGFIX("IBM Thinkpad T40p",
78 PCI_VENDOR_ID_IBM, 0x054d,
79 radeon_pm_d2, NULL),
80 BUGFIX("IBM Thinkpad T42",
81 PCI_VENDOR_ID_IBM, 0x0550,
82 radeon_pm_d2, NULL),
83 BUGFIX("IBM Thinkpad X31/X32",
84 PCI_VENDOR_ID_IBM, 0x052f,
85 radeon_pm_d2, NULL),
86 BUGFIX("Samsung P35",
87 PCI_VENDOR_ID_SAMSUNG, 0xc00c,
88 radeon_pm_off, radeon_reinitialize_M10),
89 { .ident = NULL }
90};
91
92static int radeon_apply_workarounds(struct radeonfb_info *rinfo)
93{
94 struct radeon_device_id *id;
95
 96	for (id = radeon_workaround_list; id->ident != NULL; id++)
 97		if ((id->subsystem_vendor == rinfo->pdev->subsystem_vendor) &&
 98		    (id->subsystem_device == rinfo->pdev->subsystem_device)) {
99
100 /* we found a device that requires workaround */
101 printk(KERN_DEBUG "radeonfb: %s detected"
102 ", enabling workaround\n", id->ident);
103
104 rinfo->pm_mode |= id->pm_mode_modifier;
105
106 if (id->new_reinit_func != NULL)
107 rinfo->reinit_func = id->new_reinit_func;
108
109 return 1;
110 }
111 return 0; /* not found */
112}
113
114#else /* defined(CONFIG_PM) && defined(CONFIG_X86) */
115static inline int radeon_apply_workarounds(struct radeonfb_info *rinfo)
116{
117 return 0;
118}
119#endif /* defined(CONFIG_PM) && defined(CONFIG_X86) */
120
121
122
30static void radeon_pm_disable_dynamic_mode(struct radeonfb_info *rinfo) 123static void radeon_pm_disable_dynamic_mode(struct radeonfb_info *rinfo)
31{ 124{
32 u32 tmp; 125 u32 tmp;
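The whitelist above is keyed purely on PCI subsystem IDs, so extending it is a one-line change per machine inside radeon_workaround_list[], before the terminating entry. A hypothetical extra entry (vendor/device values invented for illustration, not part of this patch) would use the BUGFIX() initializer defined above:

	BUGFIX("Hypothetical example laptop",	/* illustrative only */
	       PCI_VENDOR_ID_IBM, 0x9999,	/* subsystem vendor/device to match */
	       radeon_pm_d2, NULL),		/* enable D2 sleep, keep the default reinit_func */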
@@ -852,18 +945,26 @@ static void radeon_pm_setup_for_suspend(struct radeonfb_info *rinfo)
852 /* because both INPLL and OUTPLL take the same lock, that's why. */ 945 /* because both INPLL and OUTPLL take the same lock, that's why. */
853 tmp = INPLL( pllMCLK_MISC) | MCLK_MISC__EN_MCLK_TRISTATE_IN_SUSPEND; 946 tmp = INPLL( pllMCLK_MISC) | MCLK_MISC__EN_MCLK_TRISTATE_IN_SUSPEND;
854 OUTPLL( pllMCLK_MISC, tmp); 947 OUTPLL( pllMCLK_MISC, tmp);
855
856 /* AGP PLL control */
857 if (rinfo->family <= CHIP_FAMILY_RV280) {
858 OUTREG(BUS_CNTL1, INREG(BUS_CNTL1) | BUS_CNTL1__AGPCLK_VALID);
859 948
 860 OUTREG(BUS_CNTL1, 949 /* BUS_CNTL1__MOBILE_PLATFORM_SEL setting is northbridge chipset
861 (INREG(BUS_CNTL1) & ~BUS_CNTL1__MOBILE_PLATFORM_SEL_MASK) 950 * and radeon chip dependent. Thus we only enable it on Mac for
862 | (2<<BUS_CNTL1__MOBILE_PLATFORM_SEL__SHIFT)); // 440BX 951 * now (until we get more info on how to compute the correct
863 } else { 952 * value for various X86 bridges).
864 OUTREG(BUS_CNTL1, INREG(BUS_CNTL1)); 953 */
865 OUTREG(BUS_CNTL1, (INREG(BUS_CNTL1) & ~0x4000) | 0x8000); 954#ifdef CONFIG_PPC_PMAC
955 if (machine_is(powermac)) {
956 /* AGP PLL control */
957 if (rinfo->family <= CHIP_FAMILY_RV280) {
958 OUTREG(BUS_CNTL1, INREG(BUS_CNTL1) | BUS_CNTL1__AGPCLK_VALID);
959 OUTREG(BUS_CNTL1,
960 (INREG(BUS_CNTL1) & ~BUS_CNTL1__MOBILE_PLATFORM_SEL_MASK)
961 | (2<<BUS_CNTL1__MOBILE_PLATFORM_SEL__SHIFT)); // 440BX
962 } else {
963 OUTREG(BUS_CNTL1, INREG(BUS_CNTL1));
964 OUTREG(BUS_CNTL1, (INREG(BUS_CNTL1) & ~0x4000) | 0x8000);
965 }
866 } 966 }
967#endif
867 968
868 OUTREG(CRTC_OFFSET_CNTL, (INREG(CRTC_OFFSET_CNTL) 969 OUTREG(CRTC_OFFSET_CNTL, (INREG(CRTC_OFFSET_CNTL)
869 & ~CRTC_OFFSET_CNTL__CRTC_STEREO_SYNC_OUT_EN)); 970 & ~CRTC_OFFSET_CNTL__CRTC_STEREO_SYNC_OUT_EN));
@@ -2713,7 +2814,7 @@ static void radeonfb_early_resume(void *data)
2713 2814
2714#endif /* CONFIG_PM */ 2815#endif /* CONFIG_PM */
2715 2816
2716void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk) 2817void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk, int ignore_devlist, int force_sleep)
2717{ 2818{
2718 /* Find PM registers in config space if any*/ 2819 /* Find PM registers in config space if any*/
2719 rinfo->pm_reg = pci_find_capability(rinfo->pdev, PCI_CAP_ID_PM); 2820 rinfo->pm_reg = pci_find_capability(rinfo->pdev, PCI_CAP_ID_PM);
@@ -2729,22 +2830,13 @@ void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk)
2729 } 2830 }
2730 2831
2731#if defined(CONFIG_PM) 2832#if defined(CONFIG_PM)
2833#if defined(CONFIG_PPC_PMAC)
2732 /* Check if we can power manage on suspend/resume. We can do 2834 /* Check if we can power manage on suspend/resume. We can do
2733 * D2 on M6, M7 and M9, and we can resume from D3 cold a few other 2835 * D2 on M6, M7 and M9, and we can resume from D3 cold a few other
2734 * "Mac" cards, but that's all. We need more infos about what the 2836 * "Mac" cards, but that's all. We need more infos about what the
2735 * BIOS does tho. Right now, all this PM stuff is pmac-only for that 2837 * BIOS does tho. Right now, all this PM stuff is pmac-only for that
2736 * reason. --BenH 2838 * reason. --BenH
2737 */ 2839 */
2738 /* Special case for Samsung P35 laptops
2739 */
2740 if ((rinfo->pdev->vendor == PCI_VENDOR_ID_ATI) &&
2741 (rinfo->pdev->device == PCI_CHIP_RV350_NP) &&
2742 (rinfo->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG) &&
2743 (rinfo->pdev->subsystem_device == 0xc00c)) {
2744 rinfo->reinit_func = radeon_reinitialize_M10;
2745 rinfo->pm_mode |= radeon_pm_off;
2746 }
2747#if defined(CONFIG_PPC_PMAC)
2748 if (machine_is(powermac) && rinfo->of_node) { 2840 if (machine_is(powermac) && rinfo->of_node) {
2749 if (rinfo->is_mobility && rinfo->pm_reg && 2841 if (rinfo->is_mobility && rinfo->pm_reg &&
2750 rinfo->family <= CHIP_FAMILY_RV250) 2842 rinfo->family <= CHIP_FAMILY_RV250)
@@ -2790,6 +2882,18 @@ void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk)
2790 } 2882 }
2791#endif /* defined(CONFIG_PPC_PMAC) */ 2883#endif /* defined(CONFIG_PPC_PMAC) */
2792#endif /* defined(CONFIG_PM) */ 2884#endif /* defined(CONFIG_PM) */
2885
2886 if (ignore_devlist)
2887 printk(KERN_DEBUG
2888 "radeonfb: skipping test for device workarounds\n");
2889 else
2890 radeon_apply_workarounds(rinfo);
2891
2892 if (force_sleep) {
2893 printk(KERN_DEBUG
2894 "radeonfb: forcefully enabling D2 sleep mode\n");
2895 rinfo->pm_mode |= radeon_pm_d2;
2896 }
2793} 2897}
2794 2898
2795void radeonfb_pm_exit(struct radeonfb_info *rinfo) 2899void radeonfb_pm_exit(struct radeonfb_info *rinfo)
diff --git a/drivers/video/aty/radeonfb.h b/drivers/video/aty/radeonfb.h
index 38657b2d10eb..d5ff224a6258 100644
--- a/drivers/video/aty/radeonfb.h
+++ b/drivers/video/aty/radeonfb.h
@@ -273,6 +273,8 @@ enum radeon_pm_mode {
273 radeon_pm_off = 0x00000002, /* Can resume from D3 cold */ 273 radeon_pm_off = 0x00000002, /* Can resume from D3 cold */
274}; 274};
275 275
276typedef void (*reinit_function_ptr)(struct radeonfb_info *rinfo);
277
276struct radeonfb_info { 278struct radeonfb_info {
277 struct fb_info *info; 279 struct fb_info *info;
278 280
@@ -338,7 +340,7 @@ struct radeonfb_info {
338 int dynclk; 340 int dynclk;
339 int no_schedule; 341 int no_schedule;
340 enum radeon_pm_mode pm_mode; 342 enum radeon_pm_mode pm_mode;
341 void (*reinit_func)(struct radeonfb_info *rinfo); 343 reinit_function_ptr reinit_func;
342 344
343 /* Lock on register access */ 345 /* Lock on register access */
344 spinlock_t reg_lock; 346 spinlock_t reg_lock;
@@ -600,7 +602,7 @@ extern int radeon_probe_i2c_connector(struct radeonfb_info *rinfo, int conn, u8
600/* PM Functions */ 602/* PM Functions */
601extern int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t state); 603extern int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t state);
602extern int radeonfb_pci_resume(struct pci_dev *pdev); 604extern int radeonfb_pci_resume(struct pci_dev *pdev);
603extern void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk); 605extern void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk, int ignore_devlist, int force_sleep);
604extern void radeonfb_pm_exit(struct radeonfb_info *rinfo); 606extern void radeonfb_pm_exit(struct radeonfb_info *rinfo);
605 607
606/* Monitor probe functions */ 608/* Monitor probe functions */
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 022f9d3473f5..02f15297a021 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -10,7 +10,7 @@ menuconfig BACKLIGHT_LCD_SUPPORT
10 10
11config BACKLIGHT_CLASS_DEVICE 11config BACKLIGHT_CLASS_DEVICE
12 tristate "Lowlevel Backlight controls" 12 tristate "Lowlevel Backlight controls"
13 depends on BACKLIGHT_LCD_SUPPORT && FB 13 depends on BACKLIGHT_LCD_SUPPORT
14 default m 14 default m
15 help 15 help
16 This framework adds support for low-level control of the LCD 16 This framework adds support for low-level control of the LCD
@@ -26,7 +26,7 @@ config BACKLIGHT_DEVICE
26 26
27config LCD_CLASS_DEVICE 27config LCD_CLASS_DEVICE
28 tristate "Lowlevel LCD controls" 28 tristate "Lowlevel LCD controls"
29 depends on BACKLIGHT_LCD_SUPPORT && FB 29 depends on BACKLIGHT_LCD_SUPPORT
30 default m 30 default m
31 help 31 help
32 This framework adds support for low-level control of LCD. 32 This framework adds support for low-level control of LCD.
diff --git a/drivers/video/console/mdacon.c b/drivers/video/console/mdacon.c
index 52ed12b12acc..eb4d03fa5391 100644
--- a/drivers/video/console/mdacon.c
+++ b/drivers/video/console/mdacon.c
@@ -197,7 +197,7 @@ static int __init mdacon_setup(char *str)
197__setup("mdacon=", mdacon_setup); 197__setup("mdacon=", mdacon_setup);
198#endif 198#endif
199 199
200static int __init mda_detect(void) 200static int mda_detect(void)
201{ 201{
202 int count=0; 202 int count=0;
203 u16 *p, p_save; 203 u16 *p, p_save;
@@ -282,7 +282,7 @@ static int __init mda_detect(void)
282 return 1; 282 return 1;
283} 283}
284 284
285static void __init mda_initialize(void) 285static void mda_initialize(void)
286{ 286{
287 write_mda_b(97, 0x00); /* horizontal total */ 287 write_mda_b(97, 0x00); /* horizontal total */
288 write_mda_b(80, 0x01); /* horizontal displayed */ 288 write_mda_b(80, 0x01); /* horizontal displayed */
diff --git a/drivers/video/fb_notify.c b/drivers/video/fb_notify.c
new file mode 100644
index 000000000000..8c020389e4fa
--- /dev/null
+++ b/drivers/video/fb_notify.c
@@ -0,0 +1,46 @@
1/*
2 * linux/drivers/video/fb_notify.c
3 *
4 * Copyright (C) 2006 Antonino Daplas <adaplas@pol.net>
5 *
6 * 2001 - Documented with DocBook
7 * - Brad Douglas <brad@neruo.com>
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file COPYING in the main directory of this archive
11 * for more details.
12 */
13#include <linux/fb.h>
14#include <linux/notifier.h>
15
16static BLOCKING_NOTIFIER_HEAD(fb_notifier_list);
17
18/**
19 * fb_register_client - register a client notifier
20 * @nb: notifier block to callback on events
21 */
22int fb_register_client(struct notifier_block *nb)
23{
24 return blocking_notifier_chain_register(&fb_notifier_list, nb);
25}
26EXPORT_SYMBOL(fb_register_client);
27
28/**
29 * fb_unregister_client - unregister a client notifier
30 * @nb: notifier block to callback on events
31 */
32int fb_unregister_client(struct notifier_block *nb)
33{
34 return blocking_notifier_chain_unregister(&fb_notifier_list, nb);
35}
36EXPORT_SYMBOL(fb_unregister_client);
37
38/**
39 * fb_notifier_call_chain - notify clients of fb_events
40 *
41 */
42int fb_notifier_call_chain(unsigned long val, void *v)
43{
44 return blocking_notifier_call_chain(&fb_notifier_list, val, v);
45}
46EXPORT_SYMBOL_GPL(fb_notifier_call_chain);
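fb_register_client()/fb_unregister_client() keep their existing semantics; only their home moves to fb_notify.c so that notifier clients no longer have to drag in fbmem.o. A minimal sketch of a client (names invented for the example; only the .info and .data fields of struct fb_event from <linux/fb.h> are relied on, matching how fbmem.c fills them in below):

#include <linux/fb.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

/* Called for every framebuffer event; this example only reacts to blanking. */
static int example_fb_notify(struct notifier_block *self,
			     unsigned long action, void *data)
{
	struct fb_event *event = data;

	if (action == FB_EVENT_BLANK) {
		int blank = *(int *)event->data;

		printk(KERN_DEBUG "fb %s: blank level %d\n",
		       event->info->fix.id, blank);
	}
	return 0;
}

static struct notifier_block example_fb_nb = {
	.notifier_call = example_fb_notify,
};

/* In the client's init/exit paths:
 *	fb_register_client(&example_fb_nb);
 *	fb_unregister_client(&example_fb_nb);
 */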
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 4fc9df426c1a..17961e3ecaa0 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -52,7 +52,6 @@
52 52
53#define FBPIXMAPSIZE (1024 * 8) 53#define FBPIXMAPSIZE (1024 * 8)
54 54
55static BLOCKING_NOTIFIER_HEAD(fb_notifier_list);
56struct fb_info *registered_fb[FB_MAX]; 55struct fb_info *registered_fb[FB_MAX];
57int num_registered_fb; 56int num_registered_fb;
58 57
@@ -791,8 +790,7 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
791 790
792 event.info = info; 791 event.info = info;
793 event.data = &mode1; 792 event.data = &mode1;
794 ret = blocking_notifier_call_chain(&fb_notifier_list, 793 ret = fb_notifier_call_chain(FB_EVENT_MODE_DELETE, &event);
795 FB_EVENT_MODE_DELETE, &event);
796 } 794 }
797 795
798 if (!ret) 796 if (!ret)
@@ -837,8 +835,7 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
837 835
838 info->flags &= ~FBINFO_MISC_USEREVENT; 836 info->flags &= ~FBINFO_MISC_USEREVENT;
839 event.info = info; 837 event.info = info;
840 blocking_notifier_call_chain(&fb_notifier_list, 838 fb_notifier_call_chain(evnt, &event);
841 evnt, &event);
842 } 839 }
843 } 840 }
844 } 841 }
@@ -861,8 +858,7 @@ fb_blank(struct fb_info *info, int blank)
861 858
862 event.info = info; 859 event.info = info;
863 event.data = &blank; 860 event.data = &blank;
864 blocking_notifier_call_chain(&fb_notifier_list, 861 fb_notifier_call_chain(FB_EVENT_BLANK, &event);
865 FB_EVENT_BLANK, &event);
866 } 862 }
867 863
868 return ret; 864 return ret;
@@ -933,8 +929,7 @@ fb_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
933 con2fb.framebuffer = -1; 929 con2fb.framebuffer = -1;
934 event.info = info; 930 event.info = info;
935 event.data = &con2fb; 931 event.data = &con2fb;
936 blocking_notifier_call_chain(&fb_notifier_list, 932 fb_notifier_call_chain(FB_EVENT_GET_CONSOLE_MAP, &event);
937 FB_EVENT_GET_CONSOLE_MAP, &event);
938 return copy_to_user(argp, &con2fb, 933 return copy_to_user(argp, &con2fb,
939 sizeof(con2fb)) ? -EFAULT : 0; 934 sizeof(con2fb)) ? -EFAULT : 0;
940 case FBIOPUT_CON2FBMAP: 935 case FBIOPUT_CON2FBMAP:
@@ -952,9 +947,8 @@ fb_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
952 return -EINVAL; 947 return -EINVAL;
953 event.info = info; 948 event.info = info;
954 event.data = &con2fb; 949 event.data = &con2fb;
955 return blocking_notifier_call_chain(&fb_notifier_list, 950 return fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP,
956 FB_EVENT_SET_CONSOLE_MAP, 951 &event);
957 &event);
958 case FBIOBLANK: 952 case FBIOBLANK:
959 acquire_console_sem(); 953 acquire_console_sem();
960 info->flags |= FBINFO_MISC_USEREVENT; 954 info->flags |= FBINFO_MISC_USEREVENT;
@@ -1330,8 +1324,7 @@ register_framebuffer(struct fb_info *fb_info)
1330 registered_fb[i] = fb_info; 1324 registered_fb[i] = fb_info;
1331 1325
1332 event.info = fb_info; 1326 event.info = fb_info;
1333 blocking_notifier_call_chain(&fb_notifier_list, 1327 fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event);
1334 FB_EVENT_FB_REGISTERED, &event);
1335 return 0; 1328 return 0;
1336} 1329}
1337 1330
@@ -1365,30 +1358,11 @@ unregister_framebuffer(struct fb_info *fb_info)
1365 fb_cleanup_class_device(fb_info); 1358 fb_cleanup_class_device(fb_info);
1366 class_device_destroy(fb_class, MKDEV(FB_MAJOR, i)); 1359 class_device_destroy(fb_class, MKDEV(FB_MAJOR, i));
1367 event.info = fb_info; 1360 event.info = fb_info;
1368 blocking_notifier_call_chain(&fb_notifier_list, 1361 fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
1369 FB_EVENT_FB_UNREGISTERED, &event);
1370 return 0; 1362 return 0;
1371} 1363}
1372 1364
1373/** 1365/**
1374 * fb_register_client - register a client notifier
1375 * @nb: notifier block to callback on events
1376 */
1377int fb_register_client(struct notifier_block *nb)
1378{
1379 return blocking_notifier_chain_register(&fb_notifier_list, nb);
1380}
1381
1382/**
1383 * fb_unregister_client - unregister a client notifier
1384 * @nb: notifier block to callback on events
1385 */
1386int fb_unregister_client(struct notifier_block *nb)
1387{
1388 return blocking_notifier_chain_unregister(&fb_notifier_list, nb);
1389}
1390
1391/**
1392 * fb_set_suspend - low level driver signals suspend 1366 * fb_set_suspend - low level driver signals suspend
1393 * @info: framebuffer affected 1367 * @info: framebuffer affected
1394 * @state: 0 = resuming, !=0 = suspending 1368 * @state: 0 = resuming, !=0 = suspending
@@ -1403,13 +1377,11 @@ void fb_set_suspend(struct fb_info *info, int state)
1403 1377
1404 event.info = info; 1378 event.info = info;
1405 if (state) { 1379 if (state) {
1406 blocking_notifier_call_chain(&fb_notifier_list, 1380 fb_notifier_call_chain(FB_EVENT_SUSPEND, &event);
1407 FB_EVENT_SUSPEND, &event);
1408 info->state = FBINFO_STATE_SUSPENDED; 1381 info->state = FBINFO_STATE_SUSPENDED;
1409 } else { 1382 } else {
1410 info->state = FBINFO_STATE_RUNNING; 1383 info->state = FBINFO_STATE_RUNNING;
1411 blocking_notifier_call_chain(&fb_notifier_list, 1384 fb_notifier_call_chain(FB_EVENT_RESUME, &event);
1412 FB_EVENT_RESUME, &event);
1413 } 1385 }
1414} 1386}
1415 1387
@@ -1480,9 +1452,7 @@ int fb_new_modelist(struct fb_info *info)
1480 1452
1481 if (!list_empty(&info->modelist)) { 1453 if (!list_empty(&info->modelist)) {
1482 event.info = info; 1454 event.info = info;
1483 err = blocking_notifier_call_chain(&fb_notifier_list, 1455 err = fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event);
1484 FB_EVENT_NEW_MODELIST,
1485 &event);
1486 } 1456 }
1487 1457
1488 return err; 1458 return err;
@@ -1594,8 +1564,6 @@ EXPORT_SYMBOL(fb_blank);
1594EXPORT_SYMBOL(fb_pan_display); 1564EXPORT_SYMBOL(fb_pan_display);
1595EXPORT_SYMBOL(fb_get_buffer_offset); 1565EXPORT_SYMBOL(fb_get_buffer_offset);
1596EXPORT_SYMBOL(fb_set_suspend); 1566EXPORT_SYMBOL(fb_set_suspend);
1597EXPORT_SYMBOL(fb_register_client);
1598EXPORT_SYMBOL(fb_unregister_client);
1599EXPORT_SYMBOL(fb_get_options); 1567EXPORT_SYMBOL(fb_get_options);
1600 1568
1601MODULE_LICENSE("GPL"); 1569MODULE_LICENSE("GPL");
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index 9f2066f0745a..d4f850117874 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -34,10 +34,6 @@
34#include "nv_proto.h" 34#include "nv_proto.h"
35#include "nv_dma.h" 35#include "nv_dma.h"
36 36
37#ifndef CONFIG_PCI /* sanity check */
38#error This driver requires PCI support.
39#endif
40
41#undef CONFIG_FB_NVIDIA_DEBUG 37#undef CONFIG_FB_NVIDIA_DEBUG
42#ifdef CONFIG_FB_NVIDIA_DEBUG 38#ifdef CONFIG_FB_NVIDIA_DEBUG
43#define NVTRACE printk 39#define NVTRACE printk
@@ -1303,20 +1299,19 @@ static int __devinit nvidiafb_probe(struct pci_dev *pd,
1303 1299
1304 nvidia_save_vga(par, &par->SavedReg); 1300 nvidia_save_vga(par, &par->SavedReg);
1305 1301
1302 pci_set_drvdata(pd, info);
1303 nvidia_bl_init(par);
1306 if (register_framebuffer(info) < 0) { 1304 if (register_framebuffer(info) < 0) {
1307 printk(KERN_ERR PFX "error registering nVidia framebuffer\n"); 1305 printk(KERN_ERR PFX "error registering nVidia framebuffer\n");
1308 goto err_out_iounmap_fb; 1306 goto err_out_iounmap_fb;
1309 } 1307 }
1310 1308
1311 pci_set_drvdata(pd, info);
1312 1309
1313 printk(KERN_INFO PFX 1310 printk(KERN_INFO PFX
1314 "PCI nVidia %s framebuffer (%dMB @ 0x%lX)\n", 1311 "PCI nVidia %s framebuffer (%dMB @ 0x%lX)\n",
1315 info->fix.id, 1312 info->fix.id,
1316 par->FbMapSize / (1024 * 1024), info->fix.smem_start); 1313 par->FbMapSize / (1024 * 1024), info->fix.smem_start);
1317 1314
1318 nvidia_bl_init(par);
1319
1320 NVTRACE_LEAVE(); 1315 NVTRACE_LEAVE();
1321 return 0; 1316 return 0;
1322 1317
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index 33dddbae5420..76fc9d355eb7 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -2132,6 +2132,9 @@ static int __devinit rivafb_probe(struct pci_dev *pd,
2132 2132
2133 fb_destroy_modedb(info->monspecs.modedb); 2133 fb_destroy_modedb(info->monspecs.modedb);
2134 info->monspecs.modedb = NULL; 2134 info->monspecs.modedb = NULL;
2135
2136 pci_set_drvdata(pd, info);
2137 riva_bl_init(info->par);
2135 ret = register_framebuffer(info); 2138 ret = register_framebuffer(info);
2136 if (ret < 0) { 2139 if (ret < 0) {
2137 printk(KERN_ERR PFX 2140 printk(KERN_ERR PFX
@@ -2139,8 +2142,6 @@ static int __devinit rivafb_probe(struct pci_dev *pd,
2139 goto err_iounmap_screen_base; 2142 goto err_iounmap_screen_base;
2140 } 2143 }
2141 2144
2142 pci_set_drvdata(pd, info);
2143
2144 printk(KERN_INFO PFX 2145 printk(KERN_INFO PFX
2145 "PCI nVidia %s framebuffer ver %s (%dMB @ 0x%lX)\n", 2146 "PCI nVidia %s framebuffer ver %s (%dMB @ 0x%lX)\n",
2146 info->fix.id, 2147 info->fix.id,
@@ -2148,8 +2149,6 @@ static int __devinit rivafb_probe(struct pci_dev *pd,
2148 info->fix.smem_len / (1024 * 1024), 2149 info->fix.smem_len / (1024 * 1024),
2149 info->fix.smem_start); 2150 info->fix.smem_start);
2150 2151
2151 riva_bl_init(info->par);
2152
2153 NVTRACE_LEAVE(); 2152 NVTRACE_LEAVE();
2154 return 0; 2153 return 0;
2155 2154
diff --git a/fs/9p/conv.c b/fs/9p/conv.c
index 1e898144eb7c..56d88c1a09c5 100644
--- a/fs/9p/conv.c
+++ b/fs/9p/conv.c
@@ -673,8 +673,10 @@ struct v9fs_fcall *v9fs_create_tcreate(u32 fid, char *name, u32 perm, u8 mode,
673 struct cbuf *bufp = &buffer; 673 struct cbuf *bufp = &buffer;
674 674
675 size = 4 + 2 + strlen(name) + 4 + 1; /* fid[4] name[s] perm[4] mode[1] */ 675 size = 4 + 2 + strlen(name) + 4 + 1; /* fid[4] name[s] perm[4] mode[1] */
676 if (extended && extension!=NULL) 676 if (extended) {
677 size += 2 + strlen(extension); /* extension[s] */ 677 size += 2 + /* extension[s] */
678 (extension == NULL ? 0 : strlen(extension));
679 }
678 680
679 fc = v9fs_create_common(bufp, size, TCREATE); 681 fc = v9fs_create_common(bufp, size, TCREATE);
680 if (IS_ERR(fc)) 682 if (IS_ERR(fc))
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 2f580a197b8d..eae50c9d6dc4 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -434,11 +434,11 @@ static int v9fs_remove(struct inode *dir, struct dentry *file, int rmdir)
434 result = v9fs_t_remove(v9ses, fid, &fcall); 434 result = v9fs_t_remove(v9ses, fid, &fcall);
435 if (result < 0) { 435 if (result < 0) {
436 PRINT_FCALL_ERROR("remove fails", fcall); 436 PRINT_FCALL_ERROR("remove fails", fcall);
437 } else {
438 v9fs_put_idpool(fid, &v9ses->fidpool);
439 v9fs_fid_destroy(v9fid);
440 } 437 }
441 438
439 v9fs_put_idpool(fid, &v9ses->fidpool);
440 v9fs_fid_destroy(v9fid);
441
442 kfree(fcall); 442 kfree(fcall);
443 return result; 443 return result;
444} 444}
diff --git a/fs/buffer.c b/fs/buffer.c
index 3660dcb97591..71649ef9b658 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -470,13 +470,18 @@ out:
470 pass does the actual I/O. */ 470 pass does the actual I/O. */
471void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers) 471void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
472{ 472{
473 struct address_space *mapping = bdev->bd_inode->i_mapping;
474
475 if (mapping->nrpages == 0)
476 return;
477
473 invalidate_bh_lrus(); 478 invalidate_bh_lrus();
474 /* 479 /*
475 * FIXME: what about destroy_dirty_buffers? 480 * FIXME: what about destroy_dirty_buffers?
476 * We really want to use invalidate_inode_pages2() for 481 * We really want to use invalidate_inode_pages2() for
477 * that, but not until that's cleaned up. 482 * that, but not until that's cleaned up.
478 */ 483 */
479 invalidate_inode_pages(bdev->bd_inode->i_mapping); 484 invalidate_inode_pages(mapping);
480} 485}
481 486
482/* 487/*
diff --git a/fs/coda/file.c b/fs/coda/file.c
index cc66c681bd11..dbfbcfa5b3c0 100644
--- a/fs/coda/file.c
+++ b/fs/coda/file.c
@@ -136,10 +136,8 @@ int coda_open(struct inode *coda_inode, struct file *coda_file)
136 coda_vfs_stat.open++; 136 coda_vfs_stat.open++;
137 137
138 cfi = kmalloc(sizeof(struct coda_file_info), GFP_KERNEL); 138 cfi = kmalloc(sizeof(struct coda_file_info), GFP_KERNEL);
139 if (!cfi) { 139 if (!cfi)
140 unlock_kernel();
141 return -ENOMEM; 140 return -ENOMEM;
142 }
143 141
144 lock_kernel(); 142 lock_kernel();
145 143
diff --git a/fs/efs/symlink.c b/fs/efs/symlink.c
index e249cf733a6b..1d30d2ff440f 100644
--- a/fs/efs/symlink.c
+++ b/fs/efs/symlink.c
@@ -22,7 +22,7 @@ static int efs_symlink_readpage(struct file *file, struct page *page)
22 22
23 err = -ENAMETOOLONG; 23 err = -ENAMETOOLONG;
24 if (size > 2 * EFS_BLOCKSIZE) 24 if (size > 2 * EFS_BLOCKSIZE)
25 goto fail; 25 goto fail_notlocked;
26 26
27 lock_kernel(); 27 lock_kernel();
28 /* read first 512 bytes of link target */ 28 /* read first 512 bytes of link target */
@@ -47,6 +47,7 @@ static int efs_symlink_readpage(struct file *file, struct page *page)
47 return 0; 47 return 0;
48fail: 48fail:
49 unlock_kernel(); 49 unlock_kernel();
50fail_notlocked:
50 SetPageError(page); 51 SetPageError(page);
51 kunmap(page); 52 kunmap(page);
52 unlock_page(page); 53 unlock_page(page);
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index f804d5e9d60c..c5ee9f0691e3 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -1158,7 +1158,7 @@ retry:
1158 ret = PTR_ERR(handle); 1158 ret = PTR_ERR(handle);
1159 goto out; 1159 goto out;
1160 } 1160 }
1161 if (test_opt(inode->i_sb, NOBH)) 1161 if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode))
1162 ret = nobh_prepare_write(page, from, to, ext3_get_block); 1162 ret = nobh_prepare_write(page, from, to, ext3_get_block);
1163 else 1163 else
1164 ret = block_prepare_write(page, from, to, ext3_get_block); 1164 ret = block_prepare_write(page, from, to, ext3_get_block);
@@ -1244,7 +1244,7 @@ static int ext3_writeback_commit_write(struct file *file, struct page *page,
1244 if (new_i_size > EXT3_I(inode)->i_disksize) 1244 if (new_i_size > EXT3_I(inode)->i_disksize)
1245 EXT3_I(inode)->i_disksize = new_i_size; 1245 EXT3_I(inode)->i_disksize = new_i_size;
1246 1246
1247 if (test_opt(inode->i_sb, NOBH)) 1247 if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode))
1248 ret = nobh_commit_write(file, page, from, to); 1248 ret = nobh_commit_write(file, page, from, to);
1249 else 1249 else
1250 ret = generic_commit_write(file, page, from, to); 1250 ret = generic_commit_write(file, page, from, to);
@@ -1494,7 +1494,7 @@ static int ext3_writeback_writepage(struct page *page,
1494 goto out_fail; 1494 goto out_fail;
1495 } 1495 }
1496 1496
1497 if (test_opt(inode->i_sb, NOBH)) 1497 if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode))
1498 ret = nobh_writepage(page, ext3_get_block, wbc); 1498 ret = nobh_writepage(page, ext3_get_block, wbc);
1499 else 1499 else
1500 ret = block_write_full_page(page, ext3_get_block, wbc); 1500 ret = block_write_full_page(page, ext3_get_block, wbc);
@@ -2402,14 +2402,15 @@ static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb,
2402 struct buffer_head *bh; 2402 struct buffer_head *bh;
2403 struct ext3_group_desc * gdp; 2403 struct ext3_group_desc * gdp;
2404 2404
2405 2405 if (!ext3_valid_inum(sb, ino)) {
2406 if ((ino != EXT3_ROOT_INO && ino != EXT3_JOURNAL_INO && 2406 /*
2407 ino != EXT3_RESIZE_INO && ino < EXT3_FIRST_INO(sb)) || 2407 * This error is already checked for in namei.c unless we are
2408 ino > le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count)) { 2408 * looking at an NFS filehandle, in which case no error
2409 ext3_error(sb, "ext3_get_inode_block", 2409 * report is needed
2410 "bad inode number: %lu", ino); 2410 */
2411 return 0; 2411 return 0;
2412 } 2412 }
2413
2413 block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb); 2414 block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
2414 if (block_group >= EXT3_SB(sb)->s_groups_count) { 2415 if (block_group >= EXT3_SB(sb)->s_groups_count) {
2415 ext3_error(sb,"ext3_get_inode_block","group >= groups count"); 2416 ext3_error(sb,"ext3_get_inode_block","group >= groups count");
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index d9176dba3698..2aa7101b27cd 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -1000,7 +1000,12 @@ static struct dentry *ext3_lookup(struct inode * dir, struct dentry *dentry, str
1000 if (bh) { 1000 if (bh) {
1001 unsigned long ino = le32_to_cpu(de->inode); 1001 unsigned long ino = le32_to_cpu(de->inode);
1002 brelse (bh); 1002 brelse (bh);
1003 inode = iget(dir->i_sb, ino); 1003 if (!ext3_valid_inum(dir->i_sb, ino)) {
1004 ext3_error(dir->i_sb, "ext3_lookup",
1005 "bad inode number: %lu", ino);
1006 inode = NULL;
1007 } else
1008 inode = iget(dir->i_sb, ino);
1004 1009
1005 if (!inode) 1010 if (!inode)
1006 return ERR_PTR(-EACCES); 1011 return ERR_PTR(-EACCES);
@@ -1028,7 +1033,13 @@ struct dentry *ext3_get_parent(struct dentry *child)
1028 return ERR_PTR(-ENOENT); 1033 return ERR_PTR(-ENOENT);
1029 ino = le32_to_cpu(de->inode); 1034 ino = le32_to_cpu(de->inode);
1030 brelse(bh); 1035 brelse(bh);
1031 inode = iget(child->d_inode->i_sb, ino); 1036
1037 if (!ext3_valid_inum(child->d_inode->i_sb, ino)) {
1038 ext3_error(child->d_inode->i_sb, "ext3_get_parent",
1039 "bad inode number: %lu", ino);
1040 inode = NULL;
1041 } else
1042 inode = iget(child->d_inode->i_sb, ino);
1032 1043
1033 if (!inode) 1044 if (!inode)
1034 return ERR_PTR(-EACCES); 1045 return ERR_PTR(-EACCES);
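Both ext3 call sites above now funnel the inode-number sanity check through ext3_valid_inum() instead of open-coding it. The helper itself is not part of these hunks; reconstructed from the check that ext3_get_inode_block() used to carry, it presumably reads roughly as follows (a sketch, not necessarily the exact in-tree definition):

static inline int ext3_valid_inum(struct super_block *sb, unsigned long ino)
{
	return ino == EXT3_ROOT_INO ||
	       ino == EXT3_JOURNAL_INO ||
	       ino == EXT3_RESIZE_INO ||
	       (ino >= EXT3_FIRST_INO(sb) &&
		ino <= le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count));
}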
diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c
index 29cce456c7ce..43886fa00a2a 100644
--- a/fs/freevxfs/vxfs_lookup.c
+++ b/fs/freevxfs/vxfs_lookup.c
@@ -246,6 +246,8 @@ vxfs_readdir(struct file *fp, void *retp, filldir_t filler)
246 u_long page, npages, block, pblocks, nblocks, offset; 246 u_long page, npages, block, pblocks, nblocks, offset;
247 loff_t pos; 247 loff_t pos;
248 248
249 lock_kernel();
250
249 switch ((long)fp->f_pos) { 251 switch ((long)fp->f_pos) {
250 case 0: 252 case 0:
251 if (filler(retp, ".", 1, fp->f_pos, ip->i_ino, DT_DIR) < 0) 253 if (filler(retp, ".", 1, fp->f_pos, ip->i_ino, DT_DIR) < 0)
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index a3bce3a77253..46fe60b2da23 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -105,7 +105,7 @@ static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
105 105
106/* 106/*
107 * Add a connection to the control filesystem (if it exists). Caller 107 * Add a connection to the control filesystem (if it exists). Caller
108 * must host fuse_mutex 108 * must hold fuse_mutex
109 */ 109 */
110int fuse_ctl_add_conn(struct fuse_conn *fc) 110int fuse_ctl_add_conn(struct fuse_conn *fc)
111{ 111{
@@ -139,7 +139,7 @@ int fuse_ctl_add_conn(struct fuse_conn *fc)
139 139
140/* 140/*
141 * Remove a connection from the control filesystem (if it exists). 141 * Remove a connection from the control filesystem (if it exists).
142 * Caller must host fuse_mutex 142 * Caller must hold fuse_mutex
143 */ 143 */
144void fuse_ctl_remove_conn(struct fuse_conn *fc) 144void fuse_ctl_remove_conn(struct fuse_conn *fc)
145{ 145{
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 72a74cde6de8..409ce6a7cca4 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -14,6 +14,33 @@
14#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/namei.h> 15#include <linux/namei.h>
16 16
17#if BITS_PER_LONG >= 64
18static inline void fuse_dentry_settime(struct dentry *entry, u64 time)
19{
20 entry->d_time = time;
21}
22
23static inline u64 fuse_dentry_time(struct dentry *entry)
24{
25 return entry->d_time;
26}
27#else
28/*
29 * On 32 bit archs store the high 32 bits of time in d_fsdata
30 */
31static void fuse_dentry_settime(struct dentry *entry, u64 time)
32{
33 entry->d_time = time;
34 entry->d_fsdata = (void *) (unsigned long) (time >> 32);
35}
36
37static u64 fuse_dentry_time(struct dentry *entry)
38{
39 return (u64) entry->d_time +
40 ((u64) (unsigned long) entry->d_fsdata << 32);
41}
42#endif
43
17/* 44/*
18 * FUSE caches dentries and attributes with separate timeout. The 45 * FUSE caches dentries and attributes with separate timeout. The
19 * time in jiffies until the dentry/attributes are valid is stored in 46 * time in jiffies until the dentry/attributes are valid is stored in
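On 32-bit targets the split above stores the low half of the 64-bit timestamp in d_time (an unsigned long) and the high half, cast through a pointer, in d_fsdata. A tiny self-contained round-trip check of that packing, written with explicit 32-bit fields standing in for a 32-bit arch (names are made up for the demo):

#include <assert.h>
#include <stdint.h>

/* Models the 32-bit case: each field holds 32 bits of the timestamp. */
struct fake_dentry {
	uint32_t d_time;	/* low 32 bits */
	uint32_t d_fsdata;	/* high 32 bits */
};

static void settime(struct fake_dentry *d, uint64_t time)
{
	d->d_time = (uint32_t)time;
	d->d_fsdata = (uint32_t)(time >> 32);
}

static uint64_t gettime(const struct fake_dentry *d)
{
	return (uint64_t)d->d_time + ((uint64_t)d->d_fsdata << 32);
}

int main(void)
{
	struct fake_dentry d;

	settime(&d, 0x123456789abcdef0ULL);
	assert(gettime(&d) == 0x123456789abcdef0ULL);
	return 0;
}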
@@ -23,10 +50,13 @@
23/* 50/*
24 * Calculate the time in jiffies until a dentry/attributes are valid 51 * Calculate the time in jiffies until a dentry/attributes are valid
25 */ 52 */
26static unsigned long time_to_jiffies(unsigned long sec, unsigned long nsec) 53static u64 time_to_jiffies(unsigned long sec, unsigned long nsec)
27{ 54{
28 struct timespec ts = {sec, nsec}; 55 if (sec || nsec) {
29 return jiffies + timespec_to_jiffies(&ts); 56 struct timespec ts = {sec, nsec};
57 return get_jiffies_64() + timespec_to_jiffies(&ts);
58 } else
59 return 0;
30} 60}
31 61
32/* 62/*
@@ -35,7 +65,8 @@ static unsigned long time_to_jiffies(unsigned long sec, unsigned long nsec)
35 */ 65 */
36static void fuse_change_timeout(struct dentry *entry, struct fuse_entry_out *o) 66static void fuse_change_timeout(struct dentry *entry, struct fuse_entry_out *o)
37{ 67{
38 entry->d_time = time_to_jiffies(o->entry_valid, o->entry_valid_nsec); 68 fuse_dentry_settime(entry,
69 time_to_jiffies(o->entry_valid, o->entry_valid_nsec));
39 if (entry->d_inode) 70 if (entry->d_inode)
40 get_fuse_inode(entry->d_inode)->i_time = 71 get_fuse_inode(entry->d_inode)->i_time =
41 time_to_jiffies(o->attr_valid, o->attr_valid_nsec); 72 time_to_jiffies(o->attr_valid, o->attr_valid_nsec);
@@ -47,7 +78,7 @@ static void fuse_change_timeout(struct dentry *entry, struct fuse_entry_out *o)
47 */ 78 */
48void fuse_invalidate_attr(struct inode *inode) 79void fuse_invalidate_attr(struct inode *inode)
49{ 80{
50 get_fuse_inode(inode)->i_time = jiffies - 1; 81 get_fuse_inode(inode)->i_time = 0;
51} 82}
52 83
53/* 84/*
@@ -60,7 +91,7 @@ void fuse_invalidate_attr(struct inode *inode)
60 */ 91 */
61static void fuse_invalidate_entry_cache(struct dentry *entry) 92static void fuse_invalidate_entry_cache(struct dentry *entry)
62{ 93{
63 entry->d_time = jiffies - 1; 94 fuse_dentry_settime(entry, 0);
64} 95}
65 96
66/* 97/*
@@ -102,7 +133,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
102 133
103 if (inode && is_bad_inode(inode)) 134 if (inode && is_bad_inode(inode))
104 return 0; 135 return 0;
105 else if (time_after(jiffies, entry->d_time)) { 136 else if (fuse_dentry_time(entry) < get_jiffies_64()) {
106 int err; 137 int err;
107 struct fuse_entry_out outarg; 138 struct fuse_entry_out outarg;
108 struct fuse_conn *fc; 139 struct fuse_conn *fc;
@@ -666,7 +697,7 @@ static int fuse_revalidate(struct dentry *entry)
666 if (!fuse_allow_task(fc, current)) 697 if (!fuse_allow_task(fc, current))
667 return -EACCES; 698 return -EACCES;
668 if (get_node_id(inode) != FUSE_ROOT_ID && 699 if (get_node_id(inode) != FUSE_ROOT_ID &&
669 time_before_eq(jiffies, fi->i_time)) 700 fi->i_time >= get_jiffies_64())
670 return 0; 701 return 0;
671 702
672 return fuse_do_getattr(inode); 703 return fuse_do_getattr(inode);
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 0dbf96621841..69c7750d55b8 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -59,7 +59,7 @@ struct fuse_inode {
59 struct fuse_req *forget_req; 59 struct fuse_req *forget_req;
60 60
61 /** Time in jiffies until the file attributes are valid */ 61 /** Time in jiffies until the file attributes are valid */
62 unsigned long i_time; 62 u64 i_time;
63}; 63};
64 64
65/** FUSE specific file data */ 65/** FUSE specific file data */
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index dcaaabd3b9c4..7d25092262ae 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -51,7 +51,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
51 return NULL; 51 return NULL;
52 52
53 fi = get_fuse_inode(inode); 53 fi = get_fuse_inode(inode);
54 fi->i_time = jiffies - 1; 54 fi->i_time = 0;
55 fi->nodeid = 0; 55 fi->nodeid = 0;
56 fi->nlookup = 0; 56 fi->nlookup = 0;
57 fi->forget_req = fuse_request_alloc(); 57 fi->forget_req = fuse_request_alloc();
diff --git a/fs/inotify_user.c b/fs/inotify_user.c
index f2386442adee..017cb0f134d6 100644
--- a/fs/inotify_user.c
+++ b/fs/inotify_user.c
@@ -187,7 +187,7 @@ static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie,
187{ 187{
188 struct inotify_kernel_event *kevent; 188 struct inotify_kernel_event *kevent;
189 189
190 kevent = kmem_cache_alloc(event_cachep, GFP_KERNEL); 190 kevent = kmem_cache_alloc(event_cachep, GFP_NOFS);
191 if (unlikely(!kevent)) 191 if (unlikely(!kevent))
192 return NULL; 192 return NULL;
193 193
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index ecc439d2565f..501d83884530 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -187,6 +187,11 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
187 goto out; 187 goto out;
188 } 188 }
189 189
190 /* Set user creds for this exportpoint */
191 error = nfserrno(nfsd_setuser(rqstp, exp));
192 if (error)
193 goto out;
194
190 /* 195 /*
191 * Look up the dentry using the NFS file handle. 196 * Look up the dentry using the NFS file handle.
192 */ 197 */
@@ -241,16 +246,17 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
241 dprintk("nfsd: fh_verify - just checking\n"); 246 dprintk("nfsd: fh_verify - just checking\n");
242 dentry = fhp->fh_dentry; 247 dentry = fhp->fh_dentry;
243 exp = fhp->fh_export; 248 exp = fhp->fh_export;
249 /* Set user creds for this exportpoint; necessary even
250 * in the "just checking" case because this may be a
251 * filehandle that was created by fh_compose, and that
252 * is about to be used in another nfsv4 compound
253 * operation */
254 error = nfserrno(nfsd_setuser(rqstp, exp));
255 if (error)
256 goto out;
244 } 257 }
245 cache_get(&exp->h); 258 cache_get(&exp->h);
246 259
247 /* Set user creds for this exportpoint; necessary even in the "just
248 * checking" case because this may be a filehandle that was created by
249 * fh_compose, and that is about to be used in another nfsv4 compound
250 * operation */
251 error = nfserrno(nfsd_setuser(rqstp, exp));
252 if (error)
253 goto out;
254 260
255 error = nfsd_mode_check(rqstp, dentry->d_inode->i_mode, type); 261 error = nfsd_mode_check(rqstp, dentry->d_inode->i_mode, type);
256 if (error) 262 if (error)
diff --git a/fs/partitions/Kconfig b/fs/partitions/Kconfig
index c9a478099281..e478f1941831 100644
--- a/fs/partitions/Kconfig
+++ b/fs/partitions/Kconfig
@@ -99,7 +99,7 @@ config IBM_PARTITION
99 99
100config MAC_PARTITION 100config MAC_PARTITION
101 bool "Macintosh partition map support" if PARTITION_ADVANCED 101 bool "Macintosh partition map support" if PARTITION_ADVANCED
102 default y if MAC 102 default y if (MAC || PPC_PMAC)
103 help 103 help
104 Say Y here if you would like to use hard disks under Linux which 104 Say Y here if you would like to use hard disks under Linux which
105 were partitioned on a Macintosh. 105 were partitioned on a Macintosh.
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index abd5f23a426d..d344b411e261 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -129,7 +129,7 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry,
129 struct inode * inode; 129 struct inode * inode;
130 130
131 if (l > sb->s_blocksize) 131 if (l > sb->s_blocksize)
132 goto out; 132 goto out_notlocked;
133 133
134 lock_kernel(); 134 lock_kernel();
135 inode = ufs_new_inode(dir, S_IFLNK | S_IRWXUGO); 135 inode = ufs_new_inode(dir, S_IFLNK | S_IRWXUGO);
@@ -155,6 +155,7 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry,
155 err = ufs_add_nondir(dentry, inode); 155 err = ufs_add_nondir(dentry, inode);
156out: 156out:
157 unlock_kernel(); 157 unlock_kernel();
158out_notlocked:
158 return err; 159 return err;
159 160
160out_fail: 161out_fail:
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index ceda3a2859d2..7858703ed84c 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -246,8 +246,8 @@ extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *);
246#define BUF_BUSY XBF_DONT_BLOCK 246#define BUF_BUSY XBF_DONT_BLOCK
247 247
248#define XFS_BUF_BFLAGS(bp) ((bp)->b_flags) 248#define XFS_BUF_BFLAGS(bp) ((bp)->b_flags)
249#define XFS_BUF_ZEROFLAGS(bp) \ 249#define XFS_BUF_ZEROFLAGS(bp) ((bp)->b_flags &= \
250 ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI)) 250 ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI|XBF_ORDERED))
251 251
252#define XFS_BUF_STALE(bp) ((bp)->b_flags |= XFS_B_STALE) 252#define XFS_BUF_STALE(bp) ((bp)->b_flags |= XFS_B_STALE)
253#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XFS_B_STALE) 253#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XFS_B_STALE)
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 9bdef9d51900..4754f342a5d3 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -314,6 +314,13 @@ xfs_mountfs_check_barriers(xfs_mount_t *mp)
314 return; 314 return;
315 } 315 }
316 316
317 if (xfs_readonly_buftarg(mp->m_ddev_targp)) {
318 xfs_fs_cmn_err(CE_NOTE, mp,
319 "Disabling barriers, underlying device is readonly");
320 mp->m_flags &= ~XFS_MOUNT_BARRIER;
321 return;
322 }
323
317 error = xfs_barrier_test(mp); 324 error = xfs_barrier_test(mp);
318 if (error) { 325 if (error) {
319 xfs_fs_cmn_err(CE_NOTE, mp, 326 xfs_fs_cmn_err(CE_NOTE, mp,
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c
index e95e99f7168f..f137856c3261 100644
--- a/fs/xfs/quota/xfs_qm_bhv.c
+++ b/fs/xfs/quota/xfs_qm_bhv.c
@@ -217,17 +217,24 @@ xfs_qm_statvfs(
217 return 0; 217 return 0;
218 dp = &dqp->q_core; 218 dp = &dqp->q_core;
219 219
220 limit = dp->d_blk_softlimit ? dp->d_blk_softlimit : dp->d_blk_hardlimit; 220 limit = dp->d_blk_softlimit ?
221 be64_to_cpu(dp->d_blk_softlimit) :
222 be64_to_cpu(dp->d_blk_hardlimit);
221 if (limit && statp->f_blocks > limit) { 223 if (limit && statp->f_blocks > limit) {
222 statp->f_blocks = limit; 224 statp->f_blocks = limit;
223 statp->f_bfree = (statp->f_blocks > dp->d_bcount) ? 225 statp->f_bfree =
224 (statp->f_blocks - dp->d_bcount) : 0; 226 (statp->f_blocks > be64_to_cpu(dp->d_bcount)) ?
227 (statp->f_blocks - be64_to_cpu(dp->d_bcount)) : 0;
225 } 228 }
226 limit = dp->d_ino_softlimit ? dp->d_ino_softlimit : dp->d_ino_hardlimit; 229
230 limit = dp->d_ino_softlimit ?
231 be64_to_cpu(dp->d_ino_softlimit) :
232 be64_to_cpu(dp->d_ino_hardlimit);
227 if (limit && statp->f_files > limit) { 233 if (limit && statp->f_files > limit) {
228 statp->f_files = limit; 234 statp->f_files = limit;
229 statp->f_ffree = (statp->f_files > dp->d_icount) ? 235 statp->f_ffree =
230 (statp->f_ffree - dp->d_icount) : 0; 236 (statp->f_files > be64_to_cpu(dp->d_icount)) ?
237 (statp->f_ffree - be64_to_cpu(dp->d_icount)) : 0;
231 } 238 }
232 239
233 xfs_qm_dqput(dqp); 240 xfs_qm_dqput(dqp);
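
The xfs_qm_statvfs() hunk above byte-swaps the on-disk quota fields with be64_to_cpu() before any comparison or subtraction; the old code compared host-order block counts against raw big-endian disk values. A small userspace sketch of the failure mode, assuming a little-endian host; be64_to_host() is a hypothetical stand-in for the kernel's be64_to_cpu():

#include <stdint.h>
#include <stdio.h>

static uint64_t be64_to_host(uint64_t v)	/* stand-in for be64_to_cpu() */
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	return __builtin_bswap64(v);
#else
	return v;
#endif
}

int main(void)
{
	/* Simulate a 1000-block soft limit as it sits on disk (big-endian). */
	uint64_t raw_be = be64_to_host(1000ULL);	/* one swap builds the BE image on LE */
	uint64_t f_blocks = 5000;

	/* Old behaviour: raw on-disk value used directly, so the clamp misfires. */
	printf("raw limit looks like %llu, clamp taken: %s\n",
	       (unsigned long long)raw_be, f_blocks > raw_be ? "yes" : "no");

	/* New behaviour: convert first, then clamp as the patched code does. */
	uint64_t limit = be64_to_host(raw_be);
	if (limit && f_blocks > limit)
		f_blocks = limit;
	printf("converted limit %llu, f_blocks clamped to %llu\n",
	       (unsigned long long)limit, (unsigned long long)f_blocks);
	return 0;
}
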
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 86c1bf0bba9e..1f8ecff8553a 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -334,10 +334,9 @@ xfs_itobp(
334#if !defined(__KERNEL__) 334#if !defined(__KERNEL__)
335 ni = 0; 335 ni = 0;
336#elif defined(DEBUG) 336#elif defined(DEBUG)
337 ni = (imap_flags & XFS_IMAP_BULKSTAT) ? 0 : 337 ni = BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog;
338 (BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog);
339#else /* usual case */ 338#else /* usual case */
340 ni = (imap_flags & XFS_IMAP_BULKSTAT) ? 0 : 1; 339 ni = 1;
341#endif 340#endif
342 341
343 for (i = 0; i < ni; i++) { 342 for (i = 0; i < ni; i++) {
@@ -348,11 +347,15 @@ xfs_itobp(
348 (i << mp->m_sb.sb_inodelog)); 347 (i << mp->m_sb.sb_inodelog));
349 di_ok = INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC && 348 di_ok = INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC &&
350 XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT)); 349 XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT));
351 if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP, 350 if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
352 XFS_RANDOM_ITOBP_INOTOBP))) { 351 XFS_ERRTAG_ITOBP_INOTOBP,
352 XFS_RANDOM_ITOBP_INOTOBP))) {
353 if (imap_flags & XFS_IMAP_BULKSTAT) {
354 xfs_trans_brelse(tp, bp);
355 return XFS_ERROR(EINVAL);
356 }
353#ifdef DEBUG 357#ifdef DEBUG
354 if (!(imap_flags & XFS_IMAP_BULKSTAT)) 358 cmn_err(CE_ALERT,
355 cmn_err(CE_ALERT,
356 "Device %s - bad inode magic/vsn " 359 "Device %s - bad inode magic/vsn "
357 "daddr %lld #%d (magic=%x)", 360 "daddr %lld #%d (magic=%x)",
358 XFS_BUFTARG_NAME(mp->m_ddev_targp), 361 XFS_BUFTARG_NAME(mp->m_ddev_targp),
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index e730328636c3..21ac1a67e3e0 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1413,7 +1413,7 @@ xlog_sync(xlog_t *log,
1413 ops = iclog->ic_header.h_num_logops; 1413 ops = iclog->ic_header.h_num_logops;
1414 INT_SET(iclog->ic_header.h_num_logops, ARCH_CONVERT, ops); 1414 INT_SET(iclog->ic_header.h_num_logops, ARCH_CONVERT, ops);
1415 1415
1416 bp = iclog->ic_bp; 1416 bp = iclog->ic_bp;
1417 ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1); 1417 ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1);
1418 XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); 1418 XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2);
1419 XFS_BUF_SET_ADDR(bp, BLOCK_LSN(INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT))); 1419 XFS_BUF_SET_ADDR(bp, BLOCK_LSN(INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT)));
@@ -1430,15 +1430,14 @@ xlog_sync(xlog_t *log,
1430 } 1430 }
1431 XFS_BUF_SET_PTR(bp, (xfs_caddr_t) &(iclog->ic_header), count); 1431 XFS_BUF_SET_PTR(bp, (xfs_caddr_t) &(iclog->ic_header), count);
1432 XFS_BUF_SET_FSPRIVATE(bp, iclog); /* save for later */ 1432 XFS_BUF_SET_FSPRIVATE(bp, iclog); /* save for later */
1433 XFS_BUF_ZEROFLAGS(bp);
1433 XFS_BUF_BUSY(bp); 1434 XFS_BUF_BUSY(bp);
1434 XFS_BUF_ASYNC(bp); 1435 XFS_BUF_ASYNC(bp);
1435 /* 1436 /*
1436 * Do an ordered write for the log block. 1437 * Do an ordered write for the log block.
1437 * 1438 * Its unnecessary to flush the first split block in the log wrap case.
1438 * It may not be needed to flush the first split block in the log wrap
1439 * case, but do it anyways to be safe -AK
1440 */ 1439 */
1441 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) 1440 if (!split && (log->l_mp->m_flags & XFS_MOUNT_BARRIER))
1442 XFS_BUF_ORDERED(bp); 1441 XFS_BUF_ORDERED(bp);
1443 1442
1444 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); 1443 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
@@ -1460,7 +1459,7 @@ xlog_sync(xlog_t *log,
1460 return error; 1459 return error;
1461 } 1460 }
1462 if (split) { 1461 if (split) {
1463 bp = iclog->ic_log->l_xbuf; 1462 bp = iclog->ic_log->l_xbuf;
1464 ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == 1463 ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) ==
1465 (unsigned long)1); 1464 (unsigned long)1);
1466 XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); 1465 XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2);
@@ -1468,6 +1467,7 @@ xlog_sync(xlog_t *log,
1468 XFS_BUF_SET_PTR(bp, (xfs_caddr_t)((__psint_t)&(iclog->ic_header)+ 1467 XFS_BUF_SET_PTR(bp, (xfs_caddr_t)((__psint_t)&(iclog->ic_header)+
1469 (__psint_t)count), split); 1468 (__psint_t)count), split);
1470 XFS_BUF_SET_FSPRIVATE(bp, iclog); 1469 XFS_BUF_SET_FSPRIVATE(bp, iclog);
1470 XFS_BUF_ZEROFLAGS(bp);
1471 XFS_BUF_BUSY(bp); 1471 XFS_BUF_BUSY(bp);
1472 XFS_BUF_ASYNC(bp); 1472 XFS_BUF_ASYNC(bp);
1473 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) 1473 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
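
Together with the xfs_buf.h change earlier in this diff (XFS_BUF_ZEROFLAGS() now also clears XBF_ORDERED), the two XFS_BUF_ZEROFLAGS(bp) calls added above reset the per-I/O flags each time a log buffer is reused, so an ordered/barrier flag left over from a previous submission cannot leak into the next one. A minimal sketch of that reset-before-reuse pattern, with made-up flag names:

#include <stdio.h>

#define F_READ    (1u << 0)
#define F_WRITE   (1u << 1)
#define F_ASYNC   (1u << 2)
#define F_ORDERED (1u << 3)	/* barrier/ordered write requested */

/* Clear every per-I/O flag before the buffer is reused. */
#define ZEROFLAGS(flags) ((flags) &= ~(F_READ | F_WRITE | F_ASYNC | F_ORDERED))

int main(void)
{
	unsigned int flags = F_WRITE | F_ORDERED;	/* left over from the last I/O */

	ZEROFLAGS(flags);
	flags |= F_WRITE | F_ASYNC;			/* set only what this I/O needs */

	printf("ordered flag still set: %s\n", (flags & F_ORDERED) ? "yes" : "no");
	return 0;
}
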
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
index 6c96391f3f1a..b427d220a169 100644
--- a/fs/xfs/xfs_vfsops.c
+++ b/fs/xfs/xfs_vfsops.c
@@ -515,7 +515,7 @@ xfs_mount(
515 if (error) 515 if (error)
516 goto error2; 516 goto error2;
517 517
518 if ((mp->m_flags & XFS_MOUNT_BARRIER) && !(vfsp->vfs_flag & VFS_RDONLY)) 518 if (mp->m_flags & XFS_MOUNT_BARRIER)
519 xfs_mountfs_check_barriers(mp); 519 xfs_mountfs_check_barriers(mp);
520 520
521 error = XFS_IOINIT(vfsp, args, flags); 521 error = XFS_IOINIT(vfsp, args, flags);
diff --git a/include/asm-arm/arch-iop3xx/iop331-irqs.h b/include/asm-arm/arch-iop3xx/iop331-irqs.h
index 8ff73d487222..7135ad7e335e 100644
--- a/include/asm-arm/arch-iop3xx/iop331-irqs.h
+++ b/include/asm-arm/arch-iop3xx/iop331-irqs.h
@@ -91,7 +91,6 @@
91#define NR_IRQS NR_IOP331_IRQS 91#define NR_IRQS NR_IOP331_IRQS
92 92
93 93
94#if defined(CONFIG_ARCH_IQ80331)
95/* 94/*
96 * Interrupts available on the IQ80331 board 95 * Interrupts available on the IQ80331 board
97 */ 96 */
@@ -111,7 +110,6 @@
111#define IRQ_IQ80331_INTC IRQ_IOP331_XINT2 110#define IRQ_IQ80331_INTC IRQ_IOP331_XINT2
112#define IRQ_IQ80331_INTD IRQ_IOP331_XINT3 111#define IRQ_IQ80331_INTD IRQ_IOP331_XINT3
113 112
114#elif defined(CONFIG_MACH_IQ80332)
115/* 113/*
116 * Interrupts available on the IQ80332 board 114 * Interrupts available on the IQ80332 board
117 */ 115 */
@@ -131,6 +129,4 @@
131#define IRQ_IQ80332_INTC IRQ_IOP331_XINT2 129#define IRQ_IQ80332_INTC IRQ_IOP331_XINT2
132#define IRQ_IQ80332_INTD IRQ_IOP331_XINT3 130#define IRQ_IQ80332_INTD IRQ_IOP331_XINT3
133 131
134#endif
135
136#endif // _IOP331_IRQ_H_ 132#endif // _IOP331_IRQ_H_
diff --git a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h
index 0730a20f6db8..8774d06689da 100644
--- a/include/asm-i386/kprobes.h
+++ b/include/asm-i386/kprobes.h
@@ -45,6 +45,7 @@ typedef u8 kprobe_opcode_t;
45#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry 45#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry
46#define ARCH_SUPPORTS_KRETPROBES 46#define ARCH_SUPPORTS_KRETPROBES
47#define ARCH_INACTIVE_KPROBE_COUNT 0 47#define ARCH_INACTIVE_KPROBE_COUNT 0
48#define flush_insn_slot(p) do { } while (0)
48 49
49void arch_remove_kprobe(struct kprobe *p); 50void arch_remove_kprobe(struct kprobe *p);
50void kretprobe_trampoline(void); 51void kretprobe_trampoline(void);
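
This hunk, and the matching ones for powerpc, sparc64 and x86_64 later in the diff, define flush_insn_slot() as an empty do { } while (0) on architectures that need no explicit instruction-cache flush (ia64 keeps a real function). The do { } while (0) wrapper is the usual way to make a macro behave like a single statement; a small standalone illustration of why bare multi-statement expansions are hazardous:

#include <stdio.h>

/* Hazardous: two statements that do not form a single block. */
#define LOG_TWICE_BAD(msg)  printf("%s\n", msg); printf("%s\n", msg)

/* Idiomatic: the body acts as one statement and needs a trailing ';',
 * exactly like a function call -- the same shape used for the empty
 * flush_insn_slot() definitions above. */
#define LOG_TWICE_GOOD(msg) do { printf("%s\n", msg); printf("%s\n", msg); } while (0)

int main(void)
{
	int verbose = 0;

	if (verbose)
		LOG_TWICE_BAD("bad");	/* second printf runs even though verbose is 0 */

	if (verbose)
		LOG_TWICE_GOOD("good");	/* nothing runs, as intended */

	return 0;
}
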
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h
index 2418a787c405..938904910115 100644
--- a/include/asm-ia64/kprobes.h
+++ b/include/asm-ia64/kprobes.h
@@ -125,5 +125,6 @@ static inline void jprobe_return(void)
125} 125}
126extern void invalidate_stacked_regs(void); 126extern void invalidate_stacked_regs(void);
127extern void flush_register_stack(void); 127extern void flush_register_stack(void);
128extern void flush_insn_slot(struct kprobe *p);
128 129
129#endif /* _ASM_KPROBES_H */ 130#endif /* _ASM_KPROBES_H */
diff --git a/include/asm-powerpc/backlight.h b/include/asm-powerpc/backlight.h
index 58d4b6f8d827..8cf5c37c3817 100644
--- a/include/asm-powerpc/backlight.h
+++ b/include/asm-powerpc/backlight.h
@@ -30,8 +30,12 @@ static inline void pmac_backlight_key_down(void)
30 pmac_backlight_key(1); 30 pmac_backlight_key(1);
31} 31}
32 32
33extern void pmac_backlight_set_legacy_brightness_pmu(int brightness);
33extern int pmac_backlight_set_legacy_brightness(int brightness); 34extern int pmac_backlight_set_legacy_brightness(int brightness);
34extern int pmac_backlight_get_legacy_brightness(void); 35extern int pmac_backlight_get_legacy_brightness(void);
35 36
37extern void pmac_backlight_enable(void);
38extern void pmac_backlight_disable(void);
39
36#endif /* __KERNEL__ */ 40#endif /* __KERNEL__ */
37#endif 41#endif
diff --git a/include/asm-powerpc/kprobes.h b/include/asm-powerpc/kprobes.h
index 2d0af52c823d..34e1f89a5fa0 100644
--- a/include/asm-powerpc/kprobes.h
+++ b/include/asm-powerpc/kprobes.h
@@ -51,6 +51,7 @@ typedef unsigned int kprobe_opcode_t;
51 51
52#define ARCH_SUPPORTS_KRETPROBES 52#define ARCH_SUPPORTS_KRETPROBES
53#define ARCH_INACTIVE_KPROBE_COUNT 1 53#define ARCH_INACTIVE_KPROBE_COUNT 1
54#define flush_insn_slot(p) do { } while (0)
54 55
55void kretprobe_trampoline(void); 56void kretprobe_trampoline(void);
56extern void arch_remove_kprobe(struct kprobe *p); 57extern void arch_remove_kprobe(struct kprobe *p);
diff --git a/include/asm-sparc/signal.h b/include/asm-sparc/signal.h
index 0ae5084c427b..d03a21c97abb 100644
--- a/include/asm-sparc/signal.h
+++ b/include/asm-sparc/signal.h
@@ -168,7 +168,7 @@ struct sigstack {
168 * statically allocated data.. which is NOT GOOD. 168 * statically allocated data.. which is NOT GOOD.
169 * 169 *
170 */ 170 */
171#define SA_STATIC_ALLOC 0x80 171#define SA_STATIC_ALLOC 0x8000
172#endif 172#endif
173 173
174#include <asm-generic/signal.h> 174#include <asm-generic/signal.h>
diff --git a/include/asm-sparc64/kprobes.h b/include/asm-sparc64/kprobes.h
index 15065af566c2..c9f5c34d318c 100644
--- a/include/asm-sparc64/kprobes.h
+++ b/include/asm-sparc64/kprobes.h
@@ -13,6 +13,7 @@ typedef u32 kprobe_opcode_t;
13#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry 13#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry
14#define arch_remove_kprobe(p) do {} while (0) 14#define arch_remove_kprobe(p) do {} while (0)
15#define ARCH_INACTIVE_KPROBE_COUNT 0 15#define ARCH_INACTIVE_KPROBE_COUNT 0
16#define flush_insn_slot(p) do { } while (0)
16 17
17/* Architecture specific copy of original instruction*/ 18/* Architecture specific copy of original instruction*/
18struct arch_specific_insn { 19struct arch_specific_insn {
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index 03f5bc9b6bec..1ba19eb34ce3 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -339,7 +339,7 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot)
339 " .section .sun4v_2insn_patch, \"ax\"\n" 339 " .section .sun4v_2insn_patch, \"ax\"\n"
340 " .word 661b\n" 340 " .word 661b\n"
341 " andn %0, %4, %0\n" 341 " andn %0, %4, %0\n"
342 " or %0, %3, %0\n" 342 " or %0, %5, %0\n"
343 " .previous\n" 343 " .previous\n"
344 : "=r" (val) 344 : "=r" (val)
345 : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U), 345 : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
diff --git a/include/asm-sparc64/sfp-machine.h b/include/asm-sparc64/sfp-machine.h
index 5015bb8d6c32..89d42431efb5 100644
--- a/include/asm-sparc64/sfp-machine.h
+++ b/include/asm-sparc64/sfp-machine.h
@@ -34,7 +34,7 @@
34#define _FP_MUL_MEAT_D(R,X,Y) \ 34#define _FP_MUL_MEAT_D(R,X,Y) \
35 _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm) 35 _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
36#define _FP_MUL_MEAT_Q(R,X,Y) \ 36#define _FP_MUL_MEAT_Q(R,X,Y) \
37 _FP_MUL_MEAT_2_wide_3mul(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm) 37 _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
38 38
39#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm) 39#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm)
40#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(D,R,X,Y) 40#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(D,R,X,Y)
diff --git a/include/asm-x86_64/calgary.h b/include/asm-x86_64/calgary.h
index fbfb50136edb..4e3919524240 100644
--- a/include/asm-x86_64/calgary.h
+++ b/include/asm-x86_64/calgary.h
@@ -60,9 +60,4 @@ static inline int calgary_iommu_init(void) { return 1; }
60static inline void detect_calgary(void) { return; } 60static inline void detect_calgary(void) { return; }
61#endif 61#endif
62 62
63static inline unsigned int bus_to_phb(unsigned char busno)
64{
65 return ((busno % 15 == 0) ? 0 : busno / 2 + 1);
66}
67
68#endif /* _ASM_X86_64_CALGARY_H */ 63#endif /* _ASM_X86_64_CALGARY_H */
diff --git a/include/asm-x86_64/kprobes.h b/include/asm-x86_64/kprobes.h
index d36febd9bb18..cf5317898fb0 100644
--- a/include/asm-x86_64/kprobes.h
+++ b/include/asm-x86_64/kprobes.h
@@ -47,6 +47,7 @@ typedef u8 kprobe_opcode_t;
47 47
48void kretprobe_trampoline(void); 48void kretprobe_trampoline(void);
49extern void arch_remove_kprobe(struct kprobe *p); 49extern void arch_remove_kprobe(struct kprobe *p);
50#define flush_insn_slot(p) do { } while (0)
50 51
51/* Architecture specific copy of original instruction*/ 52/* Architecture specific copy of original instruction*/
52struct arch_specific_insn { 53struct arch_specific_insn {
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index f7bf875aae40..10f346165cab 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -19,7 +19,7 @@
19#define EXCEPTION_STACK_ORDER 0 19#define EXCEPTION_STACK_ORDER 0
20#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER) 20#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
21 21
22#define DEBUG_STACK_ORDER EXCEPTION_STACK_ORDER 22#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
23#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER) 23#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
24 24
25#define IRQSTACK_ORDER 2 25#define IRQSTACK_ORDER 2
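
Bumping DEBUG_STACK_ORDER from EXCEPTION_STACK_ORDER to EXCEPTION_STACK_ORDER + 1 doubles the debug exception stack, since an order is a power-of-two count of pages. A one-file check of the arithmetic, using the 4 KiB x86_64 page size:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define EXCEPTION_STACK_ORDER 0
#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)

int main(void)
{
	printf("exception stack: %lu bytes\n", PAGE_SIZE << EXCEPTION_STACK_ORDER);
	printf("debug stack:     %lu bytes\n", PAGE_SIZE << DEBUG_STACK_ORDER);
	return 0;
}
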
diff --git a/include/asm-x86_64/swiotlb.h b/include/asm-x86_64/swiotlb.h
index 5f9a01805821..ba94ab3d2673 100644
--- a/include/asm-x86_64/swiotlb.h
+++ b/include/asm-x86_64/swiotlb.h
@@ -42,6 +42,8 @@ extern void swiotlb_free_coherent (struct device *hwdev, size_t size,
42extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); 42extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
43extern void swiotlb_init(void); 43extern void swiotlb_init(void);
44 44
45extern int swiotlb_force;
46
45#ifdef CONFIG_SWIOTLB 47#ifdef CONFIG_SWIOTLB
46extern int swiotlb; 48extern int swiotlb;
47#else 49#else
diff --git a/include/linux/cn_proc.h b/include/linux/cn_proc.h
index dbb7769009be..1c86d65bc4b9 100644
--- a/include/linux/cn_proc.h
+++ b/include/linux/cn_proc.h
@@ -57,7 +57,8 @@ struct proc_event {
57 PROC_EVENT_EXIT = 0x80000000 57 PROC_EVENT_EXIT = 0x80000000
58 } what; 58 } what;
59 __u32 cpu; 59 __u32 cpu;
60 struct timespec timestamp; 60 __u64 __attribute__((aligned(8))) timestamp_ns;
61 /* Number of nano seconds since system boot */
61 union { /* must be last field of proc_event struct */ 62 union { /* must be last field of proc_event struct */
62 struct { 63 struct {
63 __u32 err; 64 __u32 err;
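
Replacing the struct timespec with an explicitly 8-byte-aligned __u64 nanosecond count gives the connector event the same binary layout for 32-bit and 64-bit userspace: without the attribute, a 64-bit field only gets 4-byte alignment inside i386 structs. A simplified model (not the real proc_event layout) that shows the effect; build it with -m32 and -m64 and compare the output:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct evt_natural {
	uint32_t cpu;
	uint64_t timestamp_ns;			/* offset 4 with -m32, 8 with -m64 */
};

struct evt_padded {
	uint32_t cpu;
	uint64_t timestamp_ns __attribute__((aligned(8)));	/* offset 8 everywhere */
};

int main(void)
{
	printf("natural: offset %zu, size %zu\n",
	       offsetof(struct evt_natural, timestamp_ns), sizeof(struct evt_natural));
	printf("padded:  offset %zu, size %zu\n",
	       offsetof(struct evt_padded, timestamp_ns), sizeof(struct evt_padded));
	return 0;
}
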
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 35e137636b0b..4ea39fee99c7 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -172,9 +172,6 @@ extern int __cpufreq_driver_target(struct cpufreq_policy *policy,
172 unsigned int relation); 172 unsigned int relation);
173 173
174 174
175/* pass an event to the cpufreq governor */
176int cpufreq_governor(unsigned int cpu, unsigned int event);
177
178int cpufreq_register_governor(struct cpufreq_governor *governor); 175int cpufreq_register_governor(struct cpufreq_governor *governor);
179void cpufreq_unregister_governor(struct cpufreq_governor *governor); 176void cpufreq_unregister_governor(struct cpufreq_governor *governor);
180 177
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 7e8b6011b8f3..11487b6e7127 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -55,7 +55,7 @@ static inline void delayacct_tsk_init(struct task_struct *tsk)
55{ 55{
56 /* reinitialize in case parent's non-null pointer was dup'ed*/ 56 /* reinitialize in case parent's non-null pointer was dup'ed*/
57 tsk->delays = NULL; 57 tsk->delays = NULL;
58 if (unlikely(delayacct_on)) 58 if (delayacct_on)
59 __delayacct_tsk_init(tsk); 59 __delayacct_tsk_init(tsk);
60} 60}
61 61
@@ -80,9 +80,7 @@ static inline void delayacct_blkio_end(void)
80static inline int delayacct_add_tsk(struct taskstats *d, 80static inline int delayacct_add_tsk(struct taskstats *d,
81 struct task_struct *tsk) 81 struct task_struct *tsk)
82{ 82{
83 if (likely(!delayacct_on)) 83 if (!delayacct_on || !tsk->delays)
84 return -EINVAL;
85 if (!tsk->delays)
86 return 0; 84 return 0;
87 return __delayacct_add_tsk(d, tsk); 85 return __delayacct_add_tsk(d, tsk);
88} 86}
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index 5607e6457a65..9f9cce7bd86d 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -492,6 +492,15 @@ static inline struct ext3_inode_info *EXT3_I(struct inode *inode)
492{ 492{
493 return container_of(inode, struct ext3_inode_info, vfs_inode); 493 return container_of(inode, struct ext3_inode_info, vfs_inode);
494} 494}
495
496static inline int ext3_valid_inum(struct super_block *sb, unsigned long ino)
497{
498 return ino == EXT3_ROOT_INO ||
499 ino == EXT3_JOURNAL_INO ||
500 ino == EXT3_RESIZE_INO ||
501 (ino >= EXT3_FIRST_INO(sb) &&
502 ino <= le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count));
503}
495#else 504#else
496/* Assume that user mode programs are passing in an ext3fs superblock, not 505/* Assume that user mode programs are passing in an ext3fs superblock, not
497 * a kernel struct super_block. This will allow us to call the feature-test 506 * a kernel struct super_block. This will allow us to call the feature-test
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 405f44e44e5d..4ad0673b1995 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -524,7 +524,7 @@ struct fb_event {
524 524
525extern int fb_register_client(struct notifier_block *nb); 525extern int fb_register_client(struct notifier_block *nb);
526extern int fb_unregister_client(struct notifier_block *nb); 526extern int fb_unregister_client(struct notifier_block *nb);
527 527extern int fb_notifier_call_chain(unsigned long val, void *v);
528/* 528/*
529 * Pixmap structure definition 529 * Pixmap structure definition
530 * 530 *
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 34c3a215f2cd..d097b5b72bc6 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -96,7 +96,8 @@ struct robust_list_head {
96long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout, 96long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout,
97 u32 __user *uaddr2, u32 val2, u32 val3); 97 u32 __user *uaddr2, u32 val2, u32 val3);
98 98
99extern int handle_futex_death(u32 __user *uaddr, struct task_struct *curr); 99extern int
100handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi);
100 101
101#ifdef CONFIG_FUTEX 102#ifdef CONFIG_FUTEX
102extern void exit_robust_list(struct task_struct *curr); 103extern void exit_robust_list(struct task_struct *curr);
diff --git a/include/linux/ide.h b/include/linux/ide.h
index dc7abef10965..99620451d958 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -571,6 +571,7 @@ typedef struct ide_drive_s {
571 u8 waiting_for_dma; /* dma currently in progress */ 571 u8 waiting_for_dma; /* dma currently in progress */
572 u8 unmask; /* okay to unmask other irqs */ 572 u8 unmask; /* okay to unmask other irqs */
573 u8 bswap; /* byte swap data */ 573 u8 bswap; /* byte swap data */
574 u8 noflush; /* don't attempt flushes */
574 u8 dsc_overlap; /* DSC overlap */ 575 u8 dsc_overlap; /* DSC overlap */
575 u8 nice1; /* give potential excess bandwidth */ 576 u8 nice1; /* give potential excess bandwidth */
576 577
diff --git a/include/linux/irq.h b/include/linux/irq.h
index b48eae32dc61..fbf6d901e9c2 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -47,8 +47,8 @@
47#define IRQ_WAITING 0x00200000 /* IRQ not yet seen - for autodetection */ 47#define IRQ_WAITING 0x00200000 /* IRQ not yet seen - for autodetection */
48#define IRQ_LEVEL 0x00400000 /* IRQ level triggered */ 48#define IRQ_LEVEL 0x00400000 /* IRQ level triggered */
49#define IRQ_MASKED 0x00800000 /* IRQ masked - shouldn't be seen again */ 49#define IRQ_MASKED 0x00800000 /* IRQ masked - shouldn't be seen again */
50#define IRQ_PER_CPU 0x01000000 /* IRQ is per CPU */
50#ifdef CONFIG_IRQ_PER_CPU 51#ifdef CONFIG_IRQ_PER_CPU
51# define IRQ_PER_CPU 0x01000000 /* IRQ is per CPU */
52# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) 52# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
53#else 53#else
54# define CHECK_IRQ_PER_CPU(var) 0 54# define CHECK_IRQ_PER_CPU(var) 0
@@ -58,6 +58,7 @@
58#define IRQ_NOREQUEST 0x04000000 /* IRQ cannot be requested */ 58#define IRQ_NOREQUEST 0x04000000 /* IRQ cannot be requested */
59#define IRQ_NOAUTOEN 0x08000000 /* IRQ will not be enabled on request irq */ 59#define IRQ_NOAUTOEN 0x08000000 /* IRQ will not be enabled on request irq */
60#define IRQ_DELAYED_DISABLE 0x10000000 /* IRQ disable (masking) happens delayed. */ 60#define IRQ_DELAYED_DISABLE 0x10000000 /* IRQ disable (masking) happens delayed. */
61#define IRQ_WAKEUP 0x20000000 /* IRQ triggers system wakeup */
61 62
62struct proc_dir_entry; 63struct proc_dir_entry;
63 64
@@ -124,6 +125,7 @@ struct irq_chip {
124 * @action: the irq action chain 125 * @action: the irq action chain
125 * @status: status information 126 * @status: status information
126 * @depth: disable-depth, for nested irq_disable() calls 127 * @depth: disable-depth, for nested irq_disable() calls
128 * @wake_depth: enable depth, for multiple set_irq_wake() callers
127 * @irq_count: stats field to detect stalled irqs 129 * @irq_count: stats field to detect stalled irqs
128 * @irqs_unhandled: stats field for spurious unhandled interrupts 130 * @irqs_unhandled: stats field for spurious unhandled interrupts
129 * @lock: locking for SMP 131 * @lock: locking for SMP
@@ -147,6 +149,7 @@ struct irq_desc {
147 unsigned int status; /* IRQ status */ 149 unsigned int status; /* IRQ status */
148 150
149 unsigned int depth; /* nested irq disables */ 151 unsigned int depth; /* nested irq disables */
152 unsigned int wake_depth; /* nested wake enables */
150 unsigned int irq_count; /* For detecting broken IRQs */ 153 unsigned int irq_count; /* For detecting broken IRQs */
151 unsigned int irqs_unhandled; 154 unsigned int irqs_unhandled;
152 spinlock_t lock; 155 spinlock_t lock;
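
The new wake_depth field counts nested wake enables so the wakeup source is programmed on the first enable and torn down only when the last user disables it. A minimal sketch of that depth-counted pattern; the struct and function below are illustrative only, not the kernel's set_irq_wake() (which also handles locking and chip callback failures):

#include <stdio.h>

struct irq_state {
	unsigned int wake_depth;
	int hw_wake_enabled;
};

static void irq_set_wake(struct irq_state *st, int on)
{
	if (on) {
		if (st->wake_depth++ == 0)
			st->hw_wake_enabled = 1;	/* first enabler programs the hw */
	} else {
		if (st->wake_depth == 0)
			printf("unbalanced wake disable\n");
		else if (--st->wake_depth == 0)
			st->hw_wake_enabled = 0;	/* last disabler turns it off */
	}
}

int main(void)
{
	struct irq_state st = { 0, 0 };

	irq_set_wake(&st, 1);
	irq_set_wake(&st, 1);	/* second caller: depth 2, hardware untouched */
	irq_set_wake(&st, 0);	/* back to depth 1, wakeup stays enabled */
	printf("depth=%u hw=%d\n", st.wake_depth, st.hw_wake_enabled);
	irq_set_wake(&st, 0);
	printf("depth=%u hw=%d\n", st.wake_depth, st.hw_wake_enabled);
	return 0;
}
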
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 043376920f51..329ebcffa106 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -47,8 +47,8 @@
47 * - (NOM / DEN) fits in (32 - LSH) bits. 47 * - (NOM / DEN) fits in (32 - LSH) bits.
48 * - (NOM % DEN) fits in (32 - LSH) bits. 48 * - (NOM % DEN) fits in (32 - LSH) bits.
49 */ 49 */
50#define SH_DIV(NOM,DEN,LSH) ( ((NOM / DEN) << LSH) \ 50#define SH_DIV(NOM,DEN,LSH) ( (((NOM) / (DEN)) << (LSH)) \
51 + (((NOM % DEN) << LSH) + DEN / 2) / DEN) 51 + ((((NOM) % (DEN)) << (LSH)) + (DEN) / 2) / (DEN))
52 52
53/* HZ is the requested value. ACTHZ is actual HZ ("<< 8" is for accuracy) */ 53/* HZ is the requested value. ACTHZ is actual HZ ("<< 8" is for accuracy) */
54#define ACTHZ (SH_DIV (CLOCK_TICK_RATE, LATCH, 8)) 54#define ACTHZ (SH_DIV (CLOCK_TICK_RATE, LATCH, 8))
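
The SH_DIV() change is pure macro hygiene: every use of NOM, DEN and LSH is now parenthesized, so the rounding division still groups correctly when a caller passes an expression rather than a plain constant. A standalone comparison of the two forms (the argument 10 + 6 is just a demonstration value):

#include <stdio.h>

/* Old form: arguments used without parentheses. */
#define SH_DIV_OLD(NOM, DEN, LSH) ( ((NOM / DEN) << LSH) \
			+ (((NOM % DEN) << LSH) + DEN / 2) / DEN)

/* Fixed form: every use of an argument is parenthesized. */
#define SH_DIV_NEW(NOM, DEN, LSH) ( (((NOM) / (DEN)) << (LSH)) \
			+ ((((NOM) % (DEN)) << (LSH)) + (DEN) / 2) / (DEN))

int main(void)
{
	/* With an expression argument, the old macro mis-binds the '/':
	 * 1000000 / 10 + 6 parses as (1000000 / 10) + 6, not 1000000 / 16. */
	printf("old: %d\n", SH_DIV_OLD(1000000, 10 + 6, 8));
	printf("new: %d\n", SH_DIV_NEW(1000000, 10 + 6, 8));
	return 0;
}
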
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 6cc497a2b6da..66c3100c2b94 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -265,12 +265,14 @@ enum {
265 265
266 /* ata_eh_info->flags */ 266 /* ata_eh_info->flags */
267 ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ 267 ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */
268 ATA_EHI_RESUME_LINK = (1 << 1), /* need to resume link */ 268 ATA_EHI_RESUME_LINK = (1 << 1), /* resume link (reset modifier) */
269 ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */ 269 ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */
270 ATA_EHI_QUIET = (1 << 3), /* be quiet */ 270 ATA_EHI_QUIET = (1 << 3), /* be quiet */
271 271
272 ATA_EHI_DID_RESET = (1 << 16), /* already reset this port */ 272 ATA_EHI_DID_RESET = (1 << 16), /* already reset this port */
273 273
274 ATA_EHI_RESET_MODIFIER_MASK = ATA_EHI_RESUME_LINK,
275
274 /* max repeat if error condition is still set after ->error_handler */ 276 /* max repeat if error condition is still set after ->error_handler */
275 ATA_EH_MAX_REPEAT = 5, 277 ATA_EH_MAX_REPEAT = 5,
276 278
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index 87764022cc67..31f02ba036ce 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -79,6 +79,8 @@ struct bridge_skb_cb {
79 __u32 ipv4; 79 __u32 ipv4;
80 } daddr; 80 } daddr;
81}; 81};
82
83extern int brnf_deferred_hooks;
82#endif /* CONFIG_BRIDGE_NETFILTER */ 84#endif /* CONFIG_BRIDGE_NETFILTER */
83 85
84#endif /* __KERNEL__ */ 86#endif /* __KERNEL__ */
diff --git a/include/linux/pmu.h b/include/linux/pmu.h
index 2ed807ddc08c..783177387ac6 100644
--- a/include/linux/pmu.h
+++ b/include/linux/pmu.h
@@ -231,7 +231,6 @@ extern struct pmu_battery_info pmu_batteries[PMU_MAX_BATTERIES];
231extern unsigned int pmu_power_flags; 231extern unsigned int pmu_power_flags;
232 232
233/* Backlight */ 233/* Backlight */
234extern int disable_kernel_backlight; 234extern void pmu_backlight_init(void);
235extern void pmu_backlight_init(struct device_node*);
236 235
237#endif /* __KERNEL__ */ 236#endif /* __KERNEL__ */
diff --git a/ipc/msg.c b/ipc/msg.c
index cd92d342953e..2b4fccf8ea55 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * linux/ipc/msg.c 2 * linux/ipc/msg.c
3 * Copyright (C) 1992 Krishna Balasubramanian 3 * Copyright (C) 1992 Krishna Balasubramanian
4 * 4 *
5 * Removed all the remaining kerneld mess 5 * Removed all the remaining kerneld mess
6 * Catch the -EFAULT stuff properly 6 * Catch the -EFAULT stuff properly
@@ -41,22 +41,24 @@ int msg_ctlmax = MSGMAX;
41int msg_ctlmnb = MSGMNB; 41int msg_ctlmnb = MSGMNB;
42int msg_ctlmni = MSGMNI; 42int msg_ctlmni = MSGMNI;
43 43
44/* one msg_receiver structure for each sleeping receiver */ 44/*
45 * one msg_receiver structure for each sleeping receiver:
46 */
45struct msg_receiver { 47struct msg_receiver {
46 struct list_head r_list; 48 struct list_head r_list;
47 struct task_struct* r_tsk; 49 struct task_struct *r_tsk;
48 50
49 int r_mode; 51 int r_mode;
50 long r_msgtype; 52 long r_msgtype;
51 long r_maxsize; 53 long r_maxsize;
52 54
53 struct msg_msg* volatile r_msg; 55 volatile struct msg_msg *r_msg;
54}; 56};
55 57
56/* one msg_sender for each sleeping sender */ 58/* one msg_sender for each sleeping sender */
57struct msg_sender { 59struct msg_sender {
58 struct list_head list; 60 struct list_head list;
59 struct task_struct* tsk; 61 struct task_struct *tsk;
60}; 62};
61 63
62#define SEARCH_ANY 1 64#define SEARCH_ANY 1
@@ -64,45 +66,42 @@ struct msg_sender {
64#define SEARCH_NOTEQUAL 3 66#define SEARCH_NOTEQUAL 3
65#define SEARCH_LESSEQUAL 4 67#define SEARCH_LESSEQUAL 4
66 68
67static atomic_t msg_bytes = ATOMIC_INIT(0); 69static atomic_t msg_bytes = ATOMIC_INIT(0);
68static atomic_t msg_hdrs = ATOMIC_INIT(0); 70static atomic_t msg_hdrs = ATOMIC_INIT(0);
69 71
70static struct ipc_ids msg_ids; 72static struct ipc_ids msg_ids;
71 73
72#define msg_lock(id) ((struct msg_queue*)ipc_lock(&msg_ids,id)) 74#define msg_lock(id) ((struct msg_queue *)ipc_lock(&msg_ids, id))
73#define msg_unlock(msq) ipc_unlock(&(msq)->q_perm) 75#define msg_unlock(msq) ipc_unlock(&(msq)->q_perm)
74#define msg_rmid(id) ((struct msg_queue*)ipc_rmid(&msg_ids,id)) 76#define msg_rmid(id) ((struct msg_queue *)ipc_rmid(&msg_ids, id))
75#define msg_checkid(msq, msgid) \ 77#define msg_checkid(msq, msgid) ipc_checkid(&msg_ids, &msq->q_perm, msgid)
76 ipc_checkid(&msg_ids,&msq->q_perm,msgid) 78#define msg_buildid(id, seq) ipc_buildid(&msg_ids, id, seq)
77#define msg_buildid(id, seq) \
78 ipc_buildid(&msg_ids, id, seq)
79 79
80static void freeque (struct msg_queue *msq, int id); 80static void freeque(struct msg_queue *msq, int id);
81static int newque (key_t key, int msgflg); 81static int newque(key_t key, int msgflg);
82#ifdef CONFIG_PROC_FS 82#ifdef CONFIG_PROC_FS
83static int sysvipc_msg_proc_show(struct seq_file *s, void *it); 83static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
84#endif 84#endif
85 85
86void __init msg_init (void) 86void __init msg_init(void)
87{ 87{
88 ipc_init_ids(&msg_ids,msg_ctlmni); 88 ipc_init_ids(&msg_ids, msg_ctlmni);
89 ipc_init_proc_interface("sysvipc/msg", 89 ipc_init_proc_interface("sysvipc/msg",
90 " key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n", 90 " key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n",
91 &msg_ids, 91 &msg_ids,
92 sysvipc_msg_proc_show); 92 sysvipc_msg_proc_show);
93} 93}
94 94
95static int newque (key_t key, int msgflg) 95static int newque(key_t key, int msgflg)
96{ 96{
97 int id;
98 int retval;
99 struct msg_queue *msq; 97 struct msg_queue *msq;
98 int id, retval;
100 99
101 msq = ipc_rcu_alloc(sizeof(*msq)); 100 msq = ipc_rcu_alloc(sizeof(*msq));
102 if (!msq) 101 if (!msq)
103 return -ENOMEM; 102 return -ENOMEM;
104 103
105 msq->q_perm.mode = (msgflg & S_IRWXUGO); 104 msq->q_perm.mode = msgflg & S_IRWXUGO;
106 msq->q_perm.key = key; 105 msq->q_perm.key = key;
107 106
108 msq->q_perm.security = NULL; 107 msq->q_perm.security = NULL;
@@ -113,13 +112,13 @@ static int newque (key_t key, int msgflg)
113 } 112 }
114 113
115 id = ipc_addid(&msg_ids, &msq->q_perm, msg_ctlmni); 114 id = ipc_addid(&msg_ids, &msq->q_perm, msg_ctlmni);
116 if(id == -1) { 115 if (id == -1) {
117 security_msg_queue_free(msq); 116 security_msg_queue_free(msq);
118 ipc_rcu_putref(msq); 117 ipc_rcu_putref(msq);
119 return -ENOSPC; 118 return -ENOSPC;
120 } 119 }
121 120
122 msq->q_id = msg_buildid(id,msq->q_perm.seq); 121 msq->q_id = msg_buildid(id, msq->q_perm.seq);
123 msq->q_stime = msq->q_rtime = 0; 122 msq->q_stime = msq->q_rtime = 0;
124 msq->q_ctime = get_seconds(); 123 msq->q_ctime = get_seconds();
125 msq->q_cbytes = msq->q_qnum = 0; 124 msq->q_cbytes = msq->q_qnum = 0;
@@ -133,44 +132,44 @@ static int newque (key_t key, int msgflg)
133 return msq->q_id; 132 return msq->q_id;
134} 133}
135 134
136static inline void ss_add(struct msg_queue* msq, struct msg_sender* mss) 135static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
137{ 136{
138 mss->tsk=current; 137 mss->tsk = current;
139 current->state=TASK_INTERRUPTIBLE; 138 current->state = TASK_INTERRUPTIBLE;
140 list_add_tail(&mss->list,&msq->q_senders); 139 list_add_tail(&mss->list, &msq->q_senders);
141} 140}
142 141
143static inline void ss_del(struct msg_sender* mss) 142static inline void ss_del(struct msg_sender *mss)
144{ 143{
145 if(mss->list.next != NULL) 144 if (mss->list.next != NULL)
146 list_del(&mss->list); 145 list_del(&mss->list);
147} 146}
148 147
149static void ss_wakeup(struct list_head* h, int kill) 148static void ss_wakeup(struct list_head *h, int kill)
150{ 149{
151 struct list_head *tmp; 150 struct list_head *tmp;
152 151
153 tmp = h->next; 152 tmp = h->next;
154 while (tmp != h) { 153 while (tmp != h) {
155 struct msg_sender* mss; 154 struct msg_sender *mss;
156 155
157 mss = list_entry(tmp,struct msg_sender,list); 156 mss = list_entry(tmp, struct msg_sender, list);
158 tmp = tmp->next; 157 tmp = tmp->next;
159 if(kill) 158 if (kill)
160 mss->list.next=NULL; 159 mss->list.next = NULL;
161 wake_up_process(mss->tsk); 160 wake_up_process(mss->tsk);
162 } 161 }
163} 162}
164 163
165static void expunge_all(struct msg_queue* msq, int res) 164static void expunge_all(struct msg_queue *msq, int res)
166{ 165{
167 struct list_head *tmp; 166 struct list_head *tmp;
168 167
169 tmp = msq->q_receivers.next; 168 tmp = msq->q_receivers.next;
170 while (tmp != &msq->q_receivers) { 169 while (tmp != &msq->q_receivers) {
171 struct msg_receiver* msr; 170 struct msg_receiver *msr;
172 171
173 msr = list_entry(tmp,struct msg_receiver,r_list); 172 msr = list_entry(tmp, struct msg_receiver, r_list);
174 tmp = tmp->next; 173 tmp = tmp->next;
175 msr->r_msg = NULL; 174 msr->r_msg = NULL;
176 wake_up_process(msr->r_tsk); 175 wake_up_process(msr->r_tsk);
@@ -178,26 +177,28 @@ static void expunge_all(struct msg_queue* msq, int res)
178 msr->r_msg = ERR_PTR(res); 177 msr->r_msg = ERR_PTR(res);
179 } 178 }
180} 179}
181/* 180
182 * freeque() wakes up waiters on the sender and receiver waiting queue, 181/*
183 * removes the message queue from message queue ID 182 * freeque() wakes up waiters on the sender and receiver waiting queue,
183 * removes the message queue from message queue ID
184 * array, and cleans up all the messages associated with this queue. 184 * array, and cleans up all the messages associated with this queue.
185 * 185 *
186 * msg_ids.mutex and the spinlock for this message queue is hold 186 * msg_ids.mutex and the spinlock for this message queue is hold
187 * before freeque() is called. msg_ids.mutex remains locked on exit. 187 * before freeque() is called. msg_ids.mutex remains locked on exit.
188 */ 188 */
189static void freeque (struct msg_queue *msq, int id) 189static void freeque(struct msg_queue *msq, int id)
190{ 190{
191 struct list_head *tmp; 191 struct list_head *tmp;
192 192
193 expunge_all(msq,-EIDRM); 193 expunge_all(msq, -EIDRM);
194 ss_wakeup(&msq->q_senders,1); 194 ss_wakeup(&msq->q_senders, 1);
195 msq = msg_rmid(id); 195 msq = msg_rmid(id);
196 msg_unlock(msq); 196 msg_unlock(msq);
197 197
198 tmp = msq->q_messages.next; 198 tmp = msq->q_messages.next;
199 while(tmp != &msq->q_messages) { 199 while (tmp != &msq->q_messages) {
200 struct msg_msg* msg = list_entry(tmp,struct msg_msg,m_list); 200 struct msg_msg *msg = list_entry(tmp, struct msg_msg, m_list);
201
201 tmp = tmp->next; 202 tmp = tmp->next;
202 atomic_dec(&msg_hdrs); 203 atomic_dec(&msg_hdrs);
203 free_msg(msg); 204 free_msg(msg);
@@ -207,10 +208,10 @@ static void freeque (struct msg_queue *msq, int id)
207 ipc_rcu_putref(msq); 208 ipc_rcu_putref(msq);
208} 209}
209 210
210asmlinkage long sys_msgget (key_t key, int msgflg) 211asmlinkage long sys_msgget(key_t key, int msgflg)
211{ 212{
212 int id, ret = -EPERM;
213 struct msg_queue *msq; 213 struct msg_queue *msq;
214 int id, ret = -EPERM;
214 215
215 mutex_lock(&msg_ids.mutex); 216 mutex_lock(&msg_ids.mutex);
216 if (key == IPC_PRIVATE) 217 if (key == IPC_PRIVATE)
@@ -224,31 +225,34 @@ asmlinkage long sys_msgget (key_t key, int msgflg)
224 ret = -EEXIST; 225 ret = -EEXIST;
225 } else { 226 } else {
226 msq = msg_lock(id); 227 msq = msg_lock(id);
227 BUG_ON(msq==NULL); 228 BUG_ON(msq == NULL);
228 if (ipcperms(&msq->q_perm, msgflg)) 229 if (ipcperms(&msq->q_perm, msgflg))
229 ret = -EACCES; 230 ret = -EACCES;
230 else { 231 else {
231 int qid = msg_buildid(id, msq->q_perm.seq); 232 int qid = msg_buildid(id, msq->q_perm.seq);
232 ret = security_msg_queue_associate(msq, msgflg); 233
234 ret = security_msg_queue_associate(msq, msgflg);
233 if (!ret) 235 if (!ret)
234 ret = qid; 236 ret = qid;
235 } 237 }
236 msg_unlock(msq); 238 msg_unlock(msq);
237 } 239 }
238 mutex_unlock(&msg_ids.mutex); 240 mutex_unlock(&msg_ids.mutex);
241
239 return ret; 242 return ret;
240} 243}
241 244
242static inline unsigned long copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version) 245static inline unsigned long
246copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
243{ 247{
244 switch(version) { 248 switch(version) {
245 case IPC_64: 249 case IPC_64:
246 return copy_to_user (buf, in, sizeof(*in)); 250 return copy_to_user(buf, in, sizeof(*in));
247 case IPC_OLD: 251 case IPC_OLD:
248 { 252 {
249 struct msqid_ds out; 253 struct msqid_ds out;
250 254
251 memset(&out,0,sizeof(out)); 255 memset(&out, 0, sizeof(out));
252 256
253 ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm); 257 ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);
254 258
@@ -256,18 +260,18 @@ static inline unsigned long copy_msqid_to_user(void __user *buf, struct msqid64_
256 out.msg_rtime = in->msg_rtime; 260 out.msg_rtime = in->msg_rtime;
257 out.msg_ctime = in->msg_ctime; 261 out.msg_ctime = in->msg_ctime;
258 262
259 if(in->msg_cbytes > USHRT_MAX) 263 if (in->msg_cbytes > USHRT_MAX)
260 out.msg_cbytes = USHRT_MAX; 264 out.msg_cbytes = USHRT_MAX;
261 else 265 else
262 out.msg_cbytes = in->msg_cbytes; 266 out.msg_cbytes = in->msg_cbytes;
263 out.msg_lcbytes = in->msg_cbytes; 267 out.msg_lcbytes = in->msg_cbytes;
264 268
265 if(in->msg_qnum > USHRT_MAX) 269 if (in->msg_qnum > USHRT_MAX)
266 out.msg_qnum = USHRT_MAX; 270 out.msg_qnum = USHRT_MAX;
267 else 271 else
268 out.msg_qnum = in->msg_qnum; 272 out.msg_qnum = in->msg_qnum;
269 273
270 if(in->msg_qbytes > USHRT_MAX) 274 if (in->msg_qbytes > USHRT_MAX)
271 out.msg_qbytes = USHRT_MAX; 275 out.msg_qbytes = USHRT_MAX;
272 else 276 else
273 out.msg_qbytes = in->msg_qbytes; 277 out.msg_qbytes = in->msg_qbytes;
@@ -276,8 +280,8 @@ static inline unsigned long copy_msqid_to_user(void __user *buf, struct msqid64_
276 out.msg_lspid = in->msg_lspid; 280 out.msg_lspid = in->msg_lspid;
277 out.msg_lrpid = in->msg_lrpid; 281 out.msg_lrpid = in->msg_lrpid;
278 282
279 return copy_to_user (buf, &out, sizeof(out)); 283 return copy_to_user(buf, &out, sizeof(out));
280 } 284 }
281 default: 285 default:
282 return -EINVAL; 286 return -EINVAL;
283 } 287 }
@@ -290,14 +294,15 @@ struct msq_setbuf {
290 mode_t mode; 294 mode_t mode;
291}; 295};
292 296
293static inline unsigned long copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version) 297static inline unsigned long
298copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version)
294{ 299{
295 switch(version) { 300 switch(version) {
296 case IPC_64: 301 case IPC_64:
297 { 302 {
298 struct msqid64_ds tbuf; 303 struct msqid64_ds tbuf;
299 304
300 if (copy_from_user (&tbuf, buf, sizeof (tbuf))) 305 if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
301 return -EFAULT; 306 return -EFAULT;
302 307
303 out->qbytes = tbuf.msg_qbytes; 308 out->qbytes = tbuf.msg_qbytes;
@@ -306,60 +311,61 @@ static inline unsigned long copy_msqid_from_user(struct msq_setbuf *out, void __
306 out->mode = tbuf.msg_perm.mode; 311 out->mode = tbuf.msg_perm.mode;
307 312
308 return 0; 313 return 0;
309 } 314 }
310 case IPC_OLD: 315 case IPC_OLD:
311 { 316 {
312 struct msqid_ds tbuf_old; 317 struct msqid_ds tbuf_old;
313 318
314 if (copy_from_user (&tbuf_old, buf, sizeof (tbuf_old))) 319 if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
315 return -EFAULT; 320 return -EFAULT;
316 321
317 out->uid = tbuf_old.msg_perm.uid; 322 out->uid = tbuf_old.msg_perm.uid;
318 out->gid = tbuf_old.msg_perm.gid; 323 out->gid = tbuf_old.msg_perm.gid;
319 out->mode = tbuf_old.msg_perm.mode; 324 out->mode = tbuf_old.msg_perm.mode;
320 325
321 if(tbuf_old.msg_qbytes == 0) 326 if (tbuf_old.msg_qbytes == 0)
322 out->qbytes = tbuf_old.msg_lqbytes; 327 out->qbytes = tbuf_old.msg_lqbytes;
323 else 328 else
324 out->qbytes = tbuf_old.msg_qbytes; 329 out->qbytes = tbuf_old.msg_qbytes;
325 330
326 return 0; 331 return 0;
327 } 332 }
328 default: 333 default:
329 return -EINVAL; 334 return -EINVAL;
330 } 335 }
331} 336}
332 337
333asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf) 338asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
334{ 339{
335 int err, version;
336 struct msg_queue *msq;
337 struct msq_setbuf setbuf;
338 struct kern_ipc_perm *ipcp; 340 struct kern_ipc_perm *ipcp;
339 341 struct msq_setbuf setbuf;
342 struct msg_queue *msq;
343 int err, version;
344
340 if (msqid < 0 || cmd < 0) 345 if (msqid < 0 || cmd < 0)
341 return -EINVAL; 346 return -EINVAL;
342 347
343 version = ipc_parse_version(&cmd); 348 version = ipc_parse_version(&cmd);
344 349
345 switch (cmd) { 350 switch (cmd) {
346 case IPC_INFO: 351 case IPC_INFO:
347 case MSG_INFO: 352 case MSG_INFO:
348 { 353 {
349 struct msginfo msginfo; 354 struct msginfo msginfo;
350 int max_id; 355 int max_id;
356
351 if (!buf) 357 if (!buf)
352 return -EFAULT; 358 return -EFAULT;
353 /* We must not return kernel stack data. 359 /*
360 * We must not return kernel stack data.
354 * due to padding, it's not enough 361 * due to padding, it's not enough
355 * to set all member fields. 362 * to set all member fields.
356 */ 363 */
357
358 err = security_msg_queue_msgctl(NULL, cmd); 364 err = security_msg_queue_msgctl(NULL, cmd);
359 if (err) 365 if (err)
360 return err; 366 return err;
361 367
362 memset(&msginfo,0,sizeof(msginfo)); 368 memset(&msginfo, 0, sizeof(msginfo));
363 msginfo.msgmni = msg_ctlmni; 369 msginfo.msgmni = msg_ctlmni;
364 msginfo.msgmax = msg_ctlmax; 370 msginfo.msgmax = msg_ctlmax;
365 msginfo.msgmnb = msg_ctlmnb; 371 msginfo.msgmnb = msg_ctlmnb;
@@ -377,36 +383,37 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf)
377 } 383 }
378 max_id = msg_ids.max_id; 384 max_id = msg_ids.max_id;
379 mutex_unlock(&msg_ids.mutex); 385 mutex_unlock(&msg_ids.mutex);
380 if (copy_to_user (buf, &msginfo, sizeof(struct msginfo))) 386 if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
381 return -EFAULT; 387 return -EFAULT;
382 return (max_id < 0) ? 0: max_id; 388 return (max_id < 0) ? 0 : max_id;
383 } 389 }
384 case MSG_STAT: 390 case MSG_STAT:
385 case IPC_STAT: 391 case IPC_STAT:
386 { 392 {
387 struct msqid64_ds tbuf; 393 struct msqid64_ds tbuf;
388 int success_return; 394 int success_return;
395
389 if (!buf) 396 if (!buf)
390 return -EFAULT; 397 return -EFAULT;
391 if(cmd == MSG_STAT && msqid >= msg_ids.entries->size) 398 if (cmd == MSG_STAT && msqid >= msg_ids.entries->size)
392 return -EINVAL; 399 return -EINVAL;
393 400
394 memset(&tbuf,0,sizeof(tbuf)); 401 memset(&tbuf, 0, sizeof(tbuf));
395 402
396 msq = msg_lock(msqid); 403 msq = msg_lock(msqid);
397 if (msq == NULL) 404 if (msq == NULL)
398 return -EINVAL; 405 return -EINVAL;
399 406
400 if(cmd == MSG_STAT) { 407 if (cmd == MSG_STAT) {
401 success_return = msg_buildid(msqid, msq->q_perm.seq); 408 success_return = msg_buildid(msqid, msq->q_perm.seq);
402 } else { 409 } else {
403 err = -EIDRM; 410 err = -EIDRM;
404 if (msg_checkid(msq,msqid)) 411 if (msg_checkid(msq, msqid))
405 goto out_unlock; 412 goto out_unlock;
406 success_return = 0; 413 success_return = 0;
407 } 414 }
408 err = -EACCES; 415 err = -EACCES;
409 if (ipcperms (&msq->q_perm, S_IRUGO)) 416 if (ipcperms(&msq->q_perm, S_IRUGO))
410 goto out_unlock; 417 goto out_unlock;
411 418
412 err = security_msg_queue_msgctl(msq, cmd); 419 err = security_msg_queue_msgctl(msq, cmd);
@@ -430,7 +437,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf)
430 case IPC_SET: 437 case IPC_SET:
431 if (!buf) 438 if (!buf)
432 return -EFAULT; 439 return -EFAULT;
433 if (copy_msqid_from_user (&setbuf, buf, version)) 440 if (copy_msqid_from_user(&setbuf, buf, version))
434 return -EFAULT; 441 return -EFAULT;
435 break; 442 break;
436 case IPC_RMID: 443 case IPC_RMID:
@@ -441,12 +448,12 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf)
441 448
442 mutex_lock(&msg_ids.mutex); 449 mutex_lock(&msg_ids.mutex);
443 msq = msg_lock(msqid); 450 msq = msg_lock(msqid);
444 err=-EINVAL; 451 err = -EINVAL;
445 if (msq == NULL) 452 if (msq == NULL)
446 goto out_up; 453 goto out_up;
447 454
448 err = -EIDRM; 455 err = -EIDRM;
449 if (msg_checkid(msq,msqid)) 456 if (msg_checkid(msq, msqid))
450 goto out_unlock_up; 457 goto out_unlock_up;
451 ipcp = &msq->q_perm; 458 ipcp = &msq->q_perm;
452 459
@@ -454,15 +461,16 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf)
454 if (err) 461 if (err)
455 goto out_unlock_up; 462 goto out_unlock_up;
456 if (cmd==IPC_SET) { 463 if (cmd==IPC_SET) {
457 err = audit_ipc_set_perm(setbuf.qbytes, setbuf.uid, setbuf.gid, setbuf.mode); 464 err = audit_ipc_set_perm(setbuf.qbytes, setbuf.uid, setbuf.gid,
465 setbuf.mode);
458 if (err) 466 if (err)
459 goto out_unlock_up; 467 goto out_unlock_up;
460 } 468 }
461 469
462 err = -EPERM; 470 err = -EPERM;
463 if (current->euid != ipcp->cuid && 471 if (current->euid != ipcp->cuid &&
464 current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN)) 472 current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN))
465 /* We _could_ check for CAP_CHOWN above, but we don't */ 473 /* We _could_ check for CAP_CHOWN above, but we don't */
466 goto out_unlock_up; 474 goto out_unlock_up;
467 475
468 err = security_msg_queue_msgctl(msq, cmd); 476 err = security_msg_queue_msgctl(msq, cmd);
@@ -480,22 +488,22 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf)
480 488
481 ipcp->uid = setbuf.uid; 489 ipcp->uid = setbuf.uid;
482 ipcp->gid = setbuf.gid; 490 ipcp->gid = setbuf.gid;
483 ipcp->mode = (ipcp->mode & ~S_IRWXUGO) | 491 ipcp->mode = (ipcp->mode & ~S_IRWXUGO) |
484 (S_IRWXUGO & setbuf.mode); 492 (S_IRWXUGO & setbuf.mode);
485 msq->q_ctime = get_seconds(); 493 msq->q_ctime = get_seconds();
486 /* sleeping receivers might be excluded by 494 /* sleeping receivers might be excluded by
487 * stricter permissions. 495 * stricter permissions.
488 */ 496 */
489 expunge_all(msq,-EAGAIN); 497 expunge_all(msq, -EAGAIN);
490 /* sleeping senders might be able to send 498 /* sleeping senders might be able to send
491 * due to a larger queue size. 499 * due to a larger queue size.
492 */ 500 */
493 ss_wakeup(&msq->q_senders,0); 501 ss_wakeup(&msq->q_senders, 0);
494 msg_unlock(msq); 502 msg_unlock(msq);
495 break; 503 break;
496 } 504 }
497 case IPC_RMID: 505 case IPC_RMID:
498 freeque (msq, msqid); 506 freeque(msq, msqid);
499 break; 507 break;
500 } 508 }
501 err = 0; 509 err = 0;
@@ -510,41 +518,44 @@ out_unlock:
510 return err; 518 return err;
511} 519}
512 520
513static int testmsg(struct msg_msg* msg,long type,int mode) 521static int testmsg(struct msg_msg *msg, long type, int mode)
514{ 522{
515 switch(mode) 523 switch(mode)
516 { 524 {
517 case SEARCH_ANY: 525 case SEARCH_ANY:
518 return 1; 526 return 1;
519 case SEARCH_LESSEQUAL: 527 case SEARCH_LESSEQUAL:
520 if(msg->m_type <=type) 528 if (msg->m_type <=type)
521 return 1; 529 return 1;
522 break; 530 break;
523 case SEARCH_EQUAL: 531 case SEARCH_EQUAL:
524 if(msg->m_type == type) 532 if (msg->m_type == type)
525 return 1; 533 return 1;
526 break; 534 break;
527 case SEARCH_NOTEQUAL: 535 case SEARCH_NOTEQUAL:
528 if(msg->m_type != type) 536 if (msg->m_type != type)
529 return 1; 537 return 1;
530 break; 538 break;
531 } 539 }
532 return 0; 540 return 0;
533} 541}
534 542
535static inline int pipelined_send(struct msg_queue* msq, struct msg_msg* msg) 543static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
536{ 544{
537 struct list_head* tmp; 545 struct list_head *tmp;
538 546
539 tmp = msq->q_receivers.next; 547 tmp = msq->q_receivers.next;
540 while (tmp != &msq->q_receivers) { 548 while (tmp != &msq->q_receivers) {
541 struct msg_receiver* msr; 549 struct msg_receiver *msr;
542 msr = list_entry(tmp,struct msg_receiver,r_list); 550
551 msr = list_entry(tmp, struct msg_receiver, r_list);
543 tmp = tmp->next; 552 tmp = tmp->next;
544 if(testmsg(msg,msr->r_msgtype,msr->r_mode) && 553 if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
545 !security_msg_queue_msgrcv(msq, msg, msr->r_tsk, msr->r_msgtype, msr->r_mode)) { 554 !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
555 msr->r_msgtype, msr->r_mode)) {
556
546 list_del(&msr->r_list); 557 list_del(&msr->r_list);
547 if(msr->r_maxsize < msg->m_ts) { 558 if (msr->r_maxsize < msg->m_ts) {
548 msr->r_msg = NULL; 559 msr->r_msg = NULL;
549 wake_up_process(msr->r_tsk); 560 wake_up_process(msr->r_tsk);
550 smp_mb(); 561 smp_mb();
@@ -556,6 +567,7 @@ static inline int pipelined_send(struct msg_queue* msq, struct msg_msg* msg)
556 wake_up_process(msr->r_tsk); 567 wake_up_process(msr->r_tsk);
557 smp_mb(); 568 smp_mb();
558 msr->r_msg = msg; 569 msr->r_msg = msg;
570
559 return 1; 571 return 1;
560 } 572 }
561 } 573 }
@@ -563,40 +575,41 @@ static inline int pipelined_send(struct msg_queue* msq, struct msg_msg* msg)
563 return 0; 575 return 0;
564} 576}
565 577
566asmlinkage long sys_msgsnd (int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg) 578asmlinkage long
579sys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg)
567{ 580{
568 struct msg_queue *msq; 581 struct msg_queue *msq;
569 struct msg_msg *msg; 582 struct msg_msg *msg;
570 long mtype; 583 long mtype;
571 int err; 584 int err;
572 585
573 if (msgsz > msg_ctlmax || (long) msgsz < 0 || msqid < 0) 586 if (msgsz > msg_ctlmax || (long) msgsz < 0 || msqid < 0)
574 return -EINVAL; 587 return -EINVAL;
575 if (get_user(mtype, &msgp->mtype)) 588 if (get_user(mtype, &msgp->mtype))
576 return -EFAULT; 589 return -EFAULT;
577 if (mtype < 1) 590 if (mtype < 1)
578 return -EINVAL; 591 return -EINVAL;
579 592
580 msg = load_msg(msgp->mtext, msgsz); 593 msg = load_msg(msgp->mtext, msgsz);
581 if(IS_ERR(msg)) 594 if (IS_ERR(msg))
582 return PTR_ERR(msg); 595 return PTR_ERR(msg);
583 596
584 msg->m_type = mtype; 597 msg->m_type = mtype;
585 msg->m_ts = msgsz; 598 msg->m_ts = msgsz;
586 599
587 msq = msg_lock(msqid); 600 msq = msg_lock(msqid);
588 err=-EINVAL; 601 err = -EINVAL;
589 if(msq==NULL) 602 if (msq == NULL)
590 goto out_free; 603 goto out_free;
591 604
592 err= -EIDRM; 605 err= -EIDRM;
593 if (msg_checkid(msq,msqid)) 606 if (msg_checkid(msq, msqid))
594 goto out_unlock_free; 607 goto out_unlock_free;
595 608
596 for (;;) { 609 for (;;) {
597 struct msg_sender s; 610 struct msg_sender s;
598 611
599 err=-EACCES; 612 err = -EACCES;
600 if (ipcperms(&msq->q_perm, S_IWUGO)) 613 if (ipcperms(&msq->q_perm, S_IWUGO))
601 goto out_unlock_free; 614 goto out_unlock_free;
602 615
@@ -604,14 +617,14 @@ asmlinkage long sys_msgsnd (int msqid, struct msgbuf __user *msgp, size_t msgsz,
604 if (err) 617 if (err)
605 goto out_unlock_free; 618 goto out_unlock_free;
606 619
607 if(msgsz + msq->q_cbytes <= msq->q_qbytes && 620 if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
608 1 + msq->q_qnum <= msq->q_qbytes) { 621 1 + msq->q_qnum <= msq->q_qbytes) {
609 break; 622 break;
610 } 623 }
611 624
612 /* queue full, wait: */ 625 /* queue full, wait: */
613 if(msgflg&IPC_NOWAIT) { 626 if (msgflg & IPC_NOWAIT) {
614 err=-EAGAIN; 627 err = -EAGAIN;
615 goto out_unlock_free; 628 goto out_unlock_free;
616 } 629 }
617 ss_add(msq, &s); 630 ss_add(msq, &s);
@@ -626,9 +639,9 @@ asmlinkage long sys_msgsnd (int msqid, struct msgbuf __user *msgp, size_t msgsz,
626 goto out_unlock_free; 639 goto out_unlock_free;
627 } 640 }
628 ss_del(&s); 641 ss_del(&s);
629 642
630 if (signal_pending(current)) { 643 if (signal_pending(current)) {
631 err=-ERESTARTNOHAND; 644 err = -ERESTARTNOHAND;
632 goto out_unlock_free; 645 goto out_unlock_free;
633 } 646 }
634 } 647 }
@@ -636,47 +649,47 @@ asmlinkage long sys_msgsnd (int msqid, struct msgbuf __user *msgp, size_t msgsz,
636 msq->q_lspid = current->tgid; 649 msq->q_lspid = current->tgid;
637 msq->q_stime = get_seconds(); 650 msq->q_stime = get_seconds();
638 651
639 if(!pipelined_send(msq,msg)) { 652 if (!pipelined_send(msq, msg)) {
640 /* noone is waiting for this message, enqueue it */ 653 /* noone is waiting for this message, enqueue it */
641 list_add_tail(&msg->m_list,&msq->q_messages); 654 list_add_tail(&msg->m_list, &msq->q_messages);
642 msq->q_cbytes += msgsz; 655 msq->q_cbytes += msgsz;
643 msq->q_qnum++; 656 msq->q_qnum++;
644 atomic_add(msgsz,&msg_bytes); 657 atomic_add(msgsz, &msg_bytes);
645 atomic_inc(&msg_hdrs); 658 atomic_inc(&msg_hdrs);
646 } 659 }
647 660
648 err = 0; 661 err = 0;
649 msg = NULL; 662 msg = NULL;
650 663
651out_unlock_free: 664out_unlock_free:
652 msg_unlock(msq); 665 msg_unlock(msq);
653out_free: 666out_free:
654 if(msg!=NULL) 667 if (msg != NULL)
655 free_msg(msg); 668 free_msg(msg);
656 return err; 669 return err;
657} 670}
658 671
659static inline int convert_mode(long* msgtyp, int msgflg) 672static inline int convert_mode(long *msgtyp, int msgflg)
660{ 673{
661 /* 674 /*
662 * find message of correct type. 675 * find message of correct type.
663 * msgtyp = 0 => get first. 676 * msgtyp = 0 => get first.
664 * msgtyp > 0 => get first message of matching type. 677 * msgtyp > 0 => get first message of matching type.
665 * msgtyp < 0 => get message with least type must be < abs(msgtype). 678 * msgtyp < 0 => get message with least type must be < abs(msgtype).
666 */ 679 */
667 if(*msgtyp==0) 680 if (*msgtyp == 0)
668 return SEARCH_ANY; 681 return SEARCH_ANY;
669 if(*msgtyp<0) { 682 if (*msgtyp < 0) {
670 *msgtyp=-(*msgtyp); 683 *msgtyp = -*msgtyp;
671 return SEARCH_LESSEQUAL; 684 return SEARCH_LESSEQUAL;
672 } 685 }
673 if(msgflg & MSG_EXCEPT) 686 if (msgflg & MSG_EXCEPT)
674 return SEARCH_NOTEQUAL; 687 return SEARCH_NOTEQUAL;
675 return SEARCH_EQUAL; 688 return SEARCH_EQUAL;
676} 689}
677 690
678asmlinkage long sys_msgrcv (int msqid, struct msgbuf __user *msgp, size_t msgsz, 691asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
679 long msgtyp, int msgflg) 692 long msgtyp, int msgflg)
680{ 693{
681 struct msg_queue *msq; 694 struct msg_queue *msq;
682 struct msg_msg *msg; 695 struct msg_msg *msg;
@@ -684,44 +697,51 @@ asmlinkage long sys_msgrcv (int msqid, struct msgbuf __user *msgp, size_t msgsz,
684 697
685 if (msqid < 0 || (long) msgsz < 0) 698 if (msqid < 0 || (long) msgsz < 0)
686 return -EINVAL; 699 return -EINVAL;
687 mode = convert_mode(&msgtyp,msgflg); 700 mode = convert_mode(&msgtyp, msgflg);
688 701
689 msq = msg_lock(msqid); 702 msq = msg_lock(msqid);
690 if(msq==NULL) 703 if (msq == NULL)
691 return -EINVAL; 704 return -EINVAL;
692 705
693 msg = ERR_PTR(-EIDRM); 706 msg = ERR_PTR(-EIDRM);
694 if (msg_checkid(msq,msqid)) 707 if (msg_checkid(msq, msqid))
695 goto out_unlock; 708 goto out_unlock;
696 709
697 for (;;) { 710 for (;;) {
698 struct msg_receiver msr_d; 711 struct msg_receiver msr_d;
699 struct list_head* tmp; 712 struct list_head *tmp;
700 713
701 msg = ERR_PTR(-EACCES); 714 msg = ERR_PTR(-EACCES);
702 if (ipcperms (&msq->q_perm, S_IRUGO)) 715 if (ipcperms(&msq->q_perm, S_IRUGO))
703 goto out_unlock; 716 goto out_unlock;
704 717
705 msg = ERR_PTR(-EAGAIN); 718 msg = ERR_PTR(-EAGAIN);
706 tmp = msq->q_messages.next; 719 tmp = msq->q_messages.next;
707 while (tmp != &msq->q_messages) { 720 while (tmp != &msq->q_messages) {
708 struct msg_msg *walk_msg; 721 struct msg_msg *walk_msg;
709 walk_msg = list_entry(tmp,struct msg_msg,m_list); 722
710 if(testmsg(walk_msg,msgtyp,mode) && 723 walk_msg = list_entry(tmp, struct msg_msg, m_list);
711 !security_msg_queue_msgrcv(msq, walk_msg, current, msgtyp, mode)) { 724 if (testmsg(walk_msg, msgtyp, mode) &&
725 !security_msg_queue_msgrcv(msq, walk_msg, current,
726 msgtyp, mode)) {
727
712 msg = walk_msg; 728 msg = walk_msg;
713 if(mode == SEARCH_LESSEQUAL && walk_msg->m_type != 1) { 729 if (mode == SEARCH_LESSEQUAL &&
714 msg=walk_msg; 730 walk_msg->m_type != 1) {
715 msgtyp=walk_msg->m_type-1; 731 msg = walk_msg;
732 msgtyp = walk_msg->m_type - 1;
716 } else { 733 } else {
717 msg=walk_msg; 734 msg = walk_msg;
718 break; 735 break;
719 } 736 }
720 } 737 }
721 tmp = tmp->next; 738 tmp = tmp->next;
722 } 739 }
723 if(!IS_ERR(msg)) { 740 if (!IS_ERR(msg)) {
724 /* Found a suitable message. Unlink it from the queue. */ 741 /*
742 * Found a suitable message.
743 * Unlink it from the queue.
744 */
725 if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) { 745 if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
726 msg = ERR_PTR(-E2BIG); 746 msg = ERR_PTR(-E2BIG);
727 goto out_unlock; 747 goto out_unlock;
@@ -731,9 +751,9 @@ asmlinkage long sys_msgrcv (int msqid, struct msgbuf __user *msgp, size_t msgsz,
731 msq->q_rtime = get_seconds(); 751 msq->q_rtime = get_seconds();
732 msq->q_lrpid = current->tgid; 752 msq->q_lrpid = current->tgid;
733 msq->q_cbytes -= msg->m_ts; 753 msq->q_cbytes -= msg->m_ts;
734 atomic_sub(msg->m_ts,&msg_bytes); 754 atomic_sub(msg->m_ts, &msg_bytes);
735 atomic_dec(&msg_hdrs); 755 atomic_dec(&msg_hdrs);
736 ss_wakeup(&msq->q_senders,0); 756 ss_wakeup(&msq->q_senders, 0);
737 msg_unlock(msq); 757 msg_unlock(msq);
738 break; 758 break;
739 } 759 }
@@ -742,13 +762,13 @@ asmlinkage long sys_msgrcv (int msqid, struct msgbuf __user *msgp, size_t msgsz,
742 msg = ERR_PTR(-ENOMSG); 762 msg = ERR_PTR(-ENOMSG);
743 goto out_unlock; 763 goto out_unlock;
744 } 764 }
745 list_add_tail(&msr_d.r_list,&msq->q_receivers); 765 list_add_tail(&msr_d.r_list, &msq->q_receivers);
746 msr_d.r_tsk = current; 766 msr_d.r_tsk = current;
747 msr_d.r_msgtype = msgtyp; 767 msr_d.r_msgtype = msgtyp;
748 msr_d.r_mode = mode; 768 msr_d.r_mode = mode;
749 if(msgflg & MSG_NOERROR) 769 if (msgflg & MSG_NOERROR)
750 msr_d.r_maxsize = INT_MAX; 770 msr_d.r_maxsize = INT_MAX;
751 else 771 else
752 msr_d.r_maxsize = msgsz; 772 msr_d.r_maxsize = msgsz;
753 msr_d.r_msg = ERR_PTR(-EAGAIN); 773 msr_d.r_msg = ERR_PTR(-EAGAIN);
754 current->state = TASK_INTERRUPTIBLE; 774 current->state = TASK_INTERRUPTIBLE;
@@ -773,17 +793,17 @@ asmlinkage long sys_msgrcv (int msqid, struct msgbuf __user *msgp, size_t msgsz,
773 * wake_up_process(). There is a race with exit(), see 793 * wake_up_process(). There is a race with exit(), see
774 * ipc/mqueue.c for the details. 794 * ipc/mqueue.c for the details.
775 */ 795 */
776 msg = (struct msg_msg*) msr_d.r_msg; 796 msg = (struct msg_msg*)msr_d.r_msg;
777 while (msg == NULL) { 797 while (msg == NULL) {
778 cpu_relax(); 798 cpu_relax();
779 msg = (struct msg_msg*) msr_d.r_msg; 799 msg = (struct msg_msg *)msr_d.r_msg;
780 } 800 }
781 801
782 /* Lockless receive, part 3: 802 /* Lockless receive, part 3:
783 * If there is a message or an error then accept it without 803 * If there is a message or an error then accept it without
784 * locking. 804 * locking.
785 */ 805 */
786 if(msg != ERR_PTR(-EAGAIN)) { 806 if (msg != ERR_PTR(-EAGAIN)) {
787 rcu_read_unlock(); 807 rcu_read_unlock();
788 break; 808 break;
789 } 809 }
@@ -798,7 +818,7 @@ asmlinkage long sys_msgrcv (int msqid, struct msgbuf __user *msgp, size_t msgsz,
798 * Repeat test after acquiring the spinlock. 818 * Repeat test after acquiring the spinlock.
799 */ 819 */
800 msg = (struct msg_msg*)msr_d.r_msg; 820 msg = (struct msg_msg*)msr_d.r_msg;
801 if(msg != ERR_PTR(-EAGAIN)) 821 if (msg != ERR_PTR(-EAGAIN))
802 goto out_unlock; 822 goto out_unlock;
803 823
804 list_del(&msr_d.r_list); 824 list_del(&msr_d.r_list);
@@ -810,14 +830,15 @@ out_unlock:
810 } 830 }
811 } 831 }
812 if (IS_ERR(msg)) 832 if (IS_ERR(msg))
813 return PTR_ERR(msg); 833 return PTR_ERR(msg);
814 834
815 msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz; 835 msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
816 if (put_user (msg->m_type, &msgp->mtype) || 836 if (put_user (msg->m_type, &msgp->mtype) ||
817 store_msg(msgp->mtext, msg, msgsz)) { 837 store_msg(msgp->mtext, msg, msgsz)) {
818 msgsz = -EFAULT; 838 msgsz = -EFAULT;
819 } 839 }
820 free_msg(msg); 840 free_msg(msg);
841
821 return msgsz; 842 return msgsz;
822} 843}
823 844
@@ -827,20 +848,20 @@ static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
827 struct msg_queue *msq = it; 848 struct msg_queue *msq = it;
828 849
829 return seq_printf(s, 850 return seq_printf(s,
830 "%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n", 851 "%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
831 msq->q_perm.key, 852 msq->q_perm.key,
832 msq->q_id, 853 msq->q_id,
833 msq->q_perm.mode, 854 msq->q_perm.mode,
834 msq->q_cbytes, 855 msq->q_cbytes,
835 msq->q_qnum, 856 msq->q_qnum,
836 msq->q_lspid, 857 msq->q_lspid,
837 msq->q_lrpid, 858 msq->q_lrpid,
838 msq->q_perm.uid, 859 msq->q_perm.uid,
839 msq->q_perm.gid, 860 msq->q_perm.gid,
840 msq->q_perm.cuid, 861 msq->q_perm.cuid,
841 msq->q_perm.cgid, 862 msq->q_perm.cgid,
842 msq->q_stime, 863 msq->q_stime,
843 msq->q_rtime, 864 msq->q_rtime,
844 msq->q_ctime); 865 msq->q_ctime);
845} 866}
846#endif 867#endif
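
The SEARCH_LESSEQUAL branch reworked in the sys_msgrcv() hunk above selects the queued message with the lowest type that is still less than or equal to the requested type. A minimal userspace sketch of that selection logic; struct toy_msg and pick_lesseq() are made-up names for illustration, not kernel code:

#include <stdio.h>

/* Toy stand-in for the kernel's msg_msg: only the type matters here. */
struct toy_msg {
	long m_type;
};

/*
 * Mimic the SEARCH_LESSEQUAL scan: keep the candidate with the smallest
 * type <= msgtyp, stopping early once type 1 is seen (nothing beats it).
 */
static struct toy_msg *pick_lesseq(struct toy_msg *q, int n, long msgtyp)
{
	struct toy_msg *best = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (q[i].m_type > msgtyp)
			continue;
		best = &q[i];
		if (q[i].m_type == 1)
			break;			/* can't do better than type 1 */
		msgtyp = q[i].m_type - 1;	/* only accept strictly lower types now */
	}
	return best;
}

int main(void)
{
	struct toy_msg q[] = { {5}, {3}, {7}, {2}, {4} };
	struct toy_msg *m = pick_lesseq(q, 5, 6);

	printf("picked type %ld\n", m ? m->m_type : -1);	/* prints 2 */
	return 0;
}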
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index f05392d64267..57ca3730205d 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -19,15 +19,15 @@
19#include <linux/sysctl.h> 19#include <linux/sysctl.h>
20#include <linux/delayacct.h> 20#include <linux/delayacct.h>
21 21
22int delayacct_on __read_mostly; /* Delay accounting turned on/off */ 22int delayacct_on __read_mostly = 1; /* Delay accounting turned on/off */
23kmem_cache_t *delayacct_cache; 23kmem_cache_t *delayacct_cache;
24 24
25static int __init delayacct_setup_enable(char *str) 25static int __init delayacct_setup_disable(char *str)
26{ 26{
27 delayacct_on = 1; 27 delayacct_on = 0;
28 return 1; 28 return 1;
29} 29}
30__setup("delayacct", delayacct_setup_enable); 30__setup("nodelayacct", delayacct_setup_disable);
31 31
32void delayacct_init(void) 32void delayacct_init(void)
33{ 33{
diff --git a/kernel/futex.c b/kernel/futex.c
index cf0c8e21d1ab..dda2049692a2 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -415,15 +415,15 @@ out_unlock:
415 */ 415 */
416void exit_pi_state_list(struct task_struct *curr) 416void exit_pi_state_list(struct task_struct *curr)
417{ 417{
-	struct futex_hash_bucket *hb;
 	struct list_head *next, *head = &curr->pi_state_list;
 	struct futex_pi_state *pi_state;
+	struct futex_hash_bucket *hb;
421 union futex_key key; 421 union futex_key key;
422 422
423 /* 423 /*
424 * We are a ZOMBIE and nobody can enqueue itself on 424 * We are a ZOMBIE and nobody can enqueue itself on
425 * pi_state_list anymore, but we have to be careful 425 * pi_state_list anymore, but we have to be careful
426 * versus waiters unqueueing themselfs 426 * versus waiters unqueueing themselves:
427 */ 427 */
428 spin_lock_irq(&curr->pi_lock); 428 spin_lock_irq(&curr->pi_lock);
429 while (!list_empty(head)) { 429 while (!list_empty(head)) {
@@ -431,21 +431,24 @@ void exit_pi_state_list(struct task_struct *curr)
431 next = head->next; 431 next = head->next;
432 pi_state = list_entry(next, struct futex_pi_state, list); 432 pi_state = list_entry(next, struct futex_pi_state, list);
433 key = pi_state->key; 433 key = pi_state->key;
+		hb = hash_futex(&key);
 		spin_unlock_irq(&curr->pi_lock);
 
-		hb = hash_futex(&key);
437 spin_lock(&hb->lock); 437 spin_lock(&hb->lock);
438 438
439 spin_lock_irq(&curr->pi_lock); 439 spin_lock_irq(&curr->pi_lock);
+		/*
+		 * We dropped the pi-lock, so re-check whether this
+		 * task still owns the PI-state:
+		 */
 		if (head->next != next) {
 			spin_unlock(&hb->lock);
 			continue;
 		}
 
-		list_del_init(&pi_state->list);
-
 		WARN_ON(pi_state->owner != curr);
-
+		WARN_ON(list_empty(&pi_state->list));
+		list_del_init(&pi_state->list);
449 pi_state->owner = NULL; 452 pi_state->owner = NULL;
450 spin_unlock_irq(&curr->pi_lock); 453 spin_unlock_irq(&curr->pi_lock);
451 454
@@ -470,7 +473,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
470 head = &hb->chain; 473 head = &hb->chain;
471 474
472 list_for_each_entry_safe(this, next, head, list) { 475 list_for_each_entry_safe(this, next, head, list) {
473 if (match_futex (&this->key, &me->key)) { 476 if (match_futex(&this->key, &me->key)) {
474 /* 477 /*
475 * Another waiter already exists - bump up 478 * Another waiter already exists - bump up
476 * the refcount and return its pi_state: 479 * the refcount and return its pi_state:
@@ -482,6 +485,8 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
482 if (unlikely(!pi_state)) 485 if (unlikely(!pi_state))
483 return -EINVAL; 486 return -EINVAL;
484 487
488 WARN_ON(!atomic_read(&pi_state->refcount));
489
485 atomic_inc(&pi_state->refcount); 490 atomic_inc(&pi_state->refcount);
486 me->pi_state = pi_state; 491 me->pi_state = pi_state;
487 492
@@ -490,10 +495,13 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
490 } 495 }
491 496
492 /* 497 /*
-	 * We are the first waiter - try to look up the real owner and
-	 * attach the new pi_state to it:
+	 * We are the first waiter - try to look up the real owner and attach
+	 * the new pi_state to it, but bail out when the owner died bit is set
+	 * and TID = 0:
 	 */
 	pid = uval & FUTEX_TID_MASK;
+	if (!pid && (uval & FUTEX_OWNER_DIED))
+		return -ESRCH;
497 p = futex_find_get_task(pid); 505 p = futex_find_get_task(pid);
498 if (!p) 506 if (!p)
499 return -ESRCH; 507 return -ESRCH;
@@ -510,6 +518,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
510 pi_state->key = me->key; 518 pi_state->key = me->key;
511 519
512 spin_lock_irq(&p->pi_lock); 520 spin_lock_irq(&p->pi_lock);
521 WARN_ON(!list_empty(&pi_state->list));
513 list_add(&pi_state->list, &p->pi_state_list); 522 list_add(&pi_state->list, &p->pi_state_list);
514 pi_state->owner = p; 523 pi_state->owner = p;
515 spin_unlock_irq(&p->pi_lock); 524 spin_unlock_irq(&p->pi_lock);
@@ -573,20 +582,29 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
573 * kept enabled while there is PI state around. We must also 582 * kept enabled while there is PI state around. We must also
574 * preserve the owner died bit.) 583 * preserve the owner died bit.)
575 */ 584 */
-	newval = (uval & FUTEX_OWNER_DIED) | FUTEX_WAITERS | new_owner->pid;
+	if (!(uval & FUTEX_OWNER_DIED)) {
+		newval = FUTEX_WAITERS | new_owner->pid;
 
-	inc_preempt_count();
-	curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
-	dec_preempt_count();
+		inc_preempt_count();
+		curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
+		dec_preempt_count();
+		if (curval == -EFAULT)
+			return -EFAULT;
+		if (curval != uval)
+			return -EINVAL;
+	}
 
-	if (curval == -EFAULT)
-		return -EFAULT;
-	if (curval != uval)
-		return -EINVAL;
+	spin_lock_irq(&pi_state->owner->pi_lock);
+	WARN_ON(list_empty(&pi_state->list));
+	list_del_init(&pi_state->list);
+	spin_unlock_irq(&pi_state->owner->pi_lock);
 
-	list_del_init(&pi_state->owner->pi_state_list);
+	spin_lock_irq(&new_owner->pi_lock);
+	WARN_ON(!list_empty(&pi_state->list));
 	list_add(&pi_state->list, &new_owner->pi_state_list);
 	pi_state->owner = new_owner;
+	spin_unlock_irq(&new_owner->pi_lock);
+
590 rt_mutex_unlock(&pi_state->pi_mutex); 608 rt_mutex_unlock(&pi_state->pi_mutex);
591 609
592 return 0; 610 return 0;
@@ -1236,6 +1254,7 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
1236 /* Owner died? */ 1254 /* Owner died? */
1237 if (q.pi_state->owner != NULL) { 1255 if (q.pi_state->owner != NULL) {
1238 spin_lock_irq(&q.pi_state->owner->pi_lock); 1256 spin_lock_irq(&q.pi_state->owner->pi_lock);
1257 WARN_ON(list_empty(&q.pi_state->list));
1239 list_del_init(&q.pi_state->list); 1258 list_del_init(&q.pi_state->list);
1240 spin_unlock_irq(&q.pi_state->owner->pi_lock); 1259 spin_unlock_irq(&q.pi_state->owner->pi_lock);
1241 } else 1260 } else
@@ -1244,6 +1263,7 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
1244 q.pi_state->owner = current; 1263 q.pi_state->owner = current;
1245 1264
1246 spin_lock_irq(&current->pi_lock); 1265 spin_lock_irq(&current->pi_lock);
1266 WARN_ON(!list_empty(&q.pi_state->list));
1247 list_add(&q.pi_state->list, &current->pi_state_list); 1267 list_add(&q.pi_state->list, &current->pi_state_list);
1248 spin_unlock_irq(&current->pi_lock); 1268 spin_unlock_irq(&current->pi_lock);
1249 1269
@@ -1427,9 +1447,11 @@ retry_locked:
1427 * again. If it succeeds then we can return without waking 1447 * again. If it succeeds then we can return without waking
1428 * anyone else up: 1448 * anyone else up:
1429 */ 1449 */
-	inc_preempt_count();
-	uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0);
-	dec_preempt_count();
+	if (!(uval & FUTEX_OWNER_DIED)) {
+		inc_preempt_count();
+		uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0);
+		dec_preempt_count();
+	}
1433 1455
1434 if (unlikely(uval == -EFAULT)) 1456 if (unlikely(uval == -EFAULT))
1435 goto pi_faulted; 1457 goto pi_faulted;
@@ -1462,9 +1484,11 @@ retry_locked:
1462 /* 1484 /*
1463 * No waiters - kernel unlocks the futex: 1485 * No waiters - kernel unlocks the futex:
1464 */ 1486 */
-	ret = unlock_futex_pi(uaddr, uval);
-	if (ret == -EFAULT)
-		goto pi_faulted;
+	if (!(uval & FUTEX_OWNER_DIED)) {
+		ret = unlock_futex_pi(uaddr, uval);
+		if (ret == -EFAULT)
+			goto pi_faulted;
+	}
1468 1492
1469out_unlock: 1493out_unlock:
1470 spin_unlock(&hb->lock); 1494 spin_unlock(&hb->lock);
@@ -1683,9 +1707,9 @@ err_unlock:
1683 * Process a futex-list entry, check whether it's owned by the 1707 * Process a futex-list entry, check whether it's owned by the
1684 * dying task, and do notification if so: 1708 * dying task, and do notification if so:
1685 */ 1709 */
1686int handle_futex_death(u32 __user *uaddr, struct task_struct *curr) 1710int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
1687{ 1711{
1688 u32 uval, nval; 1712 u32 uval, nval, mval;
1689 1713
1690retry: 1714retry:
1691 if (get_user(uval, uaddr)) 1715 if (get_user(uval, uaddr))
@@ -1702,21 +1726,45 @@ retry:
1702 * thread-death.) The rest of the cleanup is done in 1726 * thread-death.) The rest of the cleanup is done in
1703 * userspace. 1727 * userspace.
1704 */ 1728 */
-		nval = futex_atomic_cmpxchg_inatomic(uaddr, uval,
-						     uval | FUTEX_OWNER_DIED);
+		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
+		nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);
+
 		if (nval == -EFAULT)
 			return -1;
 
 		if (nval != uval)
 			goto retry;
 
-		if (uval & FUTEX_WAITERS)
-			futex_wake(uaddr, 1);
+		/*
+		 * Wake robust non-PI futexes here. The wakeup of
+		 * PI futexes happens in exit_pi_state():
+		 */
+		if (!pi) {
+			if (uval & FUTEX_WAITERS)
+				futex_wake(uaddr, 1);
+		}
1715 } 1746 }
1716 return 0; 1747 return 0;
1717} 1748}
1718 1749
1719/* 1750/*
1751 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
1752 */
1753static inline int fetch_robust_entry(struct robust_list __user **entry,
1754 struct robust_list __user **head, int *pi)
1755{
1756 unsigned long uentry;
1757
1758 if (get_user(uentry, (unsigned long *)head))
1759 return -EFAULT;
1760
1761 *entry = (void *)(uentry & ~1UL);
1762 *pi = uentry & 1;
1763
1764 return 0;
1765}
1766
1767/*
1720 * Walk curr->robust_list (very carefully, it's a userspace list!) 1768 * Walk curr->robust_list (very carefully, it's a userspace list!)
1721 * and mark any locks found there dead, and notify any waiters. 1769 * and mark any locks found there dead, and notify any waiters.
1722 * 1770 *
@@ -1726,14 +1774,14 @@ void exit_robust_list(struct task_struct *curr)
1726{ 1774{
1727 struct robust_list_head __user *head = curr->robust_list; 1775 struct robust_list_head __user *head = curr->robust_list;
1728 struct robust_list __user *entry, *pending; 1776 struct robust_list __user *entry, *pending;
1729 unsigned int limit = ROBUST_LIST_LIMIT; 1777 unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
1730 unsigned long futex_offset; 1778 unsigned long futex_offset;
1731 1779
1732 /* 1780 /*
1733 * Fetch the list head (which was registered earlier, via 1781 * Fetch the list head (which was registered earlier, via
1734 * sys_set_robust_list()): 1782 * sys_set_robust_list()):
1735 */ 1783 */
1736 if (get_user(entry, &head->list.next)) 1784 if (fetch_robust_entry(&entry, &head->list.next, &pi))
1737 return; 1785 return;
1738 /* 1786 /*
1739 * Fetch the relative futex offset: 1787 * Fetch the relative futex offset:
@@ -1744,10 +1792,11 @@ void exit_robust_list(struct task_struct *curr)
1744 * Fetch any possibly pending lock-add first, and handle it 1792 * Fetch any possibly pending lock-add first, and handle it
1745 * if it exists: 1793 * if it exists:
1746 */ 1794 */
1747 if (get_user(pending, &head->list_op_pending)) 1795 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
1748 return; 1796 return;
1797
1749 if (pending) 1798 if (pending)
1750 handle_futex_death((void *)pending + futex_offset, curr); 1799 handle_futex_death((void *)pending + futex_offset, curr, pip);
1751 1800
1752 while (entry != &head->list) { 1801 while (entry != &head->list) {
1753 /* 1802 /*
@@ -1756,12 +1805,12 @@ void exit_robust_list(struct task_struct *curr)
1756 */ 1805 */
1757 if (entry != pending) 1806 if (entry != pending)
1758 if (handle_futex_death((void *)entry + futex_offset, 1807 if (handle_futex_death((void *)entry + futex_offset,
1759 curr)) 1808 curr, pi))
1760 return; 1809 return;
1761 /* 1810 /*
1762 * Fetch the next entry in the list: 1811 * Fetch the next entry in the list:
1763 */ 1812 */
1764 if (get_user(entry, &entry->next)) 1813 if (fetch_robust_entry(&entry, &entry->next, &pi))
1765 return; 1814 return;
1766 /* 1815 /*
1767 * Avoid excessively long or circular lists: 1816 * Avoid excessively long or circular lists:
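
The new fetch_robust_entry() helpers rely on user-space robust-list pointers being at least 2-byte aligned, so bit 0 is free to carry the "this entry is a PI futex" flag. A minimal userspace sketch of that pointer-tagging scheme; the names tag_entry(), untag_entry() and robust_node are illustrative, and it assumes suitably aligned objects:

#include <stdint.h>
#include <stdio.h>

struct robust_node {
	struct robust_node *next;
	int lock_word;
};

/* Tag an aligned pointer with a one-bit flag in its low bit. */
static uintptr_t tag_entry(struct robust_node *p, int pi)
{
	return (uintptr_t)p | (pi ? 1UL : 0UL);
}

/* Split a tagged value back into pointer and flag, like fetch_robust_entry(). */
static struct robust_node *untag_entry(uintptr_t v, int *pi)
{
	*pi = (int)(v & 1);
	return (struct robust_node *)(v & ~(uintptr_t)1);
}

int main(void)
{
	struct robust_node n = { NULL, 0 };
	uintptr_t tagged = tag_entry(&n, 1);
	int pi;
	struct robust_node *p = untag_entry(tagged, &pi);

	printf("pointer ok: %d, pi bit: %d\n", p == &n, pi);
	return 0;
}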
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index d1d92b441fb7..d1aab1a452cc 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -12,6 +12,23 @@
12 12
13#include <asm/uaccess.h> 13#include <asm/uaccess.h>
14 14
15
16/*
17 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
18 */
19static inline int
20fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
21 compat_uptr_t *head, int *pi)
22{
23 if (get_user(*uentry, head))
24 return -EFAULT;
25
26 *entry = compat_ptr((*uentry) & ~1);
27 *pi = (unsigned int)(*uentry) & 1;
28
29 return 0;
30}
31
15/* 32/*
16 * Walk curr->robust_list (very carefully, it's a userspace list!) 33 * Walk curr->robust_list (very carefully, it's a userspace list!)
17 * and mark any locks found there dead, and notify any waiters. 34 * and mark any locks found there dead, and notify any waiters.
@@ -22,17 +39,16 @@ void compat_exit_robust_list(struct task_struct *curr)
22{ 39{
23 struct compat_robust_list_head __user *head = curr->compat_robust_list; 40 struct compat_robust_list_head __user *head = curr->compat_robust_list;
24 struct robust_list __user *entry, *pending; 41 struct robust_list __user *entry, *pending;
42 unsigned int limit = ROBUST_LIST_LIMIT, pi;
25 compat_uptr_t uentry, upending; 43 compat_uptr_t uentry, upending;
26 unsigned int limit = ROBUST_LIST_LIMIT;
27 compat_long_t futex_offset; 44 compat_long_t futex_offset;
28 45
29 /* 46 /*
30 * Fetch the list head (which was registered earlier, via 47 * Fetch the list head (which was registered earlier, via
31 * sys_set_robust_list()): 48 * sys_set_robust_list()):
32 */ 49 */
33 if (get_user(uentry, &head->list.next)) 50 if (fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
34 return; 51 return;
35 entry = compat_ptr(uentry);
36 /* 52 /*
37 * Fetch the relative futex offset: 53 * Fetch the relative futex offset:
38 */ 54 */
@@ -42,11 +58,11 @@ void compat_exit_robust_list(struct task_struct *curr)
42 * Fetch any possibly pending lock-add first, and handle it 58 * Fetch any possibly pending lock-add first, and handle it
43 * if it exists: 59 * if it exists:
44 */ 60 */
45 if (get_user(upending, &head->list_op_pending)) 61 if (fetch_robust_entry(&upending, &pending,
62 &head->list_op_pending, &pi))
46 return; 63 return;
47 pending = compat_ptr(upending);
48 if (upending) 64 if (upending)
49 handle_futex_death((void *)pending + futex_offset, curr); 65 handle_futex_death((void *)pending + futex_offset, curr, pi);
50 66
51 while (compat_ptr(uentry) != &head->list) { 67 while (compat_ptr(uentry) != &head->list) {
52 /* 68 /*
@@ -55,15 +71,15 @@ void compat_exit_robust_list(struct task_struct *curr)
55 */ 71 */
56 if (entry != pending) 72 if (entry != pending)
57 if (handle_futex_death((void *)entry + futex_offset, 73 if (handle_futex_death((void *)entry + futex_offset,
58 curr)) 74 curr, pi))
59 return; 75 return;
60 76
61 /* 77 /*
62 * Fetch the next entry in the list: 78 * Fetch the next entry in the list:
63 */ 79 */
64 if (get_user(uentry, (compat_uptr_t *)&entry->next)) 80 if (fetch_robust_entry(&uentry, &entry,
81 (compat_uptr_t *)&entry->next, &pi))
65 return; 82 return;
66 entry = compat_ptr(uentry);
67 /* 83 /*
68 * Avoid excessively long or circular lists: 84 * Avoid excessively long or circular lists:
69 */ 85 */
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index d17766d40dab..be989efc7856 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -835,7 +835,7 @@ static void migrate_hrtimers(int cpu)
835} 835}
836#endif /* CONFIG_HOTPLUG_CPU */ 836#endif /* CONFIG_HOTPLUG_CPU */
837 837
838static int __devinit hrtimer_cpu_notify(struct notifier_block *self, 838static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
839 unsigned long action, void *hcpu) 839 unsigned long action, void *hcpu)
840{ 840{
841 long cpu = (long)hcpu; 841 long cpu = (long)hcpu;
@@ -859,7 +859,7 @@ static int __devinit hrtimer_cpu_notify(struct notifier_block *self,
859 return NOTIFY_OK; 859 return NOTIFY_OK;
860} 860}
861 861
862static struct notifier_block __devinitdata hrtimers_nb = { 862static struct notifier_block __cpuinitdata hrtimers_nb = {
863 .notifier_call = hrtimer_cpu_notify, 863 .notifier_call = hrtimer_cpu_notify,
864}; 864};
865 865
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 4e461438e48b..92be519eff26 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -137,16 +137,40 @@ EXPORT_SYMBOL(enable_irq);
137 * @irq: interrupt to control 137 * @irq: interrupt to control
138 * @on: enable/disable power management wakeup 138 * @on: enable/disable power management wakeup
139 * 139 *
140 * Enable/disable power management wakeup mode 140 * Enable/disable power management wakeup mode, which is
141 * disabled by default. Enables and disables must match,
142 * just as they match for non-wakeup mode support.
143 *
144 * Wakeup mode lets this IRQ wake the system from sleep
145 * states like "suspend to RAM".
141 */ 146 */
142int set_irq_wake(unsigned int irq, unsigned int on) 147int set_irq_wake(unsigned int irq, unsigned int on)
143{ 148{
144 struct irq_desc *desc = irq_desc + irq; 149 struct irq_desc *desc = irq_desc + irq;
145 unsigned long flags; 150 unsigned long flags;
146 int ret = -ENXIO; 151 int ret = -ENXIO;
+	int (*set_wake)(unsigned, unsigned) = desc->chip->set_wake;
 
+	/* wakeup-capable irqs can be shared between drivers that
+	 * don't need to have the same sleep mode behaviors.
+	 */
 	spin_lock_irqsave(&desc->lock, flags);
-	if (desc->chip->set_wake)
+	if (on) {
+		if (desc->wake_depth++ == 0)
+			desc->status |= IRQ_WAKEUP;
+		else
+			set_wake = NULL;
+	} else {
+		if (desc->wake_depth == 0) {
+			printk(KERN_WARNING "Unbalanced IRQ %d "
+			       "wake disable\n", irq);
+			WARN_ON(1);
+		} else if (--desc->wake_depth == 0)
+			desc->status &= ~IRQ_WAKEUP;
+		else
+			set_wake = NULL;
+	}
+	if (set_wake)
 		ret = desc->chip->set_wake(irq, on);
151 spin_unlock_irqrestore(&desc->lock, flags); 175 spin_unlock_irqrestore(&desc->lock, flags);
152 return ret; 176 return ret;
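
The reworked set_irq_wake() above turns the chip callback into a refcounted operation: only the 0->1 and 1->0 transitions of wake_depth reach the hardware, and an unbalanced disable is reported instead of underflowing the counter. A small standalone sketch of that pattern; toy_irq_desc and toy_set_irq_wake() are illustrative names only:

#include <stdio.h>

/* Toy model of the per-IRQ state touched by set_irq_wake(). */
struct toy_irq_desc {
	unsigned int wake_depth;	/* nested enable count */
	int wakeup_enabled;		/* stands in for the IRQ_WAKEUP status bit */
};

/*
 * Balanced enable/disable: only the first enable and the last disable
 * change the "hardware" state; everything in between is refcounting.
 */
static int toy_set_irq_wake(struct toy_irq_desc *desc, int on)
{
	if (on) {
		if (desc->wake_depth++ == 0)
			desc->wakeup_enabled = 1;
	} else {
		if (desc->wake_depth == 0) {
			fprintf(stderr, "unbalanced wake disable\n");
			return -1;
		}
		if (--desc->wake_depth == 0)
			desc->wakeup_enabled = 0;
	}
	return 0;
}

int main(void)
{
	struct toy_irq_desc d = { 0, 0 };

	toy_set_irq_wake(&d, 1);	/* first user: enables */
	toy_set_irq_wake(&d, 1);	/* second user: only bumps the count */
	toy_set_irq_wake(&d, 0);	/* still one user left */
	printf("enabled=%d depth=%u\n", d.wakeup_enabled, d.wake_depth);	/* 1 1 */
	toy_set_irq_wake(&d, 0);	/* last user: disables */
	printf("enabled=%d depth=%u\n", d.wakeup_enabled, d.wake_depth);	/* 0 0 */
	return 0;
}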
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 64aab081153b..3f57dfdc8f92 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -393,6 +393,7 @@ static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
393static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p) 393static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
394{ 394{
395 copy_kprobe(p, ap); 395 copy_kprobe(p, ap);
396 flush_insn_slot(ap);
396 ap->addr = p->addr; 397 ap->addr = p->addr;
397 ap->pre_handler = aggr_pre_handler; 398 ap->pre_handler = aggr_pre_handler;
398 ap->fault_handler = aggr_fault_handler; 399 ap->fault_handler = aggr_fault_handler;
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 759805c9859a..436ab35f6fa7 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -548,7 +548,7 @@ static void __devinit rcu_online_cpu(int cpu)
548 tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL); 548 tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL);
549} 549}
550 550
551static int __devinit rcu_cpu_notify(struct notifier_block *self, 551static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
552 unsigned long action, void *hcpu) 552 unsigned long action, void *hcpu)
553{ 553{
554 long cpu = (long)hcpu; 554 long cpu = (long)hcpu;
@@ -565,7 +565,7 @@ static int __devinit rcu_cpu_notify(struct notifier_block *self,
565 return NOTIFY_OK; 565 return NOTIFY_OK;
566} 566}
567 567
568static struct notifier_block __devinitdata rcu_nb = { 568static struct notifier_block __cpuinitdata rcu_nb = {
569 .notifier_call = rcu_cpu_notify, 569 .notifier_call = rcu_cpu_notify,
570}; 570};
571 571
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index d2ef13b485e7..3e13a1e5856f 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -7,6 +7,8 @@
7 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com> 7 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
8 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt 8 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
9 * Copyright (C) 2006 Esben Nielsen 9 * Copyright (C) 2006 Esben Nielsen
10 *
11 * See Documentation/rt-mutex-design.txt for details.
10 */ 12 */
11#include <linux/spinlock.h> 13#include <linux/spinlock.h>
12#include <linux/module.h> 14#include <linux/module.h>
diff --git a/kernel/sched.c b/kernel/sched.c
index b44b9a43b0fc..a2be2d055299 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4456,9 +4456,9 @@ asmlinkage long sys_sched_yield(void)
4456 return 0; 4456 return 0;
4457} 4457}
4458 4458
4459static inline int __resched_legal(void) 4459static inline int __resched_legal(int expected_preempt_count)
4460{ 4460{
4461 if (unlikely(preempt_count())) 4461 if (unlikely(preempt_count() != expected_preempt_count))
4462 return 0; 4462 return 0;
4463 if (unlikely(system_state != SYSTEM_RUNNING)) 4463 if (unlikely(system_state != SYSTEM_RUNNING))
4464 return 0; 4464 return 0;
@@ -4484,7 +4484,7 @@ static void __cond_resched(void)
4484 4484
4485int __sched cond_resched(void) 4485int __sched cond_resched(void)
4486{ 4486{
4487 if (need_resched() && __resched_legal()) { 4487 if (need_resched() && __resched_legal(0)) {
4488 __cond_resched(); 4488 __cond_resched();
4489 return 1; 4489 return 1;
4490 } 4490 }
@@ -4510,7 +4510,7 @@ int cond_resched_lock(spinlock_t *lock)
4510 ret = 1; 4510 ret = 1;
4511 spin_lock(lock); 4511 spin_lock(lock);
4512 } 4512 }
4513 if (need_resched() && __resched_legal()) { 4513 if (need_resched() && __resched_legal(1)) {
4514 spin_release(&lock->dep_map, 1, _THIS_IP_); 4514 spin_release(&lock->dep_map, 1, _THIS_IP_);
4515 _raw_spin_unlock(lock); 4515 _raw_spin_unlock(lock);
4516 preempt_enable_no_resched(); 4516 preempt_enable_no_resched();
@@ -4526,7 +4526,7 @@ int __sched cond_resched_softirq(void)
4526{ 4526{
4527 BUG_ON(!in_softirq()); 4527 BUG_ON(!in_softirq());
4528 4528
4529 if (need_resched() && __resched_legal()) { 4529 if (need_resched() && __resched_legal(0)) {
4530 raw_local_irq_disable(); 4530 raw_local_irq_disable();
4531 _local_bh_enable(); 4531 _local_bh_enable();
4532 raw_local_irq_enable(); 4532 raw_local_irq_enable();
@@ -6494,7 +6494,12 @@ static int build_sched_domains(const cpumask_t *cpu_map)
6494 for (i = 0; i < MAX_NUMNODES; i++) 6494 for (i = 0; i < MAX_NUMNODES; i++)
6495 init_numa_sched_groups_power(sched_group_nodes[i]); 6495 init_numa_sched_groups_power(sched_group_nodes[i]);
6496 6496
-	init_numa_sched_groups_power(sched_group_allnodes);
+	if (sched_group_allnodes) {
+		int group = cpu_to_allnodes_group(first_cpu(*cpu_map));
+		struct sched_group *sg = &sched_group_allnodes[group];
+
+		init_numa_sched_groups_power(sg);
+	}
6498#endif 6503#endif
6499 6504
6500 /* Attach the domains */ 6505 /* Attach the domains */
@@ -6761,6 +6766,11 @@ void __init sched_init(void)
6761 } 6766 }
6762 6767
6763 set_load_weight(&init_task); 6768 set_load_weight(&init_task);
6769
6770#ifdef CONFIG_RT_MUTEXES
6771 plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
6772#endif
6773
6764 /* 6774 /*
6765 * The boot idle thread does lazy MMU switching as well: 6775 * The boot idle thread does lazy MMU switching as well:
6766 */ 6776 */
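
__resched_legal() now takes the preempt count the caller expects to be running at, so cond_resched_lock(), which is entered with one spinlock held, passes 1 while the other callers pass 0. A toy illustration of that check; resched_legal() and the global counter are stand-ins for the kernel machinery, not the real implementation:

#include <stdio.h>

/* Stand-in for the per-task preemption nesting counter. */
static int preempt_count;

/*
 * Rescheduling is "legal" only when the preempt count matches what the
 * caller expects to be holding: 0 for plain cond_resched(), 1 for a
 * caller that holds exactly one spinlock.
 */
static int resched_legal(int expected_preempt_count)
{
	return preempt_count == expected_preempt_count;
}

int main(void)
{
	printf("cond_resched ok: %d\n", resched_legal(0));		/* 1 */

	preempt_count++;		/* pretend we took a spinlock */
	printf("cond_resched ok: %d\n", resched_legal(0));		/* 0 */
	printf("cond_resched_lock ok: %d\n", resched_legal(1));		/* 1 */
	preempt_count--;
	return 0;
}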
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 0f08a84ae307..3789ca98197c 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -65,6 +65,7 @@ static inline void wakeup_softirqd(void)
65 * This one is for softirq.c-internal use, 65 * This one is for softirq.c-internal use,
66 * where hardirqs are disabled legitimately: 66 * where hardirqs are disabled legitimately:
67 */ 67 */
68#ifdef CONFIG_TRACE_IRQFLAGS
68static void __local_bh_disable(unsigned long ip) 69static void __local_bh_disable(unsigned long ip)
69{ 70{
70 unsigned long flags; 71 unsigned long flags;
@@ -80,6 +81,13 @@ static void __local_bh_disable(unsigned long ip)
80 trace_softirqs_off(ip); 81 trace_softirqs_off(ip);
81 raw_local_irq_restore(flags); 82 raw_local_irq_restore(flags);
82} 83}
+#else /* !CONFIG_TRACE_IRQFLAGS */
+static inline void __local_bh_disable(unsigned long ip)
+{
+	add_preempt_count(SOFTIRQ_OFFSET);
+	barrier();
+}
+#endif /* CONFIG_TRACE_IRQFLAGS */
90#endif /* CONFIG_TRACE_IRQFLAGS */
83 91
84void local_bh_disable(void) 92void local_bh_disable(void)
85{ 93{
@@ -121,12 +129,16 @@ EXPORT_SYMBOL(_local_bh_enable);
121 129
122void local_bh_enable(void) 130void local_bh_enable(void)
123{ 131{
132#ifdef CONFIG_TRACE_IRQFLAGS
124 unsigned long flags; 133 unsigned long flags;
125 134
126 WARN_ON_ONCE(in_irq()); 135 WARN_ON_ONCE(in_irq());
136#endif
127 WARN_ON_ONCE(irqs_disabled()); 137 WARN_ON_ONCE(irqs_disabled());
128 138
139#ifdef CONFIG_TRACE_IRQFLAGS
129 local_irq_save(flags); 140 local_irq_save(flags);
141#endif
130 /* 142 /*
131 * Are softirqs going to be turned on now: 143 * Are softirqs going to be turned on now:
132 */ 144 */
@@ -142,18 +154,22 @@ void local_bh_enable(void)
142 do_softirq(); 154 do_softirq();
143 155
144 dec_preempt_count(); 156 dec_preempt_count();
157#ifdef CONFIG_TRACE_IRQFLAGS
145 local_irq_restore(flags); 158 local_irq_restore(flags);
159#endif
146 preempt_check_resched(); 160 preempt_check_resched();
147} 161}
148EXPORT_SYMBOL(local_bh_enable); 162EXPORT_SYMBOL(local_bh_enable);
149 163
150void local_bh_enable_ip(unsigned long ip) 164void local_bh_enable_ip(unsigned long ip)
151{ 165{
166#ifdef CONFIG_TRACE_IRQFLAGS
152 unsigned long flags; 167 unsigned long flags;
153 168
154 WARN_ON_ONCE(in_irq()); 169 WARN_ON_ONCE(in_irq());
155 170
156 local_irq_save(flags); 171 local_irq_save(flags);
172#endif
157 /* 173 /*
158 * Are softirqs going to be turned on now: 174 * Are softirqs going to be turned on now:
159 */ 175 */
@@ -169,7 +185,9 @@ void local_bh_enable_ip(unsigned long ip)
169 do_softirq(); 185 do_softirq();
170 186
171 dec_preempt_count(); 187 dec_preempt_count();
188#ifdef CONFIG_TRACE_IRQFLAGS
172 local_irq_restore(flags); 189 local_irq_restore(flags);
190#endif
173 preempt_check_resched(); 191 preempt_check_resched();
174} 192}
175EXPORT_SYMBOL(local_bh_enable_ip); 193EXPORT_SYMBOL(local_bh_enable_ip);
@@ -547,7 +565,7 @@ static void takeover_tasklets(unsigned int cpu)
547} 565}
548#endif /* CONFIG_HOTPLUG_CPU */ 566#endif /* CONFIG_HOTPLUG_CPU */
549 567
550static int __devinit cpu_callback(struct notifier_block *nfb, 568static int __cpuinit cpu_callback(struct notifier_block *nfb,
551 unsigned long action, 569 unsigned long action,
552 void *hcpu) 570 void *hcpu)
553{ 571{
@@ -587,7 +605,7 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
587 return NOTIFY_OK; 605 return NOTIFY_OK;
588} 606}
589 607
590static struct notifier_block __devinitdata cpu_nfb = { 608static struct notifier_block __cpuinitdata cpu_nfb = {
591 .notifier_call = cpu_callback 609 .notifier_call = cpu_callback
592}; 610};
593 611
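
The softirq.c changes above follow a common pattern: a fully instrumented helper when CONFIG_TRACE_IRQFLAGS is set, and a cheap inline fallback otherwise, selected at compile time. A generic sketch of that pattern using a hypothetical TRACE_VARIANT define; the function name is made up:

#include <stdio.h>

/* Compile with -DTRACE_VARIANT to select the instrumented helper. */

#ifdef TRACE_VARIANT
/* Heavyweight version: does the extra bookkeeping tracing needs. */
static void disable_bottom_halves(void)
{
	printf("trace: bottom halves disabled here\n");
	/* ...record the caller, update irq-flags tracing state, etc... */
}
#else
/* Lightweight fallback when the tracing option is compiled out. */
static inline void disable_bottom_halves(void)
{
}
#endif

int main(void)
{
	disable_bottom_halves();
	printf("critical section\n");
	return 0;
}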
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 6b76caa22981..03e6a2b0b787 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -104,7 +104,7 @@ static int watchdog(void * __bind_cpu)
104/* 104/*
105 * Create/destroy watchdog threads as CPUs come and go: 105 * Create/destroy watchdog threads as CPUs come and go:
106 */ 106 */
107static int __devinit 107static int __cpuinit
108cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) 108cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
109{ 109{
110 int hotcpu = (unsigned long)hcpu; 110 int hotcpu = (unsigned long)hcpu;
@@ -142,7 +142,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
142 return NOTIFY_OK; 142 return NOTIFY_OK;
143} 143}
144 144
145static struct notifier_block __devinitdata cpu_nfb = { 145static struct notifier_block __cpuinitdata cpu_nfb = {
146 .notifier_call = cpu_callback 146 .notifier_call = cpu_callback
147}; 147};
148 148
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index f45179ce028e..e78187657330 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -121,46 +121,45 @@ static int send_reply(struct sk_buff *skb, pid_t pid)
121/* 121/*
122 * Send taskstats data in @skb to listeners registered for @cpu's exit data 122 * Send taskstats data in @skb to listeners registered for @cpu's exit data
123 */ 123 */
124static int send_cpu_listeners(struct sk_buff *skb, unsigned int cpu) 124static void send_cpu_listeners(struct sk_buff *skb, unsigned int cpu)
125{ 125{
126 struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data); 126 struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data);
127 struct listener_list *listeners; 127 struct listener_list *listeners;
128 struct listener *s, *tmp; 128 struct listener *s, *tmp;
129 struct sk_buff *skb_next, *skb_cur = skb; 129 struct sk_buff *skb_next, *skb_cur = skb;
130 void *reply = genlmsg_data(genlhdr); 130 void *reply = genlmsg_data(genlhdr);
131 int rc, ret, delcount = 0; 131 int rc, delcount = 0;
132 132
133 rc = genlmsg_end(skb, reply); 133 rc = genlmsg_end(skb, reply);
134 if (rc < 0) { 134 if (rc < 0) {
135 nlmsg_free(skb); 135 nlmsg_free(skb);
136 return rc; 136 return;
137 } 137 }
138 138
139 rc = 0; 139 rc = 0;
140 listeners = &per_cpu(listener_array, cpu); 140 listeners = &per_cpu(listener_array, cpu);
141 down_read(&listeners->sem); 141 down_read(&listeners->sem);
-	list_for_each_entry_safe(s, tmp, &listeners->list, list) {
+	list_for_each_entry(s, &listeners->list, list) {
 		skb_next = NULL;
 		if (!list_is_last(&s->list, &listeners->list)) {
 			skb_next = skb_clone(skb_cur, GFP_KERNEL);
-			if (!skb_next) {
-				nlmsg_free(skb_cur);
-				rc = -ENOMEM;
+			if (!skb_next)
 				break;
-			}
 		}
-		ret = genlmsg_unicast(skb_cur, s->pid);
-		if (ret == -ECONNREFUSED) {
+		rc = genlmsg_unicast(skb_cur, s->pid);
+		if (rc == -ECONNREFUSED) {
 			s->valid = 0;
 			delcount++;
-			rc = ret;
 		}
 		skb_cur = skb_next;
 	}
 	up_read(&listeners->sem);
 
+	if (skb_cur)
+		nlmsg_free(skb_cur);
+
 	if (!delcount)
-		return rc;
+		return;
164 163
165 /* Delete invalidated entries */ 164 /* Delete invalidated entries */
166 down_write(&listeners->sem); 165 down_write(&listeners->sem);
@@ -171,13 +170,12 @@ static int send_cpu_listeners(struct sk_buff *skb, unsigned int cpu)
171 } 170 }
172 } 171 }
173 up_write(&listeners->sem); 172 up_write(&listeners->sem);
174 return rc;
175} 173}
176 174
177static int fill_pid(pid_t pid, struct task_struct *pidtsk, 175static int fill_pid(pid_t pid, struct task_struct *pidtsk,
178 struct taskstats *stats) 176 struct taskstats *stats)
179{ 177{
180 int rc; 178 int rc = 0;
181 struct task_struct *tsk = pidtsk; 179 struct task_struct *tsk = pidtsk;
182 180
183 if (!pidtsk) { 181 if (!pidtsk) {
@@ -196,12 +194,10 @@ static int fill_pid(pid_t pid, struct task_struct *pidtsk,
196 * Each accounting subsystem adds calls to its functions to 194 * Each accounting subsystem adds calls to its functions to
197 * fill in relevant parts of struct taskstsats as follows 195 * fill in relevant parts of struct taskstsats as follows
198 * 196 *
199 * rc = per-task-foo(stats, tsk); 197 * per-task-foo(stats, tsk);
200 * if (rc)
201 * goto err;
202 */ 198 */
203 199
204 rc = delayacct_add_tsk(stats, tsk); 200 delayacct_add_tsk(stats, tsk);
205 stats->version = TASKSTATS_VERSION; 201 stats->version = TASKSTATS_VERSION;
206 202
207 /* Define err: label here if needed */ 203 /* Define err: label here if needed */
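
send_cpu_listeners() now clones the statistics skb once per listener except the last, lets each unicast consume its buffer, and frees whatever clone is left over if delivery stops early. A plain-C sketch of that fan-out pattern; toy_buf, toy_clone() and fan_out() are illustrative stand-ins for the skb helpers:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy buffer standing in for an sk_buff. */
struct toy_buf {
	char data[32];
};

static struct toy_buf *toy_clone(const struct toy_buf *b)
{
	struct toy_buf *c = malloc(sizeof(*c));

	if (c)
		memcpy(c, b, sizeof(*c));
	return c;
}

/*
 * Fan a message out to n listeners: clone the buffer for every listener
 * except the last one, which consumes the buffer currently held; each
 * delivery consumes its buffer, and any leftover is freed at the end.
 */
static void fan_out(struct toy_buf *buf, int n)
{
	struct toy_buf *cur = buf, *next;
	int i;

	for (i = 0; i < n; i++) {
		next = NULL;
		if (i < n - 1) {
			next = toy_clone(cur);
			if (!next)
				break;	/* out of memory: stop delivering */
		}
		printf("deliver to listener %d: %s\n", i, cur->data);
		free(cur);		/* the "unicast" consumed it */
		cur = next;
	}
	free(cur);			/* leftover clone, NULL-safe */
}

int main(void)
{
	struct toy_buf *b = malloc(sizeof(*b));

	if (!b)
		return 1;
	strcpy(b->data, "exit stats");
	fan_out(b, 3);
	return 0;
}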
diff --git a/kernel/timer.c b/kernel/timer.c
index 05809c2e2fd6..b650f04888ed 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -84,7 +84,7 @@ typedef struct tvec_t_base_s tvec_base_t;
84 84
85tvec_base_t boot_tvec_bases; 85tvec_base_t boot_tvec_bases;
86EXPORT_SYMBOL(boot_tvec_bases); 86EXPORT_SYMBOL(boot_tvec_bases);
87static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = { &boot_tvec_bases }; 87static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases;
88 88
89static inline void set_running_timer(tvec_base_t *base, 89static inline void set_running_timer(tvec_base_t *base,
90 struct timer_list *timer) 90 struct timer_list *timer)
@@ -408,7 +408,7 @@ static int cascade(tvec_base_t *base, tvec_t *tv, int index)
408 * This function cascades all vectors and executes all expired timer 408 * This function cascades all vectors and executes all expired timer
409 * vectors. 409 * vectors.
410 */ 410 */
411#define INDEX(N) (base->timer_jiffies >> (TVR_BITS + N * TVN_BITS)) & TVN_MASK 411#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
412 412
413static inline void __run_timers(tvec_base_t *base) 413static inline void __run_timers(tvec_base_t *base)
414{ 414{
@@ -1688,7 +1688,7 @@ static void __devinit migrate_timers(int cpu)
1688} 1688}
1689#endif /* CONFIG_HOTPLUG_CPU */ 1689#endif /* CONFIG_HOTPLUG_CPU */
1690 1690
1691static int __devinit timer_cpu_notify(struct notifier_block *self, 1691static int __cpuinit timer_cpu_notify(struct notifier_block *self,
1692 unsigned long action, void *hcpu) 1692 unsigned long action, void *hcpu)
1693{ 1693{
1694 long cpu = (long)hcpu; 1694 long cpu = (long)hcpu;
@@ -1708,7 +1708,7 @@ static int __devinit timer_cpu_notify(struct notifier_block *self,
1708 return NOTIFY_OK; 1708 return NOTIFY_OK;
1709} 1709}
1710 1710
1711static struct notifier_block __devinitdata timers_nb = { 1711static struct notifier_block __cpuinitdata timers_nb = {
1712 .notifier_call = timer_cpu_notify, 1712 .notifier_call = timer_cpu_notify,
1713}; 1713};
1714 1714
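
The INDEX() change above is the classic macro-hygiene fix: without parentheses around N and around the whole expansion, an expression argument is spliced into the shift with the wrong precedence. A compilable illustration; the constants mirror the timer code, the macro names are made up:

#include <stdio.h>

#define TVR_BITS 8
#define TVN_BITS 6
#define TVN_MASK 63

/* Unparenthesized argument: N is pasted in verbatim. */
#define INDEX_BAD(N)  (jiffies >> (TVR_BITS + N * TVN_BITS)) & TVN_MASK
/* Fixed version: argument and whole expansion parenthesized. */
#define INDEX_GOOD(N) ((jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

static unsigned long jiffies = 0x12345678UL;

int main(void)
{
	/*
	 * With an expression argument the two differ:
	 * INDEX_BAD(1 + 1) expands to a shift by 8 + 1 + 1*6 = 15,
	 * while the intended shift is 8 + 2*6 = 20.
	 */
	printf("bad : %lu\n", (unsigned long)(INDEX_BAD(1 + 1)));
	printf("good: %lu\n", (unsigned long)(INDEX_GOOD(1 + 1)));
	return 0;
}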
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index eebb1d839235..448e8f7b342d 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -93,9 +93,12 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
93 spin_unlock_irqrestore(&cwq->lock, flags); 93 spin_unlock_irqrestore(&cwq->lock, flags);
94} 94}
95 95
96/* 96/**
97 * Queue work on a workqueue. Return non-zero if it was successfully 97 * queue_work - queue work on a workqueue
98 * added. 98 * @wq: workqueue to use
99 * @work: work to queue
100 *
101 * Returns non-zero if it was successfully added.
99 * 102 *
100 * We queue the work to the CPU it was submitted, but there is no 103 * We queue the work to the CPU it was submitted, but there is no
101 * guarantee that it will be processed by that CPU. 104 * guarantee that it will be processed by that CPU.
@@ -128,6 +131,14 @@ static void delayed_work_timer_fn(unsigned long __data)
128 __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work); 131 __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
129} 132}
130 133
134/**
135 * queue_delayed_work - queue work on a workqueue after delay
136 * @wq: workqueue to use
137 * @work: work to queue
138 * @delay: number of jiffies to wait before queueing
139 *
140 * Returns non-zero if it was successfully added.
141 */
131int fastcall queue_delayed_work(struct workqueue_struct *wq, 142int fastcall queue_delayed_work(struct workqueue_struct *wq,
132 struct work_struct *work, unsigned long delay) 143 struct work_struct *work, unsigned long delay)
133{ 144{
@@ -150,6 +161,15 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq,
150} 161}
151EXPORT_SYMBOL_GPL(queue_delayed_work); 162EXPORT_SYMBOL_GPL(queue_delayed_work);
152 163
164/**
165 * queue_delayed_work_on - queue work on specific CPU after delay
166 * @cpu: CPU number to execute work on
167 * @wq: workqueue to use
168 * @work: work to queue
169 * @delay: number of jiffies to wait before queueing
170 *
171 * Returns non-zero if it was successfully added.
172 */
153int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 173int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
154 struct work_struct *work, unsigned long delay) 174 struct work_struct *work, unsigned long delay)
155{ 175{
@@ -275,8 +295,9 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
275 } 295 }
276} 296}
277 297
278/* 298/**
279 * flush_workqueue - ensure that any scheduled work has run to completion. 299 * flush_workqueue - ensure that any scheduled work has run to completion.
300 * @wq: workqueue to flush
280 * 301 *
281 * Forces execution of the workqueue and blocks until its completion. 302 * Forces execution of the workqueue and blocks until its completion.
282 * This is typically used in driver shutdown handlers. 303 * This is typically used in driver shutdown handlers.
@@ -400,6 +421,12 @@ static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
400 kthread_stop(p); 421 kthread_stop(p);
401} 422}
402 423
424/**
425 * destroy_workqueue - safely terminate a workqueue
426 * @wq: target workqueue
427 *
428 * Safely destroy a workqueue. All work currently pending will be done first.
429 */
403void destroy_workqueue(struct workqueue_struct *wq) 430void destroy_workqueue(struct workqueue_struct *wq)
404{ 431{
405 int cpu; 432 int cpu;
@@ -425,18 +452,41 @@ EXPORT_SYMBOL_GPL(destroy_workqueue);
425 452
426static struct workqueue_struct *keventd_wq; 453static struct workqueue_struct *keventd_wq;
427 454
455/**
456 * schedule_work - put work task in global workqueue
457 * @work: job to be done
458 *
459 * This puts a job in the kernel-global workqueue.
460 */
428int fastcall schedule_work(struct work_struct *work) 461int fastcall schedule_work(struct work_struct *work)
429{ 462{
430 return queue_work(keventd_wq, work); 463 return queue_work(keventd_wq, work);
431} 464}
432EXPORT_SYMBOL(schedule_work); 465EXPORT_SYMBOL(schedule_work);
433 466
467/**
468 * schedule_delayed_work - put work task in global workqueue after delay
469 * @work: job to be done
470 * @delay: number of jiffies to wait
471 *
472 * After waiting for a given time this puts a job in the kernel-global
473 * workqueue.
474 */
434int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay) 475int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
435{ 476{
436 return queue_delayed_work(keventd_wq, work, delay); 477 return queue_delayed_work(keventd_wq, work, delay);
437} 478}
438EXPORT_SYMBOL(schedule_delayed_work); 479EXPORT_SYMBOL(schedule_delayed_work);
439 480
481/**
482 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
483 * @cpu: cpu to use
484 * @work: job to be done
485 * @delay: number of jiffies to wait
486 *
487 * After waiting for a given time this puts a job in the kernel-global
488 * workqueue on the specified CPU.
489 */
440int schedule_delayed_work_on(int cpu, 490int schedule_delayed_work_on(int cpu,
441 struct work_struct *work, unsigned long delay) 491 struct work_struct *work, unsigned long delay)
442{ 492{
diff --git a/lib/zlib_inflate/inflate.c b/lib/zlib_inflate/inflate.c
index 7f922dccf1a5..fceb97c3aff7 100644
--- a/lib/zlib_inflate/inflate.c
+++ b/lib/zlib_inflate/inflate.c
@@ -347,7 +347,10 @@ int zlib_inflate(z_streamp strm, int flush)
347 static const unsigned short order[19] = /* permutation of code lengths */ 347 static const unsigned short order[19] = /* permutation of code lengths */
348 {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; 348 {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
349 349
350 if (strm == NULL || strm->state == NULL || strm->next_out == NULL || 350 /* Do not check for strm->next_out == NULL here as ppc zImage
351 inflates to strm->next_out = 0 */
352
353 if (strm == NULL || strm->state == NULL ||
351 (strm->next_in == NULL && strm->avail_in != 0)) 354 (strm->next_in == NULL && strm->avail_in != 0))
352 return Z_STREAM_ERROR; 355 return Z_STREAM_ERROR;
353 356
diff --git a/mm/filemap.c b/mm/filemap.c
index d087fc3d3281..b9a60c43b61a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -849,8 +849,6 @@ static void shrink_readahead_size_eio(struct file *filp,
849 return; 849 return;
850 850
851 ra->ra_pages /= 4; 851 ra->ra_pages /= 4;
852 printk(KERN_WARNING "Reducing readahead size to %luK\n",
853 ra->ra_pages << (PAGE_CACHE_SHIFT - 10));
854} 852}
855 853
856/** 854/**
diff --git a/mm/slab.c b/mm/slab.c
index 0f20843beffd..21ba06035700 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1106,7 +1106,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
1106 1106
1107#endif 1107#endif
1108 1108
1109static int __devinit cpuup_callback(struct notifier_block *nfb, 1109static int __cpuinit cpuup_callback(struct notifier_block *nfb,
1110 unsigned long action, void *hcpu) 1110 unsigned long action, void *hcpu)
1111{ 1111{
1112 long cpu = (long)hcpu; 1112 long cpu = (long)hcpu;
@@ -3224,7 +3224,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3224EXPORT_SYMBOL(kmem_cache_alloc); 3224EXPORT_SYMBOL(kmem_cache_alloc);
3225 3225
3226/** 3226/**
3227 * kmem_cache_alloc - Allocate an object. The memory is set to zero. 3227 * kmem_cache_zalloc - Allocate an object. The memory is set to zero.
3228 * @cache: The cache to allocate from. 3228 * @cache: The cache to allocate from.
3229 * @flags: See kmalloc(). 3229 * @flags: See kmalloc().
3230 * 3230 *
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index cbc8a389a0a8..05b3de888243 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -61,6 +61,9 @@ static int brnf_filter_vlan_tagged = 1;
61#define brnf_filter_vlan_tagged 1 61#define brnf_filter_vlan_tagged 1
62#endif 62#endif
63 63
64int brnf_deferred_hooks;
65EXPORT_SYMBOL_GPL(brnf_deferred_hooks);
66
64static __be16 inline vlan_proto(const struct sk_buff *skb) 67static __be16 inline vlan_proto(const struct sk_buff *skb)
65{ 68{
66 return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; 69 return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
@@ -890,6 +893,8 @@ static unsigned int ip_sabotage_out(unsigned int hook, struct sk_buff **pskb,
890 return NF_ACCEPT; 893 return NF_ACCEPT;
891 else if (ip->version == 6 && !brnf_call_ip6tables) 894 else if (ip->version == 6 && !brnf_call_ip6tables)
892 return NF_ACCEPT; 895 return NF_ACCEPT;
896 else if (!brnf_deferred_hooks)
897 return NF_ACCEPT;
893#endif 898#endif
894 if (hook == NF_IP_POST_ROUTING) 899 if (hook == NF_IP_POST_ROUTING)
895 return NF_ACCEPT; 900 return NF_ACCEPT;
diff --git a/net/ieee80211/Kconfig b/net/ieee80211/Kconfig
index dbb08528ddf5..f7e84e9d13ad 100644
--- a/net/ieee80211/Kconfig
+++ b/net/ieee80211/Kconfig
@@ -58,6 +58,7 @@ config IEEE80211_CRYPT_TKIP
58 depends on IEEE80211 && NET_RADIO 58 depends on IEEE80211 && NET_RADIO
59 select CRYPTO 59 select CRYPTO
60 select CRYPTO_MICHAEL_MIC 60 select CRYPTO_MICHAEL_MIC
61 select CRC32
61 ---help--- 62 ---help---
62 Include software based cipher suites in support of IEEE 802.11i 63 Include software based cipher suites in support of IEEE 802.11i
63 (aka TGi, WPA, WPA2, WPA-PSK, etc.) for use with TKIP enabled 64 (aka TGi, WPA, WPA2, WPA-PSK, etc.) for use with TKIP enabled
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c
index ebc33ca6e692..4cef39e171d0 100644
--- a/net/ieee80211/softmac/ieee80211softmac_auth.c
+++ b/net/ieee80211/softmac/ieee80211softmac_auth.c
@@ -116,6 +116,16 @@ ieee80211softmac_auth_queue(void *data)
116 kfree(auth); 116 kfree(auth);
117} 117}
118 118
119/* Sends a response to an auth challenge (for shared key auth). */
120static void
121ieee80211softmac_auth_challenge_response(void *_aq)
122{
123 struct ieee80211softmac_auth_queue_item *aq = _aq;
124
125 /* Send our response */
126 ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state);
127}
128
119/* Handle the auth response from the AP 129/* Handle the auth response from the AP
120 * This should be registered with ieee80211 as handle_auth 130 * This should be registered with ieee80211 as handle_auth
121 */ 131 */
@@ -197,24 +207,30 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
197 case IEEE80211SOFTMAC_AUTH_SHARED_CHALLENGE: 207 case IEEE80211SOFTMAC_AUTH_SHARED_CHALLENGE:
198 /* Check to make sure we have a challenge IE */ 208 /* Check to make sure we have a challenge IE */
199 data = (u8 *)auth->info_element; 209 data = (u8 *)auth->info_element;
200 if(*data++ != MFIE_TYPE_CHALLENGE){ 210 if (*data++ != MFIE_TYPE_CHALLENGE) {
201 printkl(KERN_NOTICE PFX "Shared Key Authentication failed due to a missing challenge.\n"); 211 printkl(KERN_NOTICE PFX "Shared Key Authentication failed due to a missing challenge.\n");
202 break; 212 break;
203 } 213 }
204 /* Save the challenge */ 214 /* Save the challenge */
205 spin_lock_irqsave(&mac->lock, flags); 215 spin_lock_irqsave(&mac->lock, flags);
206 net->challenge_len = *data++; 216 net->challenge_len = *data++;
207 if(net->challenge_len > WLAN_AUTH_CHALLENGE_LEN) 217 if (net->challenge_len > WLAN_AUTH_CHALLENGE_LEN)
208 net->challenge_len = WLAN_AUTH_CHALLENGE_LEN; 218 net->challenge_len = WLAN_AUTH_CHALLENGE_LEN;
209 if(net->challenge != NULL) 219 if (net->challenge != NULL)
210 kfree(net->challenge); 220 kfree(net->challenge);
211 net->challenge = kmalloc(net->challenge_len, GFP_ATOMIC); 221 net->challenge = kmalloc(net->challenge_len, GFP_ATOMIC);
212 memcpy(net->challenge, data, net->challenge_len); 222 memcpy(net->challenge, data, net->challenge_len);
213 aq->state = IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE; 223 aq->state = IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE;
-			spin_unlock_irqrestore(&mac->lock, flags);
 
-			/* Send our response */
-			ieee80211softmac_send_mgt_frame(mac, aq->net, IEEE80211_STYPE_AUTH, aq->state);
+			/* We reuse the work struct from the auth request here.
+			 * It is safe to do so as each one is per-request, and
+			 * at this point (dealing with authentication response)
+			 * we have obviously already sent the initial auth
+			 * request. */
+			cancel_delayed_work(&aq->work);
+			INIT_WORK(&aq->work, &ieee80211softmac_auth_challenge_response, (void *)aq);
+			schedule_work(&aq->work);
+			spin_unlock_irqrestore(&mac->lock, flags);
 			return 0;
219 case IEEE80211SOFTMAC_AUTH_SHARED_PASS: 235 case IEEE80211SOFTMAC_AUTH_SHARED_PASS:
220 kfree(net->challenge); 236 kfree(net->challenge);
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 184c78ca79e6..212734ca238f 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -429,7 +429,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
429 } 429 }
430 430
431 /* Remove any debris in the socket control block */ 431 /* Remove any debris in the socket control block */
432 memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options)); 432 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
433 433
434 return NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, dev, NULL, 434 return NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, dev, NULL,
435 ip_rcv_finish); 435 ip_rcv_finish);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 9ccacf57f08b..85893eef6b16 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1578,6 +1578,7 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
1578 cache = ipmr_cache_find(rt->rt_src, rt->rt_dst); 1578 cache = ipmr_cache_find(rt->rt_src, rt->rt_dst);
1579 1579
1580 if (cache==NULL) { 1580 if (cache==NULL) {
1581 struct sk_buff *skb2;
1581 struct net_device *dev; 1582 struct net_device *dev;
1582 int vif; 1583 int vif;
1583 1584
@@ -1591,12 +1592,18 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
1591 read_unlock(&mrt_lock); 1592 read_unlock(&mrt_lock);
1592 return -ENODEV; 1593 return -ENODEV;
1593 } 1594 }
-		skb->nh.raw = skb_push(skb, sizeof(struct iphdr));
-		skb->nh.iph->ihl = sizeof(struct iphdr)>>2;
-		skb->nh.iph->saddr = rt->rt_src;
-		skb->nh.iph->daddr = rt->rt_dst;
-		skb->nh.iph->version = 0;
-		err = ipmr_cache_unresolved(vif, skb);
+		skb2 = skb_clone(skb, GFP_ATOMIC);
+		if (!skb2) {
+			read_unlock(&mrt_lock);
+			return -ENOMEM;
+		}
+
+		skb2->nh.raw = skb_push(skb2, sizeof(struct iphdr));
+		skb2->nh.iph->ihl = sizeof(struct iphdr)>>2;
+		skb2->nh.iph->saddr = rt->rt_src;
+		skb2->nh.iph->daddr = rt->rt_dst;
+		skb2->nh.iph->version = 0;
+		err = ipmr_cache_unresolved(vif, skb2);
1600 read_unlock(&mrt_lock); 1607 read_unlock(&mrt_lock);
1601 return err; 1608 return err;
1602 } 1609 }
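
The ipmr_get_route() fix above is an instance of a general rule: if a buffer may still be needed by its owner, build the modified version on a private copy instead of pushing headers onto the original. A userspace sketch of that copy-before-modify idea; struct packet and with_header() are illustrative only:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct packet {
	size_t len;
	unsigned char data[64];
};

/*
 * The caller may still need the original packet after we return, so
 * prepend the header to a private copy instead of editing in place.
 */
static struct packet *with_header(const struct packet *orig,
				  const unsigned char *hdr, size_t hdr_len)
{
	struct packet *copy;

	if (orig->len + hdr_len > sizeof(copy->data))
		return NULL;

	copy = malloc(sizeof(*copy));
	if (!copy)
		return NULL;

	memcpy(copy->data, hdr, hdr_len);
	memcpy(copy->data + hdr_len, orig->data, orig->len);
	copy->len = orig->len + hdr_len;
	return copy;
}

int main(void)
{
	struct packet p = { 4, "body" };
	unsigned char hdr[4] = { 0x45, 0, 0, 0 };
	struct packet *q = with_header(&p, hdr, sizeof(hdr));

	if (q) {
		printf("orig len %zu, copy len %zu\n", p.len, q->len);
		free(q);
	}
	return 0;
}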
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_h323.c b/net/ipv4/netfilter/ip_conntrack_helper_h323.c
index af35235672d5..9a39e2969712 100644
--- a/net/ipv4/netfilter/ip_conntrack_helper_h323.c
+++ b/net/ipv4/netfilter/ip_conntrack_helper_h323.c
@@ -1200,7 +1200,7 @@ static struct ip_conntrack_expect *find_expect(struct ip_conntrack *ct,
1200 tuple.dst.protonum = IPPROTO_TCP; 1200 tuple.dst.protonum = IPPROTO_TCP;
1201 1201
1202 exp = __ip_conntrack_expect_find(&tuple); 1202 exp = __ip_conntrack_expect_find(&tuple);
1203 if (exp->master == ct) 1203 if (exp && exp->master == ct)
1204 return exp; 1204 return exp;
1205 return NULL; 1205 return NULL;
1206} 1206}
diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
index 7bd3c22003a2..7a9fa04a467a 100644
--- a/net/ipv4/netfilter/ip_conntrack_standalone.c
+++ b/net/ipv4/netfilter/ip_conntrack_standalone.c
@@ -534,6 +534,8 @@ static struct nf_hook_ops ip_conntrack_ops[] = {
534 534
535/* Sysctl support */ 535/* Sysctl support */
536 536
537int ip_conntrack_checksum = 1;
538
537#ifdef CONFIG_SYSCTL 539#ifdef CONFIG_SYSCTL
538 540
539/* From ip_conntrack_core.c */ 541/* From ip_conntrack_core.c */
@@ -568,8 +570,6 @@ extern unsigned int ip_ct_generic_timeout;
568static int log_invalid_proto_min = 0; 570static int log_invalid_proto_min = 0;
569static int log_invalid_proto_max = 255; 571static int log_invalid_proto_max = 255;
570 572
571int ip_conntrack_checksum = 1;
572
573static struct ctl_table_header *ip_ct_sysctl_header; 573static struct ctl_table_header *ip_ct_sysctl_header;
574 574
575static ctl_table ip_ct_sysctl_table[] = { 575static ctl_table ip_ct_sysctl_table[] = {
diff --git a/net/ipv4/netfilter/ip_nat_snmp_basic.c b/net/ipv4/netfilter/ip_nat_snmp_basic.c
index 0b1b416759cc..18b7fbdccb61 100644
--- a/net/ipv4/netfilter/ip_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/ip_nat_snmp_basic.c
@@ -1255,9 +1255,9 @@ static int help(struct sk_buff **pskb,
1255 struct udphdr *udph = (struct udphdr *)((u_int32_t *)iph + iph->ihl); 1255 struct udphdr *udph = (struct udphdr *)((u_int32_t *)iph + iph->ihl);
1256 1256
1257 /* SNMP replies and originating SNMP traps get mangled */ 1257 /* SNMP replies and originating SNMP traps get mangled */
1258 if (udph->source == ntohs(SNMP_PORT) && dir != IP_CT_DIR_REPLY) 1258 if (udph->source == htons(SNMP_PORT) && dir != IP_CT_DIR_REPLY)
1259 return NF_ACCEPT; 1259 return NF_ACCEPT;
1260 if (udph->dest == ntohs(SNMP_TRAP_PORT) && dir != IP_CT_DIR_ORIGINAL) 1260 if (udph->dest == htons(SNMP_TRAP_PORT) && dir != IP_CT_DIR_ORIGINAL)
1261 return NF_ACCEPT; 1261 return NF_ACCEPT;
1262 1262
1263 /* No NAT? */ 1263 /* No NAT? */
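
The SNMP NAT helper fix compares the network-byte-order port field against htons(constant) rather than ntohs(constant). For 16-bit values both swaps happen to produce the same bits, so the old code worked by accident; the point of the change is to compare like with like, which also matches the kernel's __be16 annotations. A small userspace illustration of the two correct idioms:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define SNMP_PORT 161

int main(void)
{
	uint16_t wire_port = htons(SNMP_PORT);	/* as found in a received header */

	/* Either convert the field to host order once... */
	if (ntohs(wire_port) == SNMP_PORT)
		printf("match (host-order compare)\n");

	/* ...or convert the constant to network order. Mixing the two
	 * directions, as the old ntohs(SNMP_PORT) comparison did, yields
	 * the same bits for 16-bit swaps but compares values living in
	 * different byte orders, which defeats static endianness checks. */
	if (wire_port == htons(SNMP_PORT))
		printf("match (network-order compare)\n");

	return 0;
}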
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index bd221ec3f81e..62b2762a2420 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -609,6 +609,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
609 if (sin) { 609 if (sin) {
610 sin->sin_family = AF_INET; 610 sin->sin_family = AF_INET;
611 sin->sin_addr.s_addr = skb->nh.iph->saddr; 611 sin->sin_addr.s_addr = skb->nh.iph->saddr;
612 sin->sin_port = 0;
612 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); 613 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
613 } 614 }
614 if (inet->cmsg_flags) 615 if (inet->cmsg_flags)
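
The raw_recvmsg() change (and its IPv6 counterpart below) initializes sin_port so that no uninitialized kernel stack bytes are copied out together with the address. A userspace sketch of the same discipline when filling a sockaddr destined for an untrusted reader; fill_peer_addr() is an illustrative name:

#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Fill in an address structure for copying out to an untrusted reader.
 * Every field (and the padding) must be written: anything left untouched
 * would leak whatever happened to be on the stack.
 */
static void fill_peer_addr(struct sockaddr_in *sin, uint32_t saddr_be)
{
	memset(sin, 0, sizeof(*sin));		/* covers padding too */
	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = saddr_be;
	sin->sin_port = 0;			/* raw sockets carry no port */
}

int main(void)
{
	struct sockaddr_in sin;

	fill_peer_addr(&sin, htonl(0x7f000001));	/* 127.0.0.1 */
	printf("family=%d port=%d\n", sin.sin_family, ntohs(sin.sin_port));
	return 0;
}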
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index df8f051c0fce..25c2a9e03895 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -71,6 +71,8 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
 		goto out;
 	}
 
+	memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
+
 	/*
 	 * Store incoming device index. When the packet will
 	 * be queued, we cannot refer to skb->dev anymore.
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index fa1ce0ae123e..d57e61ce4a7d 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -411,6 +411,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
 	/* Copy the address. */
 	if (sin6) {
 		sin6->sin6_family = AF_INET6;
+		sin6->sin6_port = 0;
 		ipv6_addr_copy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr);
 		sin6->sin6_flowinfo = 0;
 		sin6->sin6_scope_id = 0;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 42a178aa30f9..a9894ddfd72a 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -386,8 +386,8 @@ config NETFILTER_XT_MATCH_REALM
 	  <file:Documentation/modules.txt>. If unsure, say `N'.
 
 config NETFILTER_XT_MATCH_SCTP
-	tristate '"sctp" protocol match support'
-	depends on NETFILTER_XTABLES
+	tristate '"sctp" protocol match support (EXPERIMENTAL)'
+	depends on NETFILTER_XTABLES && EXPERIMENTAL
 	help
 	  With this option enabled, you will be able to use the
 	  `sctp' match in order to match on SCTP source/destination ports
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 5fcab2ef231f..4ef836699962 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -428,6 +428,8 @@ static struct file_operations ct_cpu_seq_fops = {
 
 /* Sysctl support */
 
+int nf_conntrack_checksum = 1;
+
 #ifdef CONFIG_SYSCTL
 
 /* From nf_conntrack_core.c */
@@ -459,8 +461,6 @@ extern unsigned int nf_ct_generic_timeout;
 static int log_invalid_proto_min = 0;
 static int log_invalid_proto_max = 255;
 
-int nf_conntrack_checksum = 1;
-
 static struct ctl_table_header *nf_ct_sysctl_header;
 
 static ctl_table nf_ct_sysctl_table[] = {
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index bb6fcee452ca..662a869593bf 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -219,21 +219,20 @@ void nf_reinject(struct sk_buff *skb, struct nf_info *info,
 
 	switch (verdict & NF_VERDICT_MASK) {
 	case NF_ACCEPT:
+	case NF_STOP:
 		info->okfn(skb);
+	case NF_STOLEN:
 		break;
-
 	case NF_QUEUE:
 		if (!nf_queue(&skb, elem, info->pf, info->hook,
 			      info->indev, info->outdev, info->okfn,
 			      verdict >> NF_VERDICT_BITS))
 			goto next_hook;
 		break;
+	default:
+		kfree_skb(skb);
 	}
 	rcu_read_unlock();
-
-	if (verdict == NF_DROP)
-		kfree_skb(skb);
-
 	kfree(info);
 	return;
 }
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index 5fe4c9df17f5..a9f4f6f3c628 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -113,6 +113,21 @@ checkentry(const char *tablename,
 	if (!(info->bitmask & XT_PHYSDEV_OP_MASK) ||
 	    info->bitmask & ~XT_PHYSDEV_OP_MASK)
 		return 0;
+	if (brnf_deferred_hooks == 0 &&
+	    info->bitmask & XT_PHYSDEV_OP_OUT &&
+	    (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) ||
+	     info->invert & XT_PHYSDEV_OP_BRIDGED) &&
+	    hook_mask & ((1 << NF_IP_LOCAL_OUT) | (1 << NF_IP_FORWARD) |
+			 (1 << NF_IP_POST_ROUTING))) {
+		printk(KERN_WARNING "physdev match: using --physdev-out in the "
+		       "OUTPUT, FORWARD and POSTROUTING chains for non-bridged "
+		       "traffic is deprecated and breaks other things, it will "
+		       "be removed in January 2007. See Documentation/"
+		       "feature-removal-schedule.txt for details. This doesn't "
+		       "affect you in case you're using it for purely bridged "
+		       "traffic.\n");
+		brnf_deferred_hooks = 1;
+	}
 	return 1;
 }
 
diff --git a/net/netfilter/xt_pkttype.c b/net/netfilter/xt_pkttype.c
index 3ac703b5cb8f..d2f5320a80bf 100644
--- a/net/netfilter/xt_pkttype.c
+++ b/net/netfilter/xt_pkttype.c
@@ -9,6 +9,8 @@
 #include <linux/skbuff.h>
 #include <linux/if_ether.h>
 #include <linux/if_packet.h>
+#include <linux/in.h>
+#include <linux/ip.h>
 
 #include <linux/netfilter/xt_pkttype.h>
 #include <linux/netfilter/x_tables.h>
@@ -28,9 +30,17 @@ static int match(const struct sk_buff *skb,
 		 unsigned int protoff,
 		 int *hotdrop)
 {
+	u_int8_t type;
 	const struct xt_pkttype_info *info = matchinfo;
 
-	return (skb->pkt_type == info->pkttype) ^ info->invert;
+	if (skb->pkt_type == PACKET_LOOPBACK)
+		type = (MULTICAST(skb->nh.iph->daddr)
+			? PACKET_MULTICAST
+			: PACKET_BROADCAST);
+	else
+		type = skb->pkt_type;
+
+	return (type == info->pkttype) ^ info->invert;
 }
 
 static struct xt_match pkttype_match = {
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index 2180c88cfe89..f01132263535 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -85,6 +85,13 @@ cc-version = $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh \
 cc-ifversion = $(shell if [ $(call cc-version, $(CC)) $(1) $(2) ]; then \
                        echo $(3); fi;)
 
+# ld-option
+# Usage: ldflags += $(call ld-option, -Wl$(comma)--hash-style=both)
+ld-option = $(shell if $(CC) $(1) \
+		-nostdlib -o ldtest$$$$.out -xc /dev/null \
+		> /dev/null 2>&1; then echo "$(1)"; else echo "$(2)"; fi; \
+		rm -f ldtest$$$$.out)
+
 ###
 # Shorthand for $(Q)$(MAKE) -f scripts/Makefile.build obj=
 # Usage:
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index f9460a6218de..c9ca0c23bd91 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -1518,6 +1518,7 @@ sub dump_function($$) {
     $prototype =~ s/^asmlinkage +//;
     $prototype =~ s/^inline +//;
     $prototype =~ s/^__inline__ +//;
+    $prototype =~ s/__devinit +//;
     $prototype =~ s/^#define +//; #ak added
     $prototype =~ s/__attribute__ \(\([a-z,]*\)\)//;
 
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 0111990ba837..f03960e697ce 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -644,10 +644,18 @@ void policydb_destroy(struct policydb *p)
 	kfree(lra);
 
 	for (rt = p->range_tr; rt; rt = rt -> next) {
-		kfree(lrt);
+		if (lrt) {
+			ebitmap_destroy(&lrt->range.level[0].cat);
+			ebitmap_destroy(&lrt->range.level[1].cat);
+			kfree(lrt);
+		}
 		lrt = rt;
 	}
-	kfree(lrt);
+	if (lrt) {
+		ebitmap_destroy(&lrt->range.level[0].cat);
+		ebitmap_destroy(&lrt->range.level[1].cat);
+		kfree(lrt);
+	}
 
 	if (p->type_attr_map) {
 		for (i = 0; i < p->p_types.nprim; i++)
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index d2e80e62ff0c..85e429884393 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -833,6 +833,8 @@ static int security_compute_sid(u32 ssid,
 		goto out;
 	}
 
+	context_init(&newcontext);
+
 	POLICY_RDLOCK;
 
 	scontext = sidtab_search(&sidtab, ssid);
@@ -850,8 +852,6 @@ static int security_compute_sid(u32 ssid,
 		goto out_unlock;
 	}
 
-	context_init(&newcontext);
-
 	/* Set the user identity. */
 	switch (specified) {
 	case AVTAB_TRANSITION: