author      Steve French <sfrench@us.ibm.com>   2006-02-22 18:38:53 -0500
committer   Steve French <sfrench@us.ibm.com>   2006-02-22 18:38:53 -0500
commit      5d2f248a5f3acac4b763439327c92091be7abb1c (patch)
tree        8f30098a6d17c0367adfbb780e5a8d9a5c43ad5a
parent      a048d7a8704b35ff6372fdf5eedd4533f37b1885 (diff)
parent      9e956c2dac9bec602ed1ba29181b45ba6d2b6448 (diff)
Merge with /pub/scm/linux/kernel/git/torvalds/linux-2.6.git
Signed-off-by: Steve French <sfrench@us.ibm.com>
-rw-r--r--  Documentation/cpu-hotplug.txt | 9
-rw-r--r--  Documentation/feature-removal-schedule.txt | 9
-rw-r--r--  Documentation/filesystems/tmpfs.txt | 21
-rw-r--r--  Documentation/filesystems/v9fs.txt | 16
-rw-r--r--  Documentation/sysctl/kernel.txt | 10
-rw-r--r--  arch/arm/common/rtctime.c | 16
-rw-r--r--  arch/arm/kernel/entry-armv.S | 2
-rw-r--r--  arch/arm/kernel/traps.c | 8
-rw-r--r--  arch/arm/mach-at91rm9200/devices.c | 4
-rw-r--r--  arch/arm/mach-at91rm9200/gpio.c | 17
-rw-r--r--  arch/arm/mach-ixp4xx/common.c | 20
-rw-r--r--  arch/arm/mach-ixp4xx/nslu2-power.c | 3
-rw-r--r--  arch/arm/mach-ixp4xx/nslu2-setup.c | 7
-rw-r--r--  arch/arm/mach-versatile/pci.c | 93
-rw-r--r--  arch/arm/mm/abort-ev6.S | 2
-rw-r--r--  arch/arm/tools/mach-types | 35
-rw-r--r--  arch/h8300/Kconfig.debug | 2
-rw-r--r--  arch/h8300/defconfig | 2
-rw-r--r--  arch/i386/kernel/mpparse.c | 2
-rw-r--r--  arch/m32r/kernel/sys_m32r.c | 61
-rw-r--r--  arch/m68k/kernel/process.c | 3
-rw-r--r--  arch/mips/configs/ip27_defconfig | 13
-rw-r--r--  arch/mips/kernel/binfmt_elfn32.c | 5
-rw-r--r--  arch/mips/kernel/binfmt_elfo32.c | 5
-rw-r--r--  arch/mips/kernel/linux32.c | 22
-rw-r--r--  arch/mips/kernel/scall64-n32.S | 4
-rw-r--r--  arch/mips/kernel/signal32.c | 74
-rw-r--r--  arch/mips/kernel/signal_n32.c | 33
-rw-r--r--  arch/mips/kernel/smp.c | 20
-rw-r--r--  arch/mips/kernel/traps.c | 4
-rw-r--r--  arch/mips/mm/cex-sb1.S | 2
-rw-r--r--  arch/mips/pmc-sierra/yosemite/smp.c | 2
-rw-r--r--  arch/mips/sibyte/Kconfig | 4
-rw-r--r--  arch/mips/sibyte/bcm1480/irq.c | 5
-rw-r--r--  arch/powerpc/kernel/head_64.S | 4
-rw-r--r--  arch/powerpc/kernel/lparcfg.c | 6
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c | 16
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 4
-rw-r--r--  arch/powerpc/kernel/sys_ppc32.c | 4
-rw-r--r--  arch/powerpc/kernel/time.c | 282
-rw-r--r--  arch/powerpc/platforms/powermac/feature.c | 10
-rw-r--r--  arch/powerpc/platforms/pseries/smp.c | 2
-rw-r--r--  arch/ppc/kernel/ppc_ksyms.c | 8
-rw-r--r--  arch/ppc/xmon/start.c | 15
-rw-r--r--  arch/x86_64/defconfig | 6
-rw-r--r--  drivers/block/pktcdvd.c | 82
-rw-r--r--  drivers/char/agp/Kconfig | 55
-rw-r--r--  drivers/char/agp/amd64-agp.c | 6
-rw-r--r--  drivers/char/agp/sworks-agp.c | 4
-rw-r--r--  drivers/char/drm/i915_irq.c | 5
-rw-r--r--  drivers/char/drm/r300_cmdbuf.c | 48
-rw-r--r--  drivers/char/drm/r300_reg.h | 3
-rw-r--r--  drivers/char/drm/radeon_drv.h | 3
-rw-r--r--  drivers/char/sysrq.c | 2
-rw-r--r--  drivers/crypto/padlock-aes.c | 8
-rw-r--r--  drivers/fc4/fc.c | 2
-rw-r--r--  drivers/ieee1394/sbp2.c | 10
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0001.c | 14
-rw-r--r--  drivers/net/irda/irda-usb.c | 90
-rw-r--r--  drivers/net/irda/irda-usb.h | 7
-rw-r--r--  drivers/net/wireless/ipw2200.c | 6
-rw-r--r--  drivers/s390/block/Kconfig | 14
-rw-r--r--  drivers/s390/block/Makefile | 2
-rw-r--r--  drivers/s390/block/dasd.c | 97
-rw-r--r--  drivers/s390/block/dasd_3990_erp.c | 3
-rw-r--r--  drivers/s390/block/dasd_eckd.h | 1
-rw-r--r--  drivers/s390/block/dasd_eer.c | 1090
-rw-r--r--  drivers/s390/block/dasd_int.h | 38
-rw-r--r--  drivers/s390/cio/qdio.c | 13
-rw-r--r--  drivers/scsi/esp.c | 4
-rw-r--r--  drivers/scsi/libata-core.c | 15
-rw-r--r--  drivers/scsi/sata_qstor.c | 2
-rw-r--r--  drivers/spi/spi.c | 5
-rw-r--r--  drivers/video/aty/radeon_pm.c | 19
-rw-r--r--  fs/9p/v9fs.c | 2
-rw-r--r--  fs/cifs/cifssmb.c | 7
-rw-r--r--  fs/cifs/connect.c | 8
-rw-r--r--  fs/proc/inode.c | 4
-rw-r--r--  fs/proc/root.c | 17
-rw-r--r--  fs/super.c | 15
-rw-r--r--  include/asm-arm/arch-at91rm9200/gpio.h | 1
-rw-r--r--  include/asm-arm/arch-ixp4xx/nas100d.h | 4
-rw-r--r--  include/asm-m32r/system.h | 2
-rw-r--r--  include/asm-m68k/irq.h | 2
-rw-r--r--  include/asm-m68k/raw_io.h | 1
-rw-r--r--  include/asm-mips/uaccess.h | 6
-rw-r--r--  include/asm-mips/unistd.h | 6
-rw-r--r--  include/asm-ppc/machdep.h | 13
-rw-r--r--  include/asm-s390/dasd.h | 13
-rw-r--r--  include/linux/acpi.h | 3
-rw-r--r--  include/linux/kobject.h | 6
-rw-r--r--  include/linux/libata.h | 16
-rw-r--r--  include/linux/mm.h | 4
-rw-r--r--  include/linux/mmc/mmc.h | 2
-rw-r--r--  include/linux/nfs_fs.h | 2
-rw-r--r--  include/linux/swap.h | 2
-rw-r--r--  include/linux/sysctl.h | 1
-rw-r--r--  kernel/auditsc.c | 6
-rw-r--r--  kernel/exit.c | 3
-rw-r--r--  kernel/sys_ni.c | 2
-rw-r--r--  kernel/sysctl.c | 18
-rw-r--r--  lib/iomap_copy.c | 2
-rw-r--r--  lib/kobject_uevent.c | 4
-rw-r--r--  mm/mempolicy.c | 4
-rw-r--r--  mm/nommu.c | 2
-rw-r--r--  mm/oom_kill.c | 123
-rw-r--r--  mm/page_alloc.c | 2
-rw-r--r--  mm/shmem.c | 81
-rw-r--r--  net/core/skbuff.c | 10
-rw-r--r--  net/ipv4/netfilter/ip_nat_core.c | 18
-rw-r--r--  net/ipv4/netfilter/ip_nat_standalone.c | 16
-rw-r--r--  net/ipv6/ip6_tunnel.c | 2
-rw-r--r--  net/xfrm/xfrm_policy.c | 2
-rw-r--r--  sound/core/control_compat.c | 16
114 files changed, 1045 insertions, 1943 deletions
diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt
index e71bc6cbbc5e..57a09f99ecb0 100644
--- a/Documentation/cpu-hotplug.txt
+++ b/Documentation/cpu-hotplug.txt
@@ -46,10 +46,12 @@ maxcpus=n Restrict boot time cpus to n. Say if you have 4 cpus, using
         maxcpus=2 will only boot 2. You can choose to bring the
         other cpus later online, read FAQ's for more info.
 
-additional_cpus=n   [x86_64, s390 only] use this to limit hotpluggable cpus.
-                    This option sets
+additional_cpus*=n  Use this to limit hotpluggable cpus. This option sets
                     cpu_possible_map = cpu_present_map + additional_cpus
 
+(*) Option valid only for following architectures
+- x86_64, ia64, s390
+
 ia64 and x86_64 use the number of disabled local apics in ACPI tables MADT
 to determine the number of potentially hot-pluggable cpus. The implementation
 should only rely on this to count the #of cpus, but *MUST* not rely on the
@@ -57,6 +59,9 @@ apicid values in those tables for disabled apics. In the event BIOS doesnt
 mark such hot-pluggable cpus as disabled entries, one could use this
 parameter "additional_cpus=x" to represent those cpus in the cpu_possible_map.
 
+s390 uses the number of cpus it detects at IPL time to also the number of bits
+in cpu_possible_map. If it is desired to add additional cpus at a later time
+the number should be specified using this option or the possible_cpus option.
 
 possible_cpus=n     [s390 only] use this to set hotpluggable cpus.
                     This option sets possible_cpus bits in
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index b730d765b525..be5ae600f533 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -171,3 +171,12 @@ Why: The ISA interface is faster and should be always available. The I2C
      probing is also known to cause trouble in at least one case (see
      bug #5889.)
 Who:  Jean Delvare <khali@linux-fr.org>
+
+---------------------------
+
+What: mount/umount uevents
+When: February 2007
+Why:  These events are not correct, and do not properly let userspace know
+      when a file system has been mounted or unmounted. Userspace should
+      poll the /proc/mounts file instead to detect this properly.
+Who:  Greg Kroah-Hartman <gregkh@suse.de>
diff --git a/Documentation/filesystems/tmpfs.txt b/Documentation/filesystems/tmpfs.txt
index dbe4d87d2615..8a155418c705 100644
--- a/Documentation/filesystems/tmpfs.txt
+++ b/Documentation/filesystems/tmpfs.txt
@@ -79,15 +79,18 @@ that instance in a system with many cpus making intensive use of it.
 
 
 tmpfs has a mount option to set the NUMA memory allocation policy for
-all files in that instance:
-mpol=interleave          prefers to allocate memory from each node in turn
-mpol=default             prefers to allocate memory from the local node
-mpol=bind                prefers to allocate from mpol_nodelist
-mpol=preferred           prefers to allocate from first node in mpol_nodelist
+all files in that instance (if CONFIG_NUMA is enabled) - which can be
+adjusted on the fly via 'mount -o remount ...'
 
-The following mount option is used in conjunction with mpol=interleave,
-mpol=bind or mpol=preferred:
-mpol_nodelist:  nodelist suitable for parsing with nodelist_parse.
+mpol=default             prefers to allocate memory from the local node
+mpol=prefer:Node         prefers to allocate memory from the given Node
+mpol=bind:NodeList       allocates memory only from nodes in NodeList
+mpol=interleave          prefers to allocate from each node in turn
+mpol=interleave:NodeList allocates from each node of NodeList in turn
+
+NodeList format is a comma-separated list of decimal numbers and ranges,
+a range being two hyphen-separated decimal numbers, the smallest and
+largest node numbers in the range. For example, mpol=bind:0-3,5,7,9-15
 
 
 To specify the initial root directory you can use the following mount
@@ -109,4 +112,4 @@ RAM/SWAP in 10240 inodes and it is only accessible by root.
 Author:
    Christoph Rohland <cr@sap.com>, 1.12.01
 Updated:
-   Hugh Dickins <hugh@veritas.com>, 13 March 2005
+   Hugh Dickins <hugh@veritas.com>, 19 February 2006
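
As a rough illustration of the option syntax documented above (not part of the patch; the mount point, size and node list are made-up examples), a tmpfs instance with an interleave policy could be mounted from C roughly like this:

    /* hypothetical usage sketch: interleave tmpfs pages across NUMA nodes 0-3 */
    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            if (mount("tmpfs", "/mnt/tmp", "tmpfs", 0,
                      "size=64m,mpol=interleave:0-3") != 0) {
                    perror("mount");
                    return 1;
            }
            return 0;
    }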
diff --git a/Documentation/filesystems/v9fs.txt b/Documentation/filesystems/v9fs.txt
index 4e92feb6b507..24c7a9c41f0d 100644
--- a/Documentation/filesystems/v9fs.txt
+++ b/Documentation/filesystems/v9fs.txt
@@ -57,8 +57,6 @@ OPTIONS
 
   port=n    port to connect to on the remote server
 
-  timeout=n request timeouts (in ms) (default 60000ms)
-
   noextend  force legacy mode (no 9P2000.u semantics)
 
   uid       attempt to mount as a particular uid
@@ -74,10 +72,16 @@ OPTIONS
 RESOURCES
 =========
 
-The Linux version of the 9P server, along with some client-side utilities
-can be found at http://v9fs.sf.net (along with a CVS repository of the
-development branch of this module). There are user and developer mailing
-lists here, as well as a bug-tracker.
+The Linux version of the 9P server is now maintained under the npfs project
+on sourceforge (http://sourceforge.net/projects/npfs).
+
+There are user and developer mailing lists available through the v9fs project
+on sourceforge (http://sourceforge.net/projects/v9fs).
+
+News and other information is maintained on SWiK (http://swik.net/v9fs).
+
+Bug reports may be issued through the kernel.org bugzilla
+(http://bugzilla.kernel.org)
 
 For more information on the Plan 9 Operating System check out
 http://plan9.bell-labs.com/plan9
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 9f11d36a8c10..b0c7ab93dcb9 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -16,6 +16,7 @@ before actually making adjustments.
 
 Currently, these files might (depending on your configuration)
 show up in /proc/sys/kernel:
+- acpi_video_flags
 - acct
 - core_pattern
 - core_uses_pid
@@ -57,6 +58,15 @@ show up in /proc/sys/kernel:
 
 ==============================================================
 
+acpi_video_flags:
+
+flags
+
+See Doc*/kernel/power/video.txt, it allows mode of video boot to be
+set during run time.
+
+==============================================================
+
 acct:
 
 highwater lowwater frequency
diff --git a/arch/arm/common/rtctime.c b/arch/arm/common/rtctime.c
index 48b1e19b131f..e851d86c212c 100644
--- a/arch/arm/common/rtctime.c
+++ b/arch/arm/common/rtctime.c
@@ -128,19 +128,27 @@ EXPORT_SYMBOL(rtc_tm_to_time);
 /*
  * Calculate the next alarm time given the requested alarm time mask
  * and the current time.
- *
- * FIXME: for now, we just copy the alarm time because we're lazy (and
- * is therefore buggy - setting a 10am alarm at 8pm will not result in
- * the alarm triggering.)
  */
 void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now, struct rtc_time *alrm)
 {
+        unsigned long next_time;
+        unsigned long now_time;
+
         next->tm_year = now->tm_year;
         next->tm_mon = now->tm_mon;
         next->tm_mday = now->tm_mday;
         next->tm_hour = alrm->tm_hour;
         next->tm_min = alrm->tm_min;
         next->tm_sec = alrm->tm_sec;
+
+        rtc_tm_to_time(now, &now_time);
+        rtc_tm_to_time(next, &next_time);
+
+        if (next_time < now_time) {
+                /* Advance one day */
+                next_time += 60 * 60 * 24;
+                rtc_time_to_tm(next_time, next);
+        }
 }
 
 static inline int rtc_read_time(struct rtc_ops *ops, struct rtc_time *tm)
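
A quick sketch of why the day advance above is needed (not from the patch; plain seconds arithmetic rather than struct rtc_time): if the requested alarm time of day has already passed, the same wall-clock time on the next day is the correct target.

    /* illustrative only: same idea as the rtc_next_alarm_time() fix */
    unsigned long next_alarm_seconds(unsigned long now, unsigned long alarm_today)
    {
            if (alarm_today < now)
                    alarm_today += 60 * 60 * 24;    /* same time, next day */
            return alarm_today;
    }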
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 964cd717506b..ec48d70c6d8b 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -566,7 +566,7 @@ ENTRY(__switch_to)
         ldr     r6, [r2, #TI_CPU_DOMAIN]!
 #endif
 #if __LINUX_ARM_ARCH__ >= 6
-#ifdef CONFIG_CPU_MPCORE
+#ifdef CONFIG_CPU_32v6K
         clrex
 #else
         strex   r5, r4, [ip]                    @ Clear exclusive monitor
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 10235b01582e..03924bcc6129 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -19,6 +19,7 @@
 #include <linux/personality.h>
 #include <linux/ptrace.h>
 #include <linux/kallsyms.h>
+#include <linux/delay.h>
 #include <linux/init.h>
 
 #include <asm/atomic.h>
@@ -231,6 +232,13 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
         __die(str, err, thread, regs);
         bust_spinlocks(0);
         spin_unlock_irq(&die_lock);
+
+        if (panic_on_oops) {
+                printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
+                ssleep(5);
+                panic("Fatal exception");
+        }
+
         do_exit(SIGSEGV);
 }
 
diff --git a/arch/arm/mach-at91rm9200/devices.c b/arch/arm/mach-at91rm9200/devices.c
index 8df3e5245651..57eedd5beaf6 100644
--- a/arch/arm/mach-at91rm9200/devices.c
+++ b/arch/arm/mach-at91rm9200/devices.c
@@ -100,8 +100,10 @@ void __init at91_add_device_udc(struct at91_udc_data *data)
                 at91_set_gpio_input(data->vbus_pin, 0);
                 at91_set_deglitch(data->vbus_pin, 1);
         }
-        if (data->pullup_pin)
+        if (data->pullup_pin) {
                 at91_set_gpio_output(data->pullup_pin, 0);
+                at91_set_multi_drive(data->pullup_pin, 1);
+        }
 
         udc_data = *data;
         platform_device_register(&at91rm9200_udc_device);
diff --git a/arch/arm/mach-at91rm9200/gpio.c b/arch/arm/mach-at91rm9200/gpio.c
index 2fd2ef583e4d..a9f718bf8ba8 100644
--- a/arch/arm/mach-at91rm9200/gpio.c
+++ b/arch/arm/mach-at91rm9200/gpio.c
@@ -159,6 +159,23 @@ int __init_or_module at91_set_deglitch(unsigned pin, int is_on)
 }
 EXPORT_SYMBOL(at91_set_deglitch);
 
+/*
+ * enable/disable the multi-driver; This is only valid for output and
+ * allows the output pin to run as an open collector output.
+ */
+int __init_or_module at91_set_multi_drive(unsigned pin, int is_on)
+{
+        void __iomem *pio = pin_to_controller(pin);
+        unsigned mask = pin_to_mask(pin);
+
+        if (!pio)
+                return -EINVAL;
+
+        __raw_writel(mask, pio + (is_on ? PIO_MDER : PIO_MDDR));
+        return 0;
+}
+EXPORT_SYMBOL(at91_set_multi_drive);
+
 /*--------------------------------------------------------------------------*/
 
 
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 4bdc9d4526cd..fbadf3021b9e 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -111,24 +111,30 @@ static int ixp4xx_set_irq_type(unsigned int irq, unsigned int type)
         if (line < 0)
                 return -EINVAL;
 
-        if (type & IRQT_BOTHEDGE) {
+        switch (type){
+        case IRQT_BOTHEDGE:
                 int_style = IXP4XX_GPIO_STYLE_TRANSITIONAL;
                 irq_type = IXP4XX_IRQ_EDGE;
-        } else if (type & IRQT_RISING) {
+                break;
+        case IRQT_RISING:
                 int_style = IXP4XX_GPIO_STYLE_RISING_EDGE;
                 irq_type = IXP4XX_IRQ_EDGE;
-        } else if (type & IRQT_FALLING) {
+                break;
+        case IRQT_FALLING:
                 int_style = IXP4XX_GPIO_STYLE_FALLING_EDGE;
                 irq_type = IXP4XX_IRQ_EDGE;
-        } else if (type & IRQT_HIGH) {
+                break;
+        case IRQT_HIGH:
                 int_style = IXP4XX_GPIO_STYLE_ACTIVE_HIGH;
                 irq_type = IXP4XX_IRQ_LEVEL;
-        } else if (type & IRQT_LOW) {
+                break;
+        case IRQT_LOW:
                 int_style = IXP4XX_GPIO_STYLE_ACTIVE_LOW;
                 irq_type = IXP4XX_IRQ_LEVEL;
-        } else
+                break;
+        default:
                 return -EINVAL;
-
+        }
         ixp4xx_config_irq(irq, irq_type);
 
         if (line >= 8) {        /* pins 8-15 */
diff --git a/arch/arm/mach-ixp4xx/nslu2-power.c b/arch/arm/mach-ixp4xx/nslu2-power.c
index b0ad9e901f6e..d80c362bc539 100644
--- a/arch/arm/mach-ixp4xx/nslu2-power.c
+++ b/arch/arm/mach-ixp4xx/nslu2-power.c
@@ -77,6 +77,9 @@ static int __init nslu2_power_init(void)
 
 static void __exit nslu2_power_exit(void)
 {
+        if (!(machine_is_nslu2()))
+                return;
+
         free_irq(NSLU2_RB_IRQ, NULL);
         free_irq(NSLU2_PB_IRQ, NULL);
 }
diff --git a/arch/arm/mach-ixp4xx/nslu2-setup.c b/arch/arm/mach-ixp4xx/nslu2-setup.c
index f260a9d34f70..55411f21d838 100644
--- a/arch/arm/mach-ixp4xx/nslu2-setup.c
+++ b/arch/arm/mach-ixp4xx/nslu2-setup.c
@@ -50,6 +50,12 @@ static struct platform_device nslu2_i2c_controller = {
         .num_resources          = 0,
 };
 
+static struct platform_device nslu2_beeper = {
+        .name                   = "ixp4xx-beeper",
+        .id                     = NSLU2_GPIO_BUZZ,
+        .num_resources          = 0,
+};
+
 static struct resource nslu2_uart_resources[] = {
         {
                 .start          = IXP4XX_UART1_BASE_PHYS,
@@ -97,6 +103,7 @@ static struct platform_device *nslu2_devices[] __initdata = {
         &nslu2_i2c_controller,
         &nslu2_flash,
         &nslu2_uart,
+        &nslu2_beeper,
 };
 
 static void nslu2_power_off(void)
diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c
index b80d57d51699..722fbabc9cfb 100644
--- a/arch/arm/mach-versatile/pci.c
+++ b/arch/arm/mach-versatile/pci.c
@@ -240,6 +240,14 @@ int __init pci_versatile_setup(int nr, struct pci_sys_data *sys)
         int i;
         int myslot = -1;
         unsigned long val;
+        void __iomem *local_pci_cfg_base;
+
+        val = __raw_readl(SYS_PCICTL);
+        if (!(val & 1)) {
+                printk("Not plugged into PCI backplane!\n");
+                ret = -EIO;
+                goto out;
+        }
 
         if (nr == 0) {
                 sys->mem_offset = 0;
@@ -253,48 +261,45 @@ int __init pci_versatile_setup(int nr, struct pci_sys_data *sys)
                 goto out;
         }
 
-        __raw_writel(VERSATILE_PCI_MEM_BASE0 >> 28,PCI_IMAP0);
-        __raw_writel(VERSATILE_PCI_MEM_BASE1 >> 28,PCI_IMAP1);
-        __raw_writel(VERSATILE_PCI_MEM_BASE2 >> 28,PCI_IMAP2);
-
-        __raw_writel(1, SYS_PCICTL);
-
-        val = __raw_readl(SYS_PCICTL);
-        if (!(val & 1)) {
-                printk("Not plugged into PCI backplane!\n");
-                ret = -EIO;
-                goto out;
-        }
-
         /*
          * We need to discover the PCI core first to configure itself
          * before the main PCI probing is performed
          */
-        for (i=0; i<32; i++) {
+        for (i=0; i<32; i++)
                 if ((__raw_readl(VERSATILE_PCI_VIRT_BASE+(i<<11)+DEVICE_ID_OFFSET) == VP_PCI_DEVICE_ID) &&
                     (__raw_readl(VERSATILE_PCI_VIRT_BASE+(i<<11)+CLASS_ID_OFFSET) == VP_PCI_CLASS_ID)) {
                         myslot = i;
-
-                        __raw_writel(myslot, PCI_SELFID);
-                        val = __raw_readl(VERSATILE_PCI_CFG_VIRT_BASE+(myslot<<11)+CSR_OFFSET);
-                        val |= (1<<2);
-                        __raw_writel(val, VERSATILE_PCI_CFG_VIRT_BASE+(myslot<<11)+CSR_OFFSET);
                         break;
                 }
-        }
 
         if (myslot == -1) {
                 printk("Cannot find PCI core!\n");
                 ret = -EIO;
-        } else {
-                printk("PCI core found (slot %d)\n",myslot);
-                /* Do not to map Versatile FPGA PCI device
-                   into memory space as we are short of
-                   mappable memory */
-                pci_slot_ignore |= (1 << myslot);
-                ret = 1;
+                goto out;
         }
 
+        printk("PCI core found (slot %d)\n",myslot);
+
+        __raw_writel(myslot, PCI_SELFID);
+        local_pci_cfg_base = (void *) VERSATILE_PCI_CFG_VIRT_BASE + (myslot << 11);
+
+        val = __raw_readl(local_pci_cfg_base + CSR_OFFSET);
+        val |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE;
+        __raw_writel(val, local_pci_cfg_base + CSR_OFFSET);
+
+        /*
+         * Configure the PCI inbound memory windows to be 1:1 mapped to SDRAM
+         */
+        __raw_writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_0);
+        __raw_writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_1);
+        __raw_writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_2);
+
+        /*
+         * Do not to map Versatile FPGA PCI device into memory space
+         */
+        pci_slot_ignore |= (1 << myslot);
+        ret = 1;
+
  out:
         return ret;
 }
@@ -305,18 +310,18 @@ struct pci_bus *pci_versatile_scan_bus(int nr, struct pci_sys_data *sys)
         return pci_scan_bus(sys->busnr, &pci_versatile_ops, sys);
 }
 
-/*
- * V3_LB_BASE? - local bus address
- * V3_LB_MAP? - pci bus address
- */
 void __init pci_versatile_preinit(void)
 {
-}
+        __raw_writel(VERSATILE_PCI_MEM_BASE0 >> 28, PCI_IMAP0);
+        __raw_writel(VERSATILE_PCI_MEM_BASE1 >> 28, PCI_IMAP1);
+        __raw_writel(VERSATILE_PCI_MEM_BASE2 >> 28, PCI_IMAP2);
 
-void __init pci_versatile_postinit(void)
-{
-}
+        __raw_writel(PHYS_OFFSET >> 28, PCI_SMAP0);
+        __raw_writel(PHYS_OFFSET >> 28, PCI_SMAP1);
+        __raw_writel(PHYS_OFFSET >> 28, PCI_SMAP2);
 
+        __raw_writel(1, SYS_PCICTL);
+}
 
 /*
  * map the specified device/slot/pin to an IRQ.   Different backplanes may need to modify this.
@@ -326,16 +331,15 @@ static int __init versatile_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
         int irq;
         int devslot = PCI_SLOT(dev->devfn);
 
         /* slot,  pin,  irq
-            24     1     27
-            25     1     28   untested
-            26     1     29
-            27     1     30   untested
+         *  24     1     27
+         *  25     1     28
+         *  26     1     29
+         *  27     1     30
          */
-
-        irq = 27 + ((slot + pin + 2) % 3);      /* Fudged */
+        irq = 27 + ((slot + pin - 1) & 3);
 
-        printk("map irq: slot %d, pin %d, devslot %d, irq: %d\n",slot,pin,devslot,irq);
+        printk("PCI map irq: slot %d, pin %d, devslot %d, irq: %d\n",slot,pin,devslot,irq);
 
         return irq;
 }
@@ -347,7 +351,6 @@ static struct hw_pci versatile_pci __initdata = {
         .setup                  = pci_versatile_setup,
         .scan                   = pci_versatile_scan_bus,
         .preinit                = pci_versatile_preinit,
-        .postinit               = pci_versatile_postinit,
 };
 
 static int __init versatile_pci_init(void)
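
A small standalone check (not part of the patch) that the new mapping irq = 27 + ((slot + pin - 1) & 3) reproduces the table in the comment above for pin 1, i.e. slots 24-27 map to IRQs 27-30:

    #include <stdio.h>

    int main(void)
    {
            int slot, pin = 1;

            /* prints slot 24 -> irq 27, 25 -> 28, 26 -> 29, 27 -> 30 */
            for (slot = 24; slot <= 27; slot++)
                    printf("slot %d, pin %d -> irq %d\n",
                           slot, pin, 27 + ((slot + pin - 1) & 3));
            return 0;
    }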
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index dbd346033122..8a7f65ba14b7 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -20,7 +20,7 @@
  */
         .align  5
 ENTRY(v6_early_abort)
-#ifdef CONFIG_CPU_MPCORE
+#ifdef CONFIG_CPU_32v6K
         clrex
 #else
         strex   r0, r1, [sp]                    @ Clear the exclusive monitor
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index d0f9bb5e9023..8ab5300dcb94 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -12,7 +12,7 @@
 #
 #   http://www.arm.linux.org.uk/developer/machines/?action=new
 #
-# Last update: Mon Jan 9 12:56:42 2006
+# Last update: Mon Feb 20 10:18:02 2006
 #
 # machine_is_xxx       CONFIG_xxxx             MACH_TYPE_xxx           number
 #
@@ -904,7 +904,7 @@ wg302v2 MACH_WG302V2 WG302V2 890
 eb42x                  MACH_EB42X              EB42X                   891
 iq331es                MACH_IQ331ES            IQ331ES                 892
 cosydsp                MACH_COSYDSP            COSYDSP                 893
-uplat7d                MACH_UPLAT7D            UPLAT7D                 894
+uplat7d_proto          MACH_UPLAT7D            UPLAT7D                 894
 ptdavinci              MACH_PTDAVINCI          PTDAVINCI               895
 mbus                   MACH_MBUS               MBUS                    896
 nadia2vb               MACH_NADIA2VB           NADIA2VB                897
@@ -938,3 +938,34 @@ auckland MACH_AUCKLAND AUCKLAND 924
 ak3220m                MACH_AK3320M            AK3320M                 925
 duramax                MACH_DURAMAX            DURAMAX                 926
 n35                    MACH_N35                N35                     927
+pronghorn              MACH_PRONGHORN          PRONGHORN               928
+fundy                  MACH_FUNDY              FUNDY                   929
+logicpd_pxa270         MACH_LOGICPD_PXA270     LOGICPD_PXA270          930
+cpu777                 MACH_CPU777             CPU777                  931
+simicon9201            MACH_SIMICON9201        SIMICON9201             932
+leap2_hpm              MACH_LEAP2_HPM          LEAP2_HPM               933
+cm922txa10             MACH_CM922TXA10         CM922TXA10              934
+sandgate               MACH_PXA                PXA                     935
+sandgate2              MACH_SANDGATE2          SANDGATE2               936
+sandgate2g             MACH_SANDGATE2G         SANDGATE2G              937
+sandgate2p             MACH_SANDGATE2P         SANDGATE2P              938
+fred_jack              MACH_FRED_JACK          FRED_JACK               939
+ttg_color1             MACH_TTG_COLOR1         TTG_COLOR1              940
+nxeb500hmi             MACH_NXEB500HMI         NXEB500HMI              941
+netdcu8                MACH_NETDCU8            NETDCU8                 942
+ml675050_cpu_boa       MACH_ML675050_CPU_BOA   ML675050_CPU_BOA        943
+ng_fvx538              MACH_NG_FVX538          NG_FVX538               944
+ng_fvs338              MACH_NG_FVS338          NG_FVS338               945
+pnx4103                MACH_PNX4103            PNX4103                 946
+hesdb                  MACH_HESDB              HESDB                   947
+xsilo                  MACH_XSILO              XSILO                   948
+espresso               MACH_ESPRESSO           ESPRESSO                949
+emlc                   MACH_EMLC               EMLC                    950
+sisteron               MACH_SISTERON           SISTERON                951
+rx1950                 MACH_RX1950             RX1950                  952
+tsc_venus              MACH_TSC_VENUS          TSC_VENUS               953
+ds101j                 MACH_DS101J             DS101J                  954
+mxc300_30ads           MACH_MXC30030ADS        MXC30030ADS             955
+fujitsu_wimaxsoc       MACH_FUJITSU_WIMAXSOC   FUJITSU_WIMAXSOC        956
+dualpcmodem            MACH_DUALPCMODEM        DUALPCMODEM             957
+gesbc9312              MACH_GESBC9312          GESBC9312               958
diff --git a/arch/h8300/Kconfig.debug b/arch/h8300/Kconfig.debug
index 55034d08abff..e0e9bcb015a9 100644
--- a/arch/h8300/Kconfig.debug
+++ b/arch/h8300/Kconfig.debug
@@ -34,7 +34,7 @@ config GDB_DEBUG
         help
           gdb stub exception support
 
-config CONFIG_SH_STANDARD_BIOS
+config SH_STANDARD_BIOS
         bool "Use gdb protocol serial console"
         depends on (!H8300H_SIM && !H8S_SIM)
         help
diff --git a/arch/h8300/defconfig b/arch/h8300/defconfig
index 9d9b491cfc2c..8f1ec3297150 100644
--- a/arch/h8300/defconfig
+++ b/arch/h8300/defconfig
@@ -328,7 +328,7 @@ CONFIG_FULLDEBUG=y
 CONFIG_NO_KERNEL_MSG=y
 # CONFIG_SYSCALL_PRINT is not set
 # CONFIG_GDB_DEBUG is not set
-# CONFIG_CONFIG_SH_STANDARD_BIOS is not set
+# CONFIG_SH_STANDARD_BIOS is not set
 # CONFIG_DEFAULT_CMDLINE is not set
 # CONFIG_BLKDEV_RESERVE is not set
 
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index 0102f3d50e57..e7609abf3796 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -710,7 +710,7 @@ void __init get_smp_config (void)
          * Read the physical hardware table. Anything here will
          * override the defaults.
          */
-        if (!smp_read_mpc((void *)mpf->mpf_physptr)) {
+        if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) {
                 smp_found_config = 0;
                 printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
                 printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
diff --git a/arch/m32r/kernel/sys_m32r.c b/arch/m32r/kernel/sys_m32r.c
index fe55b28d3725..670cb49210af 100644
--- a/arch/m32r/kernel/sys_m32r.c
+++ b/arch/m32r/kernel/sys_m32r.c
@@ -29,28 +29,7 @@
 
 /*
  * sys_tas() - test-and-set
- * linuxthreads testing version
  */
-#ifndef CONFIG_SMP
-asmlinkage int sys_tas(int *addr)
-{
-        int oldval;
-        unsigned long flags;
-
-        if (!access_ok(VERIFY_WRITE, addr, sizeof (int)))
-                return -EFAULT;
-        local_irq_save(flags);
-        oldval = *addr;
-        if (!oldval)
-                *addr = 1;
-        local_irq_restore(flags);
-        return oldval;
-}
-#else /* CONFIG_SMP */
-#include <linux/spinlock.h>
-
-static DEFINE_SPINLOCK(tas_lock);
-
 asmlinkage int sys_tas(int *addr)
 {
         int oldval;
@@ -58,15 +37,43 @@ asmlinkage int sys_tas(int *addr)
         if (!access_ok(VERIFY_WRITE, addr, sizeof (int)))
                 return -EFAULT;
 
-        _raw_spin_lock(&tas_lock);
-        oldval = *addr;
-        if (!oldval)
-                *addr = 1;
-        _raw_spin_unlock(&tas_lock);
+        /* atomic operation:
+         *   oldval = *addr; *addr = 1;
+         */
+        __asm__ __volatile__ (
+                DCACHE_CLEAR("%0", "r4", "%1")
+                "       .fillinsn\n"
+                "1:\n"
+                "       lock    %0, @%1     ->  unlock  %2, @%1\n"
+                "2:\n"
+                /* NOTE:
+                 *   The m32r processor can accept interrupts only
+                 *   at the 32-bit instruction boundary.
+                 *   So, in the above code, the "unlock" instruction
+                 *   can be executed continuously after the "lock"
+                 *   instruction execution without any interruptions.
+                 */
+                ".section .fixup,\"ax\"\n"
+                "       .balign 4\n"
+                "3:     ldi     %0, #%3\n"
+                "       seth    r14, #high(2b)\n"
+                "       or3     r14, r14, #low(2b)\n"
+                "       jmp     r14\n"
+                ".previous\n"
+                ".section __ex_table,\"a\"\n"
+                "       .balign 4\n"
+                "       .long 1b,3b\n"
+                ".previous\n"
+                : "=&r" (oldval)
+                : "r" (addr), "r" (1), "i"(-EFAULT)
+                : "r14", "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+                  , "r4"
+#endif /* CONFIG_CHIP_M32700_TS1 */
+        );
 
         return oldval;
 }
-#endif /* CONFIG_SMP */
 
 /*
  * sys_pipe() is the normal C calling standard for creating
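
For readers not fluent in m32r assembly, the lock/unlock pair above implements an atomic test-and-set; a rough C11 sketch of the same semantics (illustrative only, not kernel code) would be:

    #include <stdatomic.h>

    int tas_semantics(atomic_int *addr)
    {
            /* oldval = *addr; *addr = 1;  -- performed as one atomic step */
            return atomic_exchange(addr, 1);
    }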
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index 3f9cb55d0356..2d8ad0727b6b 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -129,6 +129,9 @@ void machine_power_off(void)
         for (;;);
 }
 
+void (*pm_power_off)(void) = machine_power_off;
+EXPORT_SYMBOL(pm_power_off);
+
 void show_regs(struct pt_regs * regs)
 {
         printk("\n");
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index e17d3adff021..58c22cd344d3 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.15-rc2
-# Thu Nov 24 01:06:21 2005
+# Linux kernel version: 2.6.16-rc4
+# Tue Feb 21 13:44:31 2006
 #
 CONFIG_MIPS=y
 
@@ -144,7 +144,6 @@ CONFIG_PREEMPT_BKL=y
 # Code maturity level options
 #
 CONFIG_EXPERIMENTAL=y
-CONFIG_CLEAN_COMPILE=y
 CONFIG_LOCK_KERNEL=y
 CONFIG_INIT_ENV_ARG_LIMIT=32
 
@@ -250,6 +249,7 @@ CONFIG_NET=y
 #
 # Networking options
 #
+# CONFIG_NETDEBUG is not set
 CONFIG_PACKET=y
 CONFIG_PACKET_MMAP=y
 CONFIG_UNIX=y
@@ -289,6 +289,7 @@ CONFIG_TCP_CONG_BIC=y
 # SCTP Configuration (EXPERIMENTAL)
 #
 # CONFIG_IP_SCTP is not set
+
 # CONFIG_ATM is not set
 # CONFIG_BRIDGE is not set
 # CONFIG_VLAN_8021Q is not set
@@ -448,7 +449,7 @@ CONFIG_SCSI_SAS_ATTRS=m
 #
 # SCSI low-level drivers
 #
-CONFIG_ISCSI_TCP=m
+# CONFIG_ISCSI_TCP is not set
 # CONFIG_BLK_DEV_3W_XXXX_RAID is not set
 # CONFIG_SCSI_3W_9XXX is not set
 # CONFIG_SCSI_ACARD is not set
@@ -774,6 +775,10 @@ CONFIG_USB_ARCH_HAS_OHCI=y
 #
 
 #
+# EDAC - error detection and reporting (RAS)
+#
+
+#
 # File systems
 #
 CONFIG_EXT2_FS=y
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
index d8e2674a1543..4a9f1ecefaf2 100644
--- a/arch/mips/kernel/binfmt_elfn32.c
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -103,8 +103,9 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
          * one divide.
          */
         u64 nsec = (u64)jiffies * TICK_NSEC;
-        value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_usec);
-        value->tv_usec /= NSEC_PER_USEC;
+        long rem;
+        value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &rem);
+        value->tv_usec = rem / NSEC_PER_USEC;
 }
 
 #define ELF_CORE_EFLAGS EF_MIPS_ABI2
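
The fix above keeps the nanosecond remainder in a separate variable and scales it to microseconds instead of writing it straight into tv_usec. A minimal userspace sketch of the same conversion (illustrative only; fixed constants stand in for NSEC_PER_SEC and NSEC_PER_USEC):

    struct simple_timeval { long tv_sec; long tv_usec; };

    static void nsec_to_tv(unsigned long long nsec, struct simple_timeval *value)
    {
            value->tv_sec  = nsec / 1000000000ULL;          /* NSEC_PER_SEC  */
            value->tv_usec = (nsec % 1000000000ULL) / 1000; /* NSEC_PER_USEC */
    }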
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index cec5f327e360..e31813779895 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -105,8 +105,9 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
          * one divide.
          */
         u64 nsec = (u64)jiffies * TICK_NSEC;
-        value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_usec);
-        value->tv_usec /= NSEC_PER_USEC;
+        long rem;
+        value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &rem);
+        value->tv_usec = rem / NSEC_PER_USEC;
 }
 
 #undef ELF_CORE_COPY_REGS
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 60353f5acc48..5f68b220c26d 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -230,6 +230,9 @@ sysn32_waitid(int which, compat_pid_t pid,
         long ret;
         mm_segment_t old_fs = get_fs();
 
+        if (!access_ok(VERIFY_WRITE, uinfo, sizeof(*uinfo)))
+                return -EFAULT;
+
         set_fs (KERNEL_DS);
         ret = sys_waitid(which, pid, uinfo, options,
                          uru ? (struct rusage __user *) &ru : NULL);
@@ -1450,25 +1453,6 @@ sys32_timer_create(u32 clock, struct sigevent32 __user *se32, timer_t __user *ti
         return sys_timer_create(clock, p, timer_id);
 }
 
-asmlinkage long
-sysn32_rt_sigtimedwait(const sigset_t __user *uthese,
-                        siginfo_t __user *uinfo,
-                        const struct compat_timespec __user *uts32,
-                        size_t sigsetsize)
-{
-        struct timespec __user *uts = NULL;
-
-        if (uts32) {
-                struct timespec ts;
-                uts = compat_alloc_user_space(sizeof(struct timespec));
-                if (get_user(ts.tv_sec, &uts32->tv_sec) ||
-                    get_user(ts.tv_nsec, &uts32->tv_nsec) ||
-                    copy_to_user (uts, &ts, sizeof (ts)))
-                        return -EFAULT;
-        }
-        return sys_rt_sigtimedwait(uthese, uinfo, uts, sigsetsize);
-}
-
 save_static_function(sys32_clone);
 __attribute_used__ noinline static int
 _sys32_clone(nabi_no_regargs struct pt_regs regs)
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index bc4980cefc8b..d87b5446fa13 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -245,9 +245,9 @@ EXPORT(sysn32_call_table)
         PTR     sys_capget
         PTR     sys_capset
         PTR     sys32_rt_sigpending             /* 6125 */
-        PTR     sysn32_rt_sigtimedwait
+        PTR     compat_sys_rt_sigtimedwait
         PTR     sys_rt_sigqueueinfo
-        PTR     sys32_rt_sigsuspend
+        PTR     sysn32_rt_sigsuspend
         PTR     sys32_sigaltstack
         PTR     compat_sys_utime                /* 6130 */
         PTR     sys_mknod
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 8a8b8dd90417..237cd8a2cd32 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -4,7 +4,7 @@
  * for more details.
  *
  * Copyright (C) 1991, 1992  Linus Torvalds
- * Copyright (C) 1994 - 2000 Ralf Baechle
+ * Copyright (C) 1994 - 2000, 2006 Ralf Baechle
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  */
 #include <linux/cache.h>
@@ -106,8 +106,6 @@ typedef struct compat_siginfo {
 
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
-extern int do_signal32(sigset_t *oldset, struct pt_regs *regs);
-
 /* 32-bit compatibility types */
 
 #define _NSIG_BPW32     32
@@ -198,7 +196,7 @@ __attribute_used__ noinline static int
 _sys32_sigsuspend(nabi_no_regargs struct pt_regs regs)
 {
         compat_sigset_t *uset;
-        sigset_t newset, saveset;
+        sigset_t newset;
 
         uset = (compat_sigset_t *) regs.regs[4];
         if (get_sigset(&newset, uset))
@@ -206,19 +204,15 @@ _sys32_sigsuspend(nabi_no_regargs struct pt_regs regs)
         sigdelsetmask(&newset, ~_BLOCKABLE);
 
         spin_lock_irq(&current->sighand->siglock);
-        saveset = current->blocked;
+        current->saved_sigmask = current->blocked;
         current->blocked = newset;
         recalc_sigpending();
         spin_unlock_irq(&current->sighand->siglock);
 
-        regs.regs[2] = EINTR;
-        regs.regs[7] = 1;
-        while (1) {
-                current->state = TASK_INTERRUPTIBLE;
-                schedule();
-                if (do_signal32(&saveset, &regs))
-                        return -EINTR;
-        }
+        current->state = TASK_INTERRUPTIBLE;
+        schedule();
+        set_thread_flag(TIF_RESTORE_SIGMASK);
+        return -ERESTARTNOHAND;
 }
 
 save_static_function(sys32_rt_sigsuspend);
@@ -226,8 +220,8 @@ __attribute_used__ noinline static int
 _sys32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
 {
         compat_sigset_t *uset;
-        sigset_t newset, saveset;
+        sigset_t newset;
         size_t sigsetsize;
 
         /* XXX Don't preclude handling different sized sigset_t's. */
         sigsetsize = regs.regs[5];
@@ -240,19 +234,15 @@ _sys32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
         sigdelsetmask(&newset, ~_BLOCKABLE);
 
         spin_lock_irq(&current->sighand->siglock);
-        saveset = current->blocked;
+        current->saved_sigmask = current->blocked;
         current->blocked = newset;
         recalc_sigpending();
         spin_unlock_irq(&current->sighand->siglock);
 
-        regs.regs[2] = EINTR;
-        regs.regs[7] = 1;
-        while (1) {
-                current->state = TASK_INTERRUPTIBLE;
-                schedule();
-                if (do_signal32(&saveset, &regs))
-                        return -EINTR;
-        }
+        current->state = TASK_INTERRUPTIBLE;
+        schedule();
+        set_thread_flag(TIF_RESTORE_SIGMASK);
+        return -ERESTARTNOHAND;
 }
 
 asmlinkage int sys32_sigaction(int sig, const struct sigaction32 *act,
@@ -783,7 +773,7 @@ static inline int handle_signal(unsigned long sig, siginfo_t *info,
                 regs->regs[2] = EINTR;
                 break;
         case ERESTARTSYS:
-                if(!(ka->sa.sa_flags & SA_RESTART)) {
+                if (!(ka->sa.sa_flags & SA_RESTART)) {
                         regs->regs[2] = EINTR;
                         break;
                 }
@@ -810,9 +800,10 @@ static inline int handle_signal(unsigned long sig, siginfo_t *info,
         return ret;
 }
 
-int do_signal32(sigset_t *oldset, struct pt_regs *regs)
+void do_signal32(struct pt_regs *regs)
 {
         struct k_sigaction ka;
+        sigset_t *oldset;
         siginfo_t info;
         int signr;
 
@@ -822,17 +813,30 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs)
          * if so.
          */
         if (!user_mode(regs))
-                return 1;
+                return;
 
         if (try_to_freeze())
                 goto no_signal;
 
-        if (!oldset)
+        if (test_thread_flag(TIF_RESTORE_SIGMASK))
+                oldset = &current->saved_sigmask;
+        else
                 oldset = &current->blocked;
 
         signr = get_signal_to_deliver(&info, &ka, regs, NULL);
-        if (signr > 0)
-                return handle_signal(signr, &info, &ka, oldset, regs);
+        if (signr > 0) {
+                /* Whee! Actually deliver the signal. */
+                if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
+                        /*
+                         * A signal was successfully delivered; the saved
+                         * sigmask will have been stored in the signal frame,
+                         * and will be restored by sigreturn, so we can simply
+                         * clear the TIF_RESTORE_SIGMASK flag.
+                         */
+                        if (test_thread_flag(TIF_RESTORE_SIGMASK))
+                                clear_thread_flag(TIF_RESTORE_SIGMASK);
+                }
+        }
 
 no_signal:
         /*
@@ -853,7 +857,15 @@ no_signal:
                         regs->cp0_epc -= 4;
                 }
         }
-        return 0;
+
+        /*
+         * If there's no signal to deliver, we just put the saved sigmask
+         * back
+         */
+        if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+                clear_thread_flag(TIF_RESTORE_SIGMASK);
+                sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+        }
 }
 
 asmlinkage int sys32_rt_sigaction(int sig, const struct sigaction32 *act,
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index 5a3776096f07..3e168c08a3a8 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -81,6 +81,39 @@ struct rt_sigframe_n32 {
 #endif
 };
 
+extern void sigset_from_compat (sigset_t *set, compat_sigset_t *compat);
+
+save_static_function(sysn32_rt_sigsuspend);
+__attribute_used__ noinline static int
+_sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
+{
+        compat_sigset_t __user *unewset, uset;
+        size_t sigsetsize;
+        sigset_t newset;
+
+        /* XXX Don't preclude handling different sized sigset_t's. */
+        sigsetsize = regs.regs[5];
+        if (sigsetsize != sizeof(sigset_t))
+                return -EINVAL;
+
+        unewset = (compat_sigset_t __user *) regs.regs[4];
+        if (copy_from_user(&uset, unewset, sizeof(uset)))
+                return -EFAULT;
+        sigset_from_compat (&newset, &uset);
+        sigdelsetmask(&newset, ~_BLOCKABLE);
+
+        spin_lock_irq(&current->sighand->siglock);
+        current->saved_sigmask = current->blocked;
+        current->blocked = newset;
+        recalc_sigpending();
+        spin_unlock_irq(&current->sighand->siglock);
+
+        current->state = TASK_INTERRUPTIBLE;
+        schedule();
+        set_thread_flag(TIF_RESTORE_SIGMASK);
+        return -ERESTARTNOHAND;
+}
+
 save_static_function(sysn32_rt_sigreturn);
 __attribute_used__ noinline static void
 _sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 25472fcaf715..5e189862e523 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -29,6 +29,7 @@
 #include <linux/timex.h>
 #include <linux/sched.h>
 #include <linux/cpumask.h>
+#include <linux/cpu.h>
 
 #include <asm/atomic.h>
 #include <asm/cpu.h>
@@ -424,6 +425,25 @@ void flush_tlb_one(unsigned long vaddr)
         local_flush_tlb_one(vaddr);
 }
 
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+
+static int __init topology_init(void)
+{
+        int cpu;
+        int ret;
+
+        for_each_cpu(cpu) {
+                ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
+                if (ret)
+                        printk(KERN_WARNING "topology_init: register_cpu %d "
+                               "failed (%d)\n", cpu, ret);
+        }
+
+        return 0;
+}
+
+subsys_initcall(topology_init);
+
 EXPORT_SYMBOL(flush_tlb_page);
 EXPORT_SYMBOL(flush_tlb_one);
 EXPORT_SYMBOL(cpu_data);
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index c9d2b5147ca3..005debbfbe84 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -3,7 +3,7 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1994 - 1999, 2000, 01 Ralf Baechle
+ * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
  * Copyright (C) 1995, 1996 Paul M. Antoine
  * Copyright (C) 1998 Ulf Carlsson
  * Copyright (C) 1999 Silicon Graphics, Inc.
@@ -548,6 +548,8 @@ asmlinkage void do_ov(struct pt_regs *regs)
 {
         siginfo_t info;
 
+        die_if_kernel("Integer overflow", regs);
+
         info.si_code = FPE_INTOVF;
         info.si_signo = SIGFPE;
         info.si_errno = 0;
diff --git a/arch/mips/mm/cex-sb1.S b/arch/mips/mm/cex-sb1.S
index 0e71580774ff..e54a62f2807c 100644
--- a/arch/mips/mm/cex-sb1.S
+++ b/arch/mips/mm/cex-sb1.S
@@ -64,7 +64,7 @@ LEAF(except_vec2_sb1)
         sd      k0,0x170($0)
         sd      k1,0x178($0)
 
-#if CONFIG_SB1_CEX_ALWAYS_FATAL
+#ifdef CONFIG_SB1_CEX_ALWAYS_FATAL
         j       handle_vec2_sb1
         nop
 #else
diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c
index f17f575f58f0..7f8fda962190 100644
--- a/arch/mips/pmc-sierra/yosemite/smp.c
+++ b/arch/mips/pmc-sierra/yosemite/smp.c
@@ -94,7 +94,7 @@ void __init prom_prepare_cpus(unsigned int max_cpus)
 void prom_boot_secondary(int cpu, struct task_struct *idle)
 {
         unsigned long gp = (unsigned long) task_thread_info(idle);
-        unsigned long sp = __KSTK_TOP(idle);
+        unsigned long sp = __KSTK_TOS(idle);
 
         secondary_sp = sp;
         secondary_gp = gp;
diff --git a/arch/mips/sibyte/Kconfig b/arch/mips/sibyte/Kconfig
index de46f62ac462..816aee7fcd25 100644
--- a/arch/mips/sibyte/Kconfig
+++ b/arch/mips/sibyte/Kconfig
@@ -102,11 +102,11 @@ config SIMULATION
           Build a kernel suitable for running under the GDB simulator.
           Primarily adjusts the kernel's notion of time.
 
-config CONFIG_SB1_CEX_ALWAYS_FATAL
+config SB1_CEX_ALWAYS_FATAL
         bool "All cache exceptions considered fatal (no recovery attempted)"
         depends on SIBYTE_SB1xxx_SOC
 
-config CONFIG_SB1_CERR_STALL
+config SB1_CERR_STALL
         bool "Stall (rather than panic) on fatal cache error"
         depends on SIBYTE_SB1xxx_SOC
 
diff --git a/arch/mips/sibyte/bcm1480/irq.c b/arch/mips/sibyte/bcm1480/irq.c
index b2a1ba5d23df..9cf7d713b13c 100644
--- a/arch/mips/sibyte/bcm1480/irq.c
+++ b/arch/mips/sibyte/bcm1480/irq.c
@@ -139,7 +139,7 @@ void bcm1480_unmask_irq(int cpu, int irq)
 #ifdef CONFIG_SMP
 static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask)
 {
-        int i = 0, old_cpu, cpu, int_on;
+        int i = 0, old_cpu, cpu, int_on, k;
         u64 cur_ints;
         irq_desc_t *desc = irq_desc + irq;
         unsigned long flags;
@@ -165,7 +165,6 @@ static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask)
                 irq_dirty -= BCM1480_NR_IRQS_HALF;
         }
 
-        int k;
         for (k=0; k<2; k++) { /* Loop through high and low interrupt mask register */
                 cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(old_cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
                 int_on = !(cur_ints & (((u64) 1) << irq_dirty));
@@ -216,6 +215,7 @@ static void ack_bcm1480_irq(unsigned int irq)
 {
         u64 pending;
         unsigned int irq_dirty;
+        int k;
 
         /*
          * If the interrupt was an HT interrupt, now is the time to
@@ -227,7 +227,6 @@ static void ack_bcm1480_irq(unsigned int irq)
         if ((irq_dirty >= BCM1480_NR_IRQS_HALF) && (irq_dirty <= BCM1480_NR_IRQS)) {
                 irq_dirty -= BCM1480_NR_IRQS_HALF;
         }
-        int k;
         for (k=0; k<2; k++) { /* Loop through high and low LDT interrupts */
                 pending = __raw_readq(IOADDR(A_BCM1480_IMR_REGISTER(bcm1480_irq_owner[irq],
                                                 R_BCM1480_IMR_LDT_INTERRUPT_H + (k*BCM1480_IMR_HL_SPACING))));
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 415659629394..2b03a09fe5e9 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -157,8 +157,7 @@ _GLOBAL(__secondary_hold)
         SET_REG_IMMEDIATE(r4, .hmt_init)
         mtctr   r4
         bctr
-#else
-#ifdef CONFIG_SMP
+#elif defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
         LOAD_REG_IMMEDIATE(r4, .pSeries_secondary_smp_init)
         mtctr   r4
         mr      r3,r24
@@ -166,7 +165,6 @@ _GLOBAL(__secondary_hold)
 #else
         BUG_OPCODE
 #endif
-#endif
 
 /* This value is used to mark exception frames on the stack. */
         .section ".toc","aw"
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 1ae96a8ed7e2..e789fef4eb8a 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -341,7 +341,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
         const char *system_id = "";
         unsigned int *lp_index_ptr, lp_index = 0;
         struct device_node *rtas_node;
-        int *lrdrp;
+        int *lrdrp = NULL;
 
         rootdn = find_path_device("/");
         if (rootdn) {
@@ -362,7 +362,9 @@ static int lparcfg_data(struct seq_file *m, void *v)
362 seq_printf(m, "partition_id=%d\n", (int)lp_index); 362 seq_printf(m, "partition_id=%d\n", (int)lp_index);
363 363
364 rtas_node = find_path_device("/rtas"); 364 rtas_node = find_path_device("/rtas");
365 lrdrp = (int *)get_property(rtas_node, "ibm,lrdr-capacity", NULL); 365 if (rtas_node)
366 lrdrp = (int *)get_property(rtas_node, "ibm,lrdr-capacity",
367 NULL);
366 368
367 if (lrdrp == NULL) { 369 if (lrdrp == NULL) {
368 partition_potential_processors = vdso_data->processorCount; 370 partition_potential_processors = vdso_data->processorCount;
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index d9a459c144d8..8a731ea877b7 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -79,15 +79,8 @@ EXPORT_SYMBOL(sys_sigreturn);
79EXPORT_SYMBOL(strcpy); 79EXPORT_SYMBOL(strcpy);
80EXPORT_SYMBOL(strncpy); 80EXPORT_SYMBOL(strncpy);
81EXPORT_SYMBOL(strcat); 81EXPORT_SYMBOL(strcat);
82EXPORT_SYMBOL(strncat);
83EXPORT_SYMBOL(strchr);
84EXPORT_SYMBOL(strrchr);
85EXPORT_SYMBOL(strpbrk);
86EXPORT_SYMBOL(strstr);
87EXPORT_SYMBOL(strlen); 82EXPORT_SYMBOL(strlen);
88EXPORT_SYMBOL(strnlen);
89EXPORT_SYMBOL(strcmp); 83EXPORT_SYMBOL(strcmp);
90EXPORT_SYMBOL(strncmp);
91EXPORT_SYMBOL(strcasecmp); 84EXPORT_SYMBOL(strcasecmp);
92 85
93EXPORT_SYMBOL(csum_partial); 86EXPORT_SYMBOL(csum_partial);
@@ -185,9 +178,6 @@ EXPORT_SYMBOL(adb_try_handler_change);
185EXPORT_SYMBOL(cuda_request); 178EXPORT_SYMBOL(cuda_request);
186EXPORT_SYMBOL(cuda_poll); 179EXPORT_SYMBOL(cuda_poll);
187#endif /* CONFIG_ADB_CUDA */ 180#endif /* CONFIG_ADB_CUDA */
188#ifdef CONFIG_PPC_PMAC
189EXPORT_SYMBOL(sys_ctrler);
190#endif
191#ifdef CONFIG_VT 181#ifdef CONFIG_VT
192EXPORT_SYMBOL(kd_mksound); 182EXPORT_SYMBOL(kd_mksound);
193#endif 183#endif
@@ -205,7 +195,6 @@ EXPORT_SYMBOL(__lshrdi3);
205EXPORT_SYMBOL(memcpy); 195EXPORT_SYMBOL(memcpy);
206EXPORT_SYMBOL(memset); 196EXPORT_SYMBOL(memset);
207EXPORT_SYMBOL(memmove); 197EXPORT_SYMBOL(memmove);
208EXPORT_SYMBOL(memscan);
209EXPORT_SYMBOL(memcmp); 198EXPORT_SYMBOL(memcmp);
210EXPORT_SYMBOL(memchr); 199EXPORT_SYMBOL(memchr);
211 200
@@ -214,7 +203,6 @@ EXPORT_SYMBOL(screen_info);
214#endif 203#endif
215 204
216#ifdef CONFIG_PPC32 205#ifdef CONFIG_PPC32
217EXPORT_SYMBOL(__delay);
218EXPORT_SYMBOL(timer_interrupt); 206EXPORT_SYMBOL(timer_interrupt);
219EXPORT_SYMBOL(irq_desc); 207EXPORT_SYMBOL(irq_desc);
220EXPORT_SYMBOL(tb_ticks_per_jiffy); 208EXPORT_SYMBOL(tb_ticks_per_jiffy);
@@ -222,10 +210,6 @@ EXPORT_SYMBOL(console_drivers);
222EXPORT_SYMBOL(cacheable_memcpy); 210EXPORT_SYMBOL(cacheable_memcpy);
223#endif 211#endif
224 212
225EXPORT_SYMBOL(__up);
226EXPORT_SYMBOL(__down);
227EXPORT_SYMBOL(__down_interruptible);
228
229#ifdef CONFIG_8xx 213#ifdef CONFIG_8xx
230EXPORT_SYMBOL(cpm_install_handler); 214EXPORT_SYMBOL(cpm_install_handler);
231EXPORT_SYMBOL(cpm_free_handler); 215EXPORT_SYMBOL(cpm_free_handler);
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index a717dff695ef..f96c49b03ba0 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -311,8 +311,6 @@ void smp_release_cpus(void)
311 311
312 DBG(" <- smp_release_cpus()\n"); 312 DBG(" <- smp_release_cpus()\n");
313} 313}
314#else
315#define smp_release_cpus()
316#endif /* CONFIG_SMP || CONFIG_KEXEC */ 314#endif /* CONFIG_SMP || CONFIG_KEXEC */
317 315
318/* 316/*
@@ -473,10 +471,12 @@ void __init setup_system(void)
473 check_smt_enabled(); 471 check_smt_enabled();
474 smp_setup_cpu_maps(); 472 smp_setup_cpu_maps();
475 473
474#ifdef CONFIG_SMP
476 /* Release secondary cpus out of their spinloops at 0x60 now that 475 /* Release secondary cpus out of their spinloops at 0x60 now that
477 * we can map physical -> logical CPU ids 476 * we can map physical -> logical CPU ids
478 */ 477 */
479 smp_release_cpus(); 478 smp_release_cpus();
479#endif
480 480
481 printk("Starting Linux PPC64 %s\n", system_utsname.version); 481 printk("Starting Linux PPC64 %s\n", system_utsname.version);
482 482
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index 475249dc2350..cd75ab2908fa 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -176,7 +176,6 @@ struct timex32 {
176}; 176};
177 177
178extern int do_adjtimex(struct timex *); 178extern int do_adjtimex(struct timex *);
179extern void ppc_adjtimex(void);
180 179
181asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp) 180asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp)
182{ 181{
@@ -209,9 +208,6 @@ asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp)
209 208
210 ret = do_adjtimex(&txc); 209 ret = do_adjtimex(&txc);
211 210
212 /* adjust the conversion of TB to time of day to track adjtimex */
213 ppc_adjtimex();
214
215 if(put_user(txc.modes, &utp->modes) || 211 if(put_user(txc.modes, &utp->modes) ||
216 __put_user(txc.offset, &utp->offset) || 212 __put_user(txc.offset, &utp->offset) ||
217 __put_user(txc.freq, &utp->freq) || 213 __put_user(txc.freq, &utp->freq) ||
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 1886045a2fd8..2a7ddc579379 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -50,6 +50,7 @@
50#include <linux/security.h> 50#include <linux/security.h>
51#include <linux/percpu.h> 51#include <linux/percpu.h>
52#include <linux/rtc.h> 52#include <linux/rtc.h>
53#include <linux/jiffies.h>
53 54
54#include <asm/io.h> 55#include <asm/io.h>
55#include <asm/processor.h> 56#include <asm/processor.h>
@@ -99,7 +100,15 @@ EXPORT_SYMBOL(tb_ticks_per_usec);
99unsigned long tb_ticks_per_sec; 100unsigned long tb_ticks_per_sec;
100u64 tb_to_xs; 101u64 tb_to_xs;
101unsigned tb_to_us; 102unsigned tb_to_us;
102unsigned long processor_freq; 103
104#define TICKLEN_SCALE (SHIFT_SCALE - 10)
105u64 last_tick_len; /* units are ns / 2^TICKLEN_SCALE */
106u64 ticklen_to_xs; /* 0.64 fraction */
107
108/* If last_tick_len corresponds to about 1/HZ seconds, then
109 last_tick_len << TICKLEN_SHIFT will be about 2^63. */
110#define TICKLEN_SHIFT (63 - 30 - TICKLEN_SCALE + SHIFT_HZ)
111
103DEFINE_SPINLOCK(rtc_lock); 112DEFINE_SPINLOCK(rtc_lock);
104EXPORT_SYMBOL_GPL(rtc_lock); 113EXPORT_SYMBOL_GPL(rtc_lock);
105 114
@@ -113,10 +122,6 @@ extern unsigned long wall_jiffies;
113extern struct timezone sys_tz; 122extern struct timezone sys_tz;
114static long timezone_offset; 123static long timezone_offset;
115 124
116void ppc_adjtimex(void);
117
118static unsigned adjusting_time = 0;
119
120unsigned long ppc_proc_freq; 125unsigned long ppc_proc_freq;
121unsigned long ppc_tb_freq; 126unsigned long ppc_tb_freq;
122 127
@@ -178,8 +183,7 @@ static __inline__ void timer_check_rtc(void)
178 */ 183 */
179 if (ppc_md.set_rtc_time && ntp_synced() && 184 if (ppc_md.set_rtc_time && ntp_synced() &&
180 xtime.tv_sec - last_rtc_update >= 659 && 185 xtime.tv_sec - last_rtc_update >= 659 &&
181 abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ && 186 abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ) {
182 jiffies - wall_jiffies == 1) {
183 struct rtc_time tm; 187 struct rtc_time tm;
184 to_tm(xtime.tv_sec + 1 + timezone_offset, &tm); 188 to_tm(xtime.tv_sec + 1 + timezone_offset, &tm);
185 tm.tm_year -= 1900; 189 tm.tm_year -= 1900;
@@ -226,15 +230,14 @@ void do_gettimeofday(struct timeval *tv)
226 if (__USE_RTC()) { 230 if (__USE_RTC()) {
227 /* do this the old way */ 231 /* do this the old way */
228 unsigned long flags, seq; 232 unsigned long flags, seq;
229 unsigned int sec, nsec, usec, lost; 233 unsigned int sec, nsec, usec;
230 234
231 do { 235 do {
232 seq = read_seqbegin_irqsave(&xtime_lock, flags); 236 seq = read_seqbegin_irqsave(&xtime_lock, flags);
233 sec = xtime.tv_sec; 237 sec = xtime.tv_sec;
234 nsec = xtime.tv_nsec + tb_ticks_since(tb_last_stamp); 238 nsec = xtime.tv_nsec + tb_ticks_since(tb_last_stamp);
235 lost = jiffies - wall_jiffies;
236 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); 239 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
237 usec = nsec / 1000 + lost * (1000000 / HZ); 240 usec = nsec / 1000;
238 while (usec >= 1000000) { 241 while (usec >= 1000000) {
239 usec -= 1000000; 242 usec -= 1000000;
240 ++sec; 243 ++sec;
@@ -248,23 +251,6 @@ void do_gettimeofday(struct timeval *tv)
248 251
249EXPORT_SYMBOL(do_gettimeofday); 252EXPORT_SYMBOL(do_gettimeofday);
250 253
251/* Synchronize xtime with do_gettimeofday */
252
253static inline void timer_sync_xtime(unsigned long cur_tb)
254{
255#ifdef CONFIG_PPC64
256 /* why do we do this? */
257 struct timeval my_tv;
258
259 __do_gettimeofday(&my_tv, cur_tb);
260
261 if (xtime.tv_sec <= my_tv.tv_sec) {
262 xtime.tv_sec = my_tv.tv_sec;
263 xtime.tv_nsec = my_tv.tv_usec * 1000;
264 }
265#endif
266}
267
268/* 254/*
269 * There are two copies of tb_to_xs and stamp_xsec so that no 255 * There are two copies of tb_to_xs and stamp_xsec so that no
270 * lock is needed to access and use these values in 256 * lock is needed to access and use these values in
@@ -323,15 +309,30 @@ static __inline__ void timer_recalc_offset(u64 cur_tb)
323{ 309{
324 unsigned long offset; 310 unsigned long offset;
325 u64 new_stamp_xsec; 311 u64 new_stamp_xsec;
312 u64 tlen, t2x;
326 313
327 if (__USE_RTC()) 314 if (__USE_RTC())
328 return; 315 return;
316 tlen = current_tick_length();
329 offset = cur_tb - do_gtod.varp->tb_orig_stamp; 317 offset = cur_tb - do_gtod.varp->tb_orig_stamp;
330 if ((offset & 0x80000000u) == 0) 318 if (tlen == last_tick_len && offset < 0x80000000u) {
331 return; 319 /* check that we're still in sync; if not, resync */
332 new_stamp_xsec = do_gtod.varp->stamp_xsec 320 struct timeval tv;
333 + mulhdu(offset, do_gtod.varp->tb_to_xs); 321 __do_gettimeofday(&tv, cur_tb);
334 update_gtod(cur_tb, new_stamp_xsec, do_gtod.varp->tb_to_xs); 322 if (tv.tv_sec <= xtime.tv_sec &&
323 (tv.tv_sec < xtime.tv_sec ||
324 tv.tv_usec * 1000 <= xtime.tv_nsec))
325 return;
326 }
327 if (tlen != last_tick_len) {
328 t2x = mulhdu(tlen << TICKLEN_SHIFT, ticklen_to_xs);
329 last_tick_len = tlen;
330 } else
331 t2x = do_gtod.varp->tb_to_xs;
332 new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
333 do_div(new_stamp_xsec, 1000000000);
334 new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
335 update_gtod(cur_tb, new_stamp_xsec, t2x);
335} 336}
336 337
337#ifdef CONFIG_SMP 338#ifdef CONFIG_SMP
@@ -462,13 +463,10 @@ void timer_interrupt(struct pt_regs * regs)
462 write_seqlock(&xtime_lock); 463 write_seqlock(&xtime_lock);
463 tb_last_jiffy += tb_ticks_per_jiffy; 464 tb_last_jiffy += tb_ticks_per_jiffy;
464 tb_last_stamp = per_cpu(last_jiffy, cpu); 465 tb_last_stamp = per_cpu(last_jiffy, cpu);
465 timer_recalc_offset(tb_last_jiffy);
466 do_timer(regs); 466 do_timer(regs);
467 timer_sync_xtime(tb_last_jiffy); 467 timer_recalc_offset(tb_last_jiffy);
468 timer_check_rtc(); 468 timer_check_rtc();
469 write_sequnlock(&xtime_lock); 469 write_sequnlock(&xtime_lock);
470 if (adjusting_time && (time_adjust == 0))
471 ppc_adjtimex();
472 } 470 }
473 471
474 next_dec = tb_ticks_per_jiffy - ticks; 472 next_dec = tb_ticks_per_jiffy - ticks;
@@ -492,16 +490,18 @@ void timer_interrupt(struct pt_regs * regs)
492 490
493void wakeup_decrementer(void) 491void wakeup_decrementer(void)
494{ 492{
495 int i; 493 unsigned long ticks;
496 494
497 set_dec(tb_ticks_per_jiffy);
498 /* 495 /*
499 * We don't expect this to be called on a machine with a 601, 496 * The timebase gets saved on sleep and restored on wakeup,
500 * so using get_tbl is fine. 497 * so all we need to do is to reset the decrementer.
501 */ 498 */
502 tb_last_stamp = tb_last_jiffy = get_tb(); 499 ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
503 for_each_cpu(i) 500 if (ticks < tb_ticks_per_jiffy)
504 per_cpu(last_jiffy, i) = tb_last_stamp; 501 ticks = tb_ticks_per_jiffy - ticks;
502 else
503 ticks = 1;
504 set_dec(ticks);
505} 505}
506 506
507#ifdef CONFIG_SMP 507#ifdef CONFIG_SMP
@@ -541,8 +541,8 @@ int do_settimeofday(struct timespec *tv)
541 time_t wtm_sec, new_sec = tv->tv_sec; 541 time_t wtm_sec, new_sec = tv->tv_sec;
542 long wtm_nsec, new_nsec = tv->tv_nsec; 542 long wtm_nsec, new_nsec = tv->tv_nsec;
543 unsigned long flags; 543 unsigned long flags;
544 long int tb_delta; 544 u64 new_xsec;
545 u64 new_xsec, tb_delta_xs; 545 unsigned long tb_delta;
546 546
547 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) 547 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
548 return -EINVAL; 548 return -EINVAL;
@@ -563,9 +563,19 @@ int do_settimeofday(struct timespec *tv)
563 first_settimeofday = 0; 563 first_settimeofday = 0;
564 } 564 }
565#endif 565#endif
566
567 /*
568 * Subtract off the number of nanoseconds since the
569 * beginning of the last tick.
570 * Note that since we don't increment jiffies_64 anywhere other
571 * than in do_timer (since we don't have a lost tick problem),
572 * wall_jiffies will always be the same as jiffies,
573 * and therefore the (jiffies - wall_jiffies) computation
574 * has been removed.
575 */
566 tb_delta = tb_ticks_since(tb_last_stamp); 576 tb_delta = tb_ticks_since(tb_last_stamp);
567 tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy; 577 tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); /* in xsec */
568 tb_delta_xs = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); 578 new_nsec -= SCALE_XSEC(tb_delta, 1000000000);
569 579
570 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec); 580 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
571 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec); 581 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);
@@ -580,12 +590,12 @@ int do_settimeofday(struct timespec *tv)
580 590
581 ntp_clear(); 591 ntp_clear();
582 592
583 new_xsec = 0; 593 new_xsec = xtime.tv_nsec;
584 if (new_nsec != 0) { 594 if (new_xsec != 0) {
585 new_xsec = (u64)new_nsec * XSEC_PER_SEC; 595 new_xsec *= XSEC_PER_SEC;
586 do_div(new_xsec, NSEC_PER_SEC); 596 do_div(new_xsec, NSEC_PER_SEC);
587 } 597 }
588 new_xsec += (u64)new_sec * XSEC_PER_SEC - tb_delta_xs; 598 new_xsec += (u64)xtime.tv_sec * XSEC_PER_SEC;
589 update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs); 599 update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs);
590 600
591 vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; 601 vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
@@ -671,7 +681,7 @@ void __init time_init(void)
671 unsigned long flags; 681 unsigned long flags;
672 unsigned long tm = 0; 682 unsigned long tm = 0;
673 struct div_result res; 683 struct div_result res;
674 u64 scale; 684 u64 scale, x;
675 unsigned shift; 685 unsigned shift;
676 686
677 if (ppc_md.time_init != NULL) 687 if (ppc_md.time_init != NULL)
@@ -693,11 +703,36 @@ void __init time_init(void)
693 } 703 }
694 704
695 tb_ticks_per_jiffy = ppc_tb_freq / HZ; 705 tb_ticks_per_jiffy = ppc_tb_freq / HZ;
696 tb_ticks_per_sec = tb_ticks_per_jiffy * HZ; 706 tb_ticks_per_sec = ppc_tb_freq;
697 tb_ticks_per_usec = ppc_tb_freq / 1000000; 707 tb_ticks_per_usec = ppc_tb_freq / 1000000;
698 tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000); 708 tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
699 div128_by_32(1024*1024, 0, tb_ticks_per_sec, &res); 709
700 tb_to_xs = res.result_low; 710 /*
711 * Calculate the length of each tick in ns. It will not be
712 * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
713 * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
714 * rounded up.
715 */
716 x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
717 do_div(x, ppc_tb_freq);
718 tick_nsec = x;
719 last_tick_len = x << TICKLEN_SCALE;
720
721 /*
722 * Compute ticklen_to_xs, which is a factor which gets multiplied
723 * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
724 * It is computed as:
725 * ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
726 * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
727 * so as to give the result as a 0.64 fixed-point fraction.
728 */
729 div128_by_32(1ULL << (64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT), 0,
730 tb_ticks_per_jiffy, &res);
731 div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
732 ticklen_to_xs = res.result_low;
733
734 /* Compute tb_to_xs from tick_nsec */
735 tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);
701 736
702 /* 737 /*
703 * Compute scale factor for sched_clock. 738 * Compute scale factor for sched_clock.
@@ -724,6 +759,14 @@ void __init time_init(void)
724 tm = get_boot_time(); 759 tm = get_boot_time();
725 760
726 write_seqlock_irqsave(&xtime_lock, flags); 761 write_seqlock_irqsave(&xtime_lock, flags);
762
763 /* If platform provided a timezone (pmac), we correct the time */
764 if (timezone_offset) {
765 sys_tz.tz_minuteswest = -timezone_offset / 60;
766 sys_tz.tz_dsttime = 0;
767 tm -= timezone_offset;
768 }
769
727 xtime.tv_sec = tm; 770 xtime.tv_sec = tm;
728 xtime.tv_nsec = 0; 771 xtime.tv_nsec = 0;
729 do_gtod.varp = &do_gtod.vars[0]; 772 do_gtod.varp = &do_gtod.vars[0];
@@ -738,18 +781,11 @@ void __init time_init(void)
738 vdso_data->tb_orig_stamp = tb_last_jiffy; 781 vdso_data->tb_orig_stamp = tb_last_jiffy;
739 vdso_data->tb_update_count = 0; 782 vdso_data->tb_update_count = 0;
740 vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; 783 vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
741 vdso_data->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC; 784 vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
742 vdso_data->tb_to_xs = tb_to_xs; 785 vdso_data->tb_to_xs = tb_to_xs;
743 786
744 time_freq = 0; 787 time_freq = 0;
745 788
746 /* If platform provided a timezone (pmac), we correct the time */
747 if (timezone_offset) {
748 sys_tz.tz_minuteswest = -timezone_offset / 60;
749 sys_tz.tz_dsttime = 0;
750 xtime.tv_sec -= timezone_offset;
751 }
752
753 last_rtc_update = xtime.tv_sec; 789 last_rtc_update = xtime.tv_sec;
754 set_normalized_timespec(&wall_to_monotonic, 790 set_normalized_timespec(&wall_to_monotonic,
755 -xtime.tv_sec, -xtime.tv_nsec); 791 -xtime.tv_sec, -xtime.tv_nsec);
@@ -759,126 +795,6 @@ void __init time_init(void)
759 set_dec(tb_ticks_per_jiffy); 795 set_dec(tb_ticks_per_jiffy);
760} 796}
761 797
762/*
763 * After adjtimex is called, adjust the conversion of tb ticks
764 * to microseconds to keep do_gettimeofday synchronized
765 * with ntpd.
766 *
767 * Use the time_adjust, time_freq and time_offset computed by adjtimex to
768 * adjust the frequency.
769 */
770
771/* #define DEBUG_PPC_ADJTIMEX 1 */
772
773void ppc_adjtimex(void)
774{
775#ifdef CONFIG_PPC64
776 unsigned long den, new_tb_ticks_per_sec, tb_ticks, old_xsec,
777 new_tb_to_xs, new_xsec, new_stamp_xsec;
778 unsigned long tb_ticks_per_sec_delta;
779 long delta_freq, ltemp;
780 struct div_result divres;
781 unsigned long flags;
782 long singleshot_ppm = 0;
783
784 /*
785 * Compute parts per million frequency adjustment to
786 * accomplish the time adjustment implied by time_offset to be
787 * applied over the elapsed time indicated by time_constant.
788 * Use SHIFT_USEC to get it into the same units as
789 * time_freq.
790 */
791 if ( time_offset < 0 ) {
792 ltemp = -time_offset;
793 ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
794 ltemp >>= SHIFT_KG + time_constant;
795 ltemp = -ltemp;
796 } else {
797 ltemp = time_offset;
798 ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
799 ltemp >>= SHIFT_KG + time_constant;
800 }
801
802 /* If there is a single shot time adjustment in progress */
803 if ( time_adjust ) {
804#ifdef DEBUG_PPC_ADJTIMEX
805 printk("ppc_adjtimex: ");
806 if ( adjusting_time == 0 )
807 printk("starting ");
808 printk("single shot time_adjust = %ld\n", time_adjust);
809#endif
810
811 adjusting_time = 1;
812
813 /*
814 * Compute parts per million frequency adjustment
815 * to match time_adjust
816 */
817 singleshot_ppm = tickadj * HZ;
818 /*
819 * The adjustment should be tickadj*HZ to match the code in
820 * linux/kernel/timer.c, but experiments show that this is too
821 * large. 3/4 of tickadj*HZ seems about right
822 */
823 singleshot_ppm -= singleshot_ppm / 4;
824 /* Use SHIFT_USEC to get it into the same units as time_freq */
825 singleshot_ppm <<= SHIFT_USEC;
826 if ( time_adjust < 0 )
827 singleshot_ppm = -singleshot_ppm;
828 }
829 else {
830#ifdef DEBUG_PPC_ADJTIMEX
831 if ( adjusting_time )
832 printk("ppc_adjtimex: ending single shot time_adjust\n");
833#endif
834 adjusting_time = 0;
835 }
836
837 /* Add up all of the frequency adjustments */
838 delta_freq = time_freq + ltemp + singleshot_ppm;
839
840 /*
841 * Compute a new value for tb_ticks_per_sec based on
842 * the frequency adjustment
843 */
844 den = 1000000 * (1 << (SHIFT_USEC - 8));
845 if ( delta_freq < 0 ) {
846 tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( (-delta_freq) >> (SHIFT_USEC - 8))) / den;
847 new_tb_ticks_per_sec = tb_ticks_per_sec + tb_ticks_per_sec_delta;
848 }
849 else {
850 tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( delta_freq >> (SHIFT_USEC - 8))) / den;
851 new_tb_ticks_per_sec = tb_ticks_per_sec - tb_ticks_per_sec_delta;
852 }
853
854#ifdef DEBUG_PPC_ADJTIMEX
855 printk("ppc_adjtimex: ltemp = %ld, time_freq = %ld, singleshot_ppm = %ld\n", ltemp, time_freq, singleshot_ppm);
856 printk("ppc_adjtimex: tb_ticks_per_sec - base = %ld new = %ld\n", tb_ticks_per_sec, new_tb_ticks_per_sec);
857#endif
858
859 /*
860 * Compute a new value of tb_to_xs (used to convert tb to
861 * microseconds) and a new value of stamp_xsec which is the
862 * time (in 1/2^20 second units) corresponding to
863 * tb_orig_stamp. This new value of stamp_xsec compensates
864 * for the change in frequency (implied by the new tb_to_xs)
865 * which guarantees that the current time remains the same.
866 */
867 write_seqlock_irqsave( &xtime_lock, flags );
868 tb_ticks = get_tb() - do_gtod.varp->tb_orig_stamp;
869 div128_by_32(1024*1024, 0, new_tb_ticks_per_sec, &divres);
870 new_tb_to_xs = divres.result_low;
871 new_xsec = mulhdu(tb_ticks, new_tb_to_xs);
872
873 old_xsec = mulhdu(tb_ticks, do_gtod.varp->tb_to_xs);
874 new_stamp_xsec = do_gtod.varp->stamp_xsec + old_xsec - new_xsec;
875
876 update_gtod(do_gtod.varp->tb_orig_stamp, new_stamp_xsec, new_tb_to_xs);
877
878 write_sequnlock_irqrestore( &xtime_lock, flags );
879#endif /* CONFIG_PPC64 */
880}
881
882 798
883#define FEBRUARY 2 799#define FEBRUARY 2
884#define STARTOFTIME 1970 800#define STARTOFTIME 1970
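The time.c changes above drop ppc_adjtimex() in favour of recalculating tb_to_xs every tick from current_tick_length(). The arithmetic in time_init() boils down to this: tb_to_xs is a 0.64 fixed-point factor such that elapsed xsec (units of 2^-20 s) = mulhdu(elapsed timebase ticks, tb_to_xs), i.e. roughly 2^84 / timebase-frequency, and ticklen_to_xs is precomputed so that mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs) reproduces that factor for whatever tick length NTP currently requests, with only a multiply in the per-tick path. Below is a stand-alone sketch of the same computation using GCC's unsigned __int128 in place of the kernel's div128_by_32()/mulhdu() helpers; HZ, SHIFT_HZ, SHIFT_SCALE and the timebase frequency are assumed example values, not taken from the patch:

#include <stdio.h>
#include <stdint.h>

typedef unsigned __int128 u128;

#define HZ            250
#define SHIFT_HZ      8                     /* ~log2(HZ), assumed */
#define SHIFT_SCALE   22                    /* assumed NTP SHIFT_SCALE value */
#define TICKLEN_SCALE (SHIFT_SCALE - 10)
#define TICKLEN_SHIFT (63 - 30 - TICKLEN_SCALE + SHIFT_HZ)

int main(void)
{
	uint64_t ppc_tb_freq = 33333333;     /* example timebase, ticks/s */
	uint64_t tb_ticks_per_jiffy = ppc_tb_freq / HZ;

	/* tick length in ns, rounded up, as in time_init() */
	uint64_t tick_nsec = (uint64_t)(((u128)1000000000 * tb_ticks_per_jiffy
				+ ppc_tb_freq - 1) / ppc_tb_freq);
	uint64_t last_tick_len = tick_nsec << TICKLEN_SCALE;

	/* ticklen_to_xs as a 0.64 fraction:
	 * 2^(64+20-TICKLEN_SCALE-TICKLEN_SHIFT) / (tb_ticks_per_jiffy * 1e9),
	 * computed here with 128-bit arithmetic instead of div128_by_32() */
	uint64_t ticklen_to_xs = (uint64_t)(((u128)1 << (64 + 64 + 20
				- TICKLEN_SCALE - TICKLEN_SHIFT))
				/ ((u128)tb_ticks_per_jiffy * 1000000000));

	/* mulhdu(a, b) = high 64 bits of the 128-bit product a*b;
	 * last_tick_len << TICKLEN_SHIFT is ~2^63 by construction */
	uint64_t tb_to_xs = (uint64_t)(((u128)(last_tick_len << TICKLEN_SHIFT)
				* ticklen_to_xs) >> 64);

	/* pre-patch value for comparison: 2^84 / tb_ticks_per_sec */
	uint64_t tb_to_xs_old = (uint64_t)(((u128)1 << 84) / ppc_tb_freq);

	printf("tb_to_xs (new scheme)   = %llu\n", (unsigned long long)tb_to_xs);
	printf("tb_to_xs (old 2^84/f)   = %llu\n", (unsigned long long)tb_to_xs_old);
	return 0;
}

The two printed values agree up to the rounding introduced by computing tick_nsec rounded up; when adjtimex changes the tick length, only the mulhdu step has to be redone.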
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index 558dd0692092..34714d3ea69a 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -1646,10 +1646,10 @@ static void intrepid_shutdown(struct macio_chip *macio, int sleep_mode)
1646 KL0_SCC_CELL_ENABLE); 1646 KL0_SCC_CELL_ENABLE);
1647 1647
1648 MACIO_BIC(KEYLARGO_FCR1, 1648 MACIO_BIC(KEYLARGO_FCR1,
1649 /*KL1_USB2_CELL_ENABLE |*/
1650 KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT | 1649 KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT |
1651 KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE | 1650 KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE |
1652 KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE); 1651 KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE |
1652 KL1_EIDE0_ENABLE);
1653 if (pmac_mb.board_flags & PMAC_MB_MOBILE) 1653 if (pmac_mb.board_flags & PMAC_MB_MOBILE)
1654 MACIO_BIC(KEYLARGO_FCR1, KL1_UIDE_RESET_N); 1654 MACIO_BIC(KEYLARGO_FCR1, KL1_UIDE_RESET_N);
1655 1655
@@ -2183,7 +2183,7 @@ static struct pmac_mb_def pmac_mb_defs[] = {
2183 }, 2183 },
2184 { "PowerMac10,1", "Mac mini", 2184 { "PowerMac10,1", "Mac mini",
2185 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, 2185 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
2186 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER, 2186 PMAC_MB_MAY_SLEEP,
2187 }, 2187 },
2188 { "iMac,1", "iMac (first generation)", 2188 { "iMac,1", "iMac (first generation)",
2189 PMAC_TYPE_ORIG_IMAC, paddington_features, 2189 PMAC_TYPE_ORIG_IMAC, paddington_features,
@@ -2295,11 +2295,11 @@ static struct pmac_mb_def pmac_mb_defs[] = {
2295 }, 2295 },
2296 { "PowerBook5,8", "PowerBook G4 15\"", 2296 { "PowerBook5,8", "PowerBook G4 15\"",
2297 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, 2297 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
2298 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, 2298 PMAC_MB_MAY_SLEEP | PMAC_MB_MOBILE,
2299 }, 2299 },
2300 { "PowerBook5,9", "PowerBook G4 17\"", 2300 { "PowerBook5,9", "PowerBook G4 17\"",
2301 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, 2301 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
2302 PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, 2302 PMAC_MB_MAY_SLEEP | PMAC_MB_MOBILE,
2303 }, 2303 },
2304 { "PowerBook6,1", "PowerBook G4 12\"", 2304 { "PowerBook6,1", "PowerBook G4 12\"",
2305 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, 2305 PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 8e6b1ed1396e..8d710af50756 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -292,7 +292,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
292 if (start_cpu == RTAS_UNKNOWN_SERVICE) 292 if (start_cpu == RTAS_UNKNOWN_SERVICE)
293 return 1; 293 return 1;
294 294
295 status = rtas_call(start_cpu, 3, 1, NULL, pcpu, start_here, lcpu); 295 status = rtas_call(start_cpu, 3, 1, NULL, pcpu, start_here, pcpu);
296 if (status != 0) { 296 if (status != 0) {
297 printk(KERN_ERR "start-cpu failed: %i\n", status); 297 printk(KERN_ERR "start-cpu failed: %i\n", status);
298 return 0; 298 return 0;
diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c
index 15bd9b448a48..82adb4601348 100644
--- a/arch/ppc/kernel/ppc_ksyms.c
+++ b/arch/ppc/kernel/ppc_ksyms.c
@@ -93,15 +93,8 @@ EXPORT_SYMBOL(test_and_change_bit);
93EXPORT_SYMBOL(strcpy); 93EXPORT_SYMBOL(strcpy);
94EXPORT_SYMBOL(strncpy); 94EXPORT_SYMBOL(strncpy);
95EXPORT_SYMBOL(strcat); 95EXPORT_SYMBOL(strcat);
96EXPORT_SYMBOL(strncat);
97EXPORT_SYMBOL(strchr);
98EXPORT_SYMBOL(strrchr);
99EXPORT_SYMBOL(strpbrk);
100EXPORT_SYMBOL(strstr);
101EXPORT_SYMBOL(strlen); 96EXPORT_SYMBOL(strlen);
102EXPORT_SYMBOL(strnlen);
103EXPORT_SYMBOL(strcmp); 97EXPORT_SYMBOL(strcmp);
104EXPORT_SYMBOL(strncmp);
105EXPORT_SYMBOL(strcasecmp); 98EXPORT_SYMBOL(strcasecmp);
106EXPORT_SYMBOL(__div64_32); 99EXPORT_SYMBOL(__div64_32);
107 100
@@ -253,7 +246,6 @@ EXPORT_SYMBOL(memcpy);
253EXPORT_SYMBOL(cacheable_memcpy); 246EXPORT_SYMBOL(cacheable_memcpy);
254EXPORT_SYMBOL(memset); 247EXPORT_SYMBOL(memset);
255EXPORT_SYMBOL(memmove); 248EXPORT_SYMBOL(memmove);
256EXPORT_SYMBOL(memscan);
257EXPORT_SYMBOL(memcmp); 249EXPORT_SYMBOL(memcmp);
258EXPORT_SYMBOL(memchr); 250EXPORT_SYMBOL(memchr);
259 251
diff --git a/arch/ppc/xmon/start.c b/arch/ppc/xmon/start.c
index 4344cbe9b5c5..484f5bb1aa3e 100644
--- a/arch/ppc/xmon/start.c
+++ b/arch/ppc/xmon/start.c
@@ -146,19 +146,6 @@ xmon_map_scc(void)
146static int scc_initialized = 0; 146static int scc_initialized = 0;
147 147
148void xmon_init_scc(void); 148void xmon_init_scc(void);
149extern void cuda_poll(void);
150
151static inline void do_poll_adb(void)
152{
153#ifdef CONFIG_ADB_PMU
154 if (sys_ctrler == SYS_CTRLER_PMU)
155 pmu_poll_adb();
156#endif /* CONFIG_ADB_PMU */
157#ifdef CONFIG_ADB_CUDA
158 if (sys_ctrler == SYS_CTRLER_CUDA)
159 cuda_poll();
160#endif /* CONFIG_ADB_CUDA */
161}
162 149
163int 150int
164xmon_write(void *handle, void *ptr, int nb) 151xmon_write(void *handle, void *ptr, int nb)
@@ -189,7 +176,7 @@ xmon_write(void *handle, void *ptr, int nb)
189 ct = 0; 176 ct = 0;
190 for (i = 0; i < nb; ++i) { 177 for (i = 0; i < nb; ++i) {
191 while ((*sccc & TXRDY) == 0) 178 while ((*sccc & TXRDY) == 0)
192 do_poll_adb(); 179 ;
193 c = p[i]; 180 c = p[i];
194 if (c == '\n' && !ct) { 181 if (c == '\n' && !ct) {
195 c = '\r'; 182 c = '\r';
diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig
index b337136f28b6..ce4de61ed85d 100644
--- a/arch/x86_64/defconfig
+++ b/arch/x86_64/defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.16-rc3 3# Linux kernel version: 2.6.16-rc3-git9
4# Mon Feb 13 22:31:24 2006 4# Sat Feb 18 00:27:03 2006
5# 5#
6CONFIG_X86_64=y 6CONFIG_X86_64=y
7CONFIG_64BIT=y 7CONFIG_64BIT=y
@@ -1317,7 +1317,7 @@ CONFIG_DETECT_SOFTLOCKUP=y
1317# CONFIG_DEBUG_SPINLOCK is not set 1317# CONFIG_DEBUG_SPINLOCK is not set
1318# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1318# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1319# CONFIG_DEBUG_KOBJECT is not set 1319# CONFIG_DEBUG_KOBJECT is not set
1320CONFIG_DEBUG_INFO=y 1320# CONFIG_DEBUG_INFO is not set
1321CONFIG_DEBUG_FS=y 1321CONFIG_DEBUG_FS=y
1322# CONFIG_DEBUG_VM is not set 1322# CONFIG_DEBUG_VM is not set
1323# CONFIG_FRAME_POINTER is not set 1323# CONFIG_FRAME_POINTER is not set
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 93e44d0292ab..bc9b2bcd7dba 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -58,6 +58,7 @@
58#include <linux/suspend.h> 58#include <linux/suspend.h>
59#include <scsi/scsi_cmnd.h> 59#include <scsi/scsi_cmnd.h>
60#include <scsi/scsi_ioctl.h> 60#include <scsi/scsi_ioctl.h>
61#include <scsi/scsi.h>
61 62
62#include <asm/uaccess.h> 63#include <asm/uaccess.h>
63 64
@@ -380,6 +381,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
380 memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE); 381 memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
381 if (sizeof(rq->cmd) > CDROM_PACKET_SIZE) 382 if (sizeof(rq->cmd) > CDROM_PACKET_SIZE)
382 memset(rq->cmd + CDROM_PACKET_SIZE, 0, sizeof(rq->cmd) - CDROM_PACKET_SIZE); 383 memset(rq->cmd + CDROM_PACKET_SIZE, 0, sizeof(rq->cmd) - CDROM_PACKET_SIZE);
384 rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
383 385
384 rq->ref_count++; 386 rq->ref_count++;
385 rq->flags |= REQ_NOMERGE; 387 rq->flags |= REQ_NOMERGE;
@@ -1495,40 +1497,42 @@ static int pkt_set_write_settings(struct pktcdvd_device *pd)
1495} 1497}
1496 1498
1497/* 1499/*
1498 * 0 -- we can write to this track, 1 -- we can't 1500 * 1 -- we can write to this track, 0 -- we can't
1499 */ 1501 */
1500static int pkt_good_track(track_information *ti) 1502static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
1501{ 1503{
1502 /* 1504 switch (pd->mmc3_profile) {
1503 * only good for CD-RW at the moment, not DVD-RW 1505 case 0x1a: /* DVD+RW */
1504 */ 1506 case 0x12: /* DVD-RAM */
1507 /* The track is always writable on DVD+RW/DVD-RAM */
1508 return 1;
1509 default:
1510 break;
1511 }
1505 1512
1506 /* 1513 if (!ti->packet || !ti->fp)
1507 * FIXME: only for FP
1508 */
1509 if (ti->fp == 0)
1510 return 0; 1514 return 0;
1511 1515
1512 /* 1516 /*
1513 * "good" settings as per Mt Fuji. 1517 * "good" settings as per Mt Fuji.
1514 */ 1518 */
1515 if (ti->rt == 0 && ti->blank == 0 && ti->packet == 1) 1519 if (ti->rt == 0 && ti->blank == 0)
1516 return 0; 1520 return 1;
1517 1521
1518 if (ti->rt == 0 && ti->blank == 1 && ti->packet == 1) 1522 if (ti->rt == 0 && ti->blank == 1)
1519 return 0; 1523 return 1;
1520 1524
1521 if (ti->rt == 1 && ti->blank == 0 && ti->packet == 1) 1525 if (ti->rt == 1 && ti->blank == 0)
1522 return 0; 1526 return 1;
1523 1527
1524 printk("pktcdvd: bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet); 1528 printk("pktcdvd: bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
1525 return 1; 1529 return 0;
1526} 1530}
1527 1531
1528/* 1532/*
1529 * 0 -- we can write to this disc, 1 -- we can't 1533 * 1 -- we can write to this disc, 0 -- we can't
1530 */ 1534 */
1531static int pkt_good_disc(struct pktcdvd_device *pd, disc_information *di) 1535static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
1532{ 1536{
1533 switch (pd->mmc3_profile) { 1537 switch (pd->mmc3_profile) {
1534 case 0x0a: /* CD-RW */ 1538 case 0x0a: /* CD-RW */
@@ -1537,10 +1541,10 @@ static int pkt_good_disc(struct pktcdvd_device *pd, disc_information *di)
1537 case 0x1a: /* DVD+RW */ 1541 case 0x1a: /* DVD+RW */
1538 case 0x13: /* DVD-RW */ 1542 case 0x13: /* DVD-RW */
1539 case 0x12: /* DVD-RAM */ 1543 case 0x12: /* DVD-RAM */
1540 return 0; 1544 return 1;
1541 default: 1545 default:
1542 VPRINTK("pktcdvd: Wrong disc profile (%x)\n", pd->mmc3_profile); 1546 VPRINTK("pktcdvd: Wrong disc profile (%x)\n", pd->mmc3_profile);
1543 return 1; 1547 return 0;
1544 } 1548 }
1545 1549
1546 /* 1550 /*
@@ -1549,25 +1553,25 @@ static int pkt_good_disc(struct pktcdvd_device *pd, disc_information *di)
1549 */ 1553 */
1550 if (di->disc_type == 0xff) { 1554 if (di->disc_type == 0xff) {
1551 printk("pktcdvd: Unknown disc. No track?\n"); 1555 printk("pktcdvd: Unknown disc. No track?\n");
1552 return 1; 1556 return 0;
1553 } 1557 }
1554 1558
1555 if (di->disc_type != 0x20 && di->disc_type != 0) { 1559 if (di->disc_type != 0x20 && di->disc_type != 0) {
1556 printk("pktcdvd: Wrong disc type (%x)\n", di->disc_type); 1560 printk("pktcdvd: Wrong disc type (%x)\n", di->disc_type);
1557 return 1; 1561 return 0;
1558 } 1562 }
1559 1563
1560 if (di->erasable == 0) { 1564 if (di->erasable == 0) {
1561 printk("pktcdvd: Disc not erasable\n"); 1565 printk("pktcdvd: Disc not erasable\n");
1562 return 1; 1566 return 0;
1563 } 1567 }
1564 1568
1565 if (di->border_status == PACKET_SESSION_RESERVED) { 1569 if (di->border_status == PACKET_SESSION_RESERVED) {
1566 printk("pktcdvd: Can't write to last track (reserved)\n"); 1570 printk("pktcdvd: Can't write to last track (reserved)\n");
1567 return 1; 1571 return 0;
1568 } 1572 }
1569 1573
1570 return 0; 1574 return 1;
1571} 1575}
1572 1576
1573static int pkt_probe_settings(struct pktcdvd_device *pd) 1577static int pkt_probe_settings(struct pktcdvd_device *pd)
@@ -1592,23 +1596,9 @@ static int pkt_probe_settings(struct pktcdvd_device *pd)
1592 return ret; 1596 return ret;
1593 } 1597 }
1594 1598
1595 if (pkt_good_disc(pd, &di)) 1599 if (!pkt_writable_disc(pd, &di))
1596 return -ENXIO; 1600 return -EROFS;
1597 1601
1598 switch (pd->mmc3_profile) {
1599 case 0x1a: /* DVD+RW */
1600 printk("pktcdvd: inserted media is DVD+RW\n");
1601 break;
1602 case 0x13: /* DVD-RW */
1603 printk("pktcdvd: inserted media is DVD-RW\n");
1604 break;
1605 case 0x12: /* DVD-RAM */
1606 printk("pktcdvd: inserted media is DVD-RAM\n");
1607 break;
1608 default:
1609 printk("pktcdvd: inserted media is CD-R%s\n", di.erasable ? "W" : "");
1610 break;
1611 }
1612 pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR; 1602 pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
1613 1603
1614 track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */ 1604 track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
@@ -1617,9 +1607,9 @@ static int pkt_probe_settings(struct pktcdvd_device *pd)
1617 return ret; 1607 return ret;
1618 } 1608 }
1619 1609
1620 if (pkt_good_track(&ti)) { 1610 if (!pkt_writable_track(pd, &ti)) {
1621 printk("pktcdvd: can't write to this track\n"); 1611 printk("pktcdvd: can't write to this track\n");
1622 return -ENXIO; 1612 return -EROFS;
1623 } 1613 }
1624 1614
1625 /* 1615 /*
@@ -1633,7 +1623,7 @@ static int pkt_probe_settings(struct pktcdvd_device *pd)
1633 } 1623 }
1634 if (pd->settings.size > PACKET_MAX_SECTORS) { 1624 if (pd->settings.size > PACKET_MAX_SECTORS) {
1635 printk("pktcdvd: packet size is too big\n"); 1625 printk("pktcdvd: packet size is too big\n");
1636 return -ENXIO; 1626 return -EROFS;
1637 } 1627 }
1638 pd->settings.fp = ti.fp; 1628 pd->settings.fp = ti.fp;
1639 pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1); 1629 pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
@@ -1675,7 +1665,7 @@ static int pkt_probe_settings(struct pktcdvd_device *pd)
1675 break; 1665 break;
1676 default: 1666 default:
1677 printk("pktcdvd: unknown data mode\n"); 1667 printk("pktcdvd: unknown data mode\n");
1678 return 1; 1668 return -EROFS;
1679 } 1669 }
1680 return 0; 1670 return 0;
1681} 1671}
@@ -1886,7 +1876,7 @@ static int pkt_open_write(struct pktcdvd_device *pd)
1886 1876
1887 if ((ret = pkt_probe_settings(pd))) { 1877 if ((ret = pkt_probe_settings(pd))) {
1888 VPRINTK("pktcdvd: %s failed probe\n", pd->name); 1878 VPRINTK("pktcdvd: %s failed probe\n", pd->name);
1889 return -EROFS; 1879 return ret;
1890 } 1880 }
1891 1881
1892 if ((ret = pkt_set_write_settings(pd))) { 1882 if ((ret = pkt_set_write_settings(pd))) {
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index 486ed8a11b59..a4d425d2dce2 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -15,22 +15,23 @@ config AGP
15 due to kernel allocation issues), you could use PCI accesses 15 due to kernel allocation issues), you could use PCI accesses
16 and have up to a couple gigs of texture space. 16 and have up to a couple gigs of texture space.
17 17
18 Note that this is the only means to have XFree4/GLX use 18 Note that this is the only means to have X/GLX use
19 write-combining with MTRR support on the AGP bus. Without it, OpenGL 19 write-combining with MTRR support on the AGP bus. Without it, OpenGL
20 direct rendering will be a lot slower but still faster than PIO. 20 direct rendering will be a lot slower but still faster than PIO.
21 21
22 You should say Y here if you use XFree86 3.3.6 or 4.x and want to
23 use GLX or DRI. If unsure, say N.
24
25 To compile this driver as a module, choose M here: the 22 To compile this driver as a module, choose M here: the
26 module will be called agpgart. 23 module will be called agpgart.
27 24
25 You should say Y here if you want to use GLX or DRI.
26
27 If unsure, say N.
28
28config AGP_ALI 29config AGP_ALI
29 tristate "ALI chipset support" 30 tristate "ALI chipset support"
30 depends on AGP && X86_32 31 depends on AGP && X86_32
31 ---help--- 32 ---help---
32 This option gives you AGP support for the GLX component of 33 This option gives you AGP support for the GLX component of
33 XFree86 4.x on the following ALi chipsets. The supported chipsets 34 X on the following ALi chipsets. The supported chipsets
34 include M1541, M1621, M1631, M1632, M1641,M1647,and M1651. 35 include M1541, M1621, M1631, M1632, M1641,M1647,and M1651.
35 For the ALi-chipset question, ALi suggests you refer to 36 For the ALi-chipset question, ALi suggests you refer to
36 <http://www.ali.com.tw/eng/support/index.shtml>. 37 <http://www.ali.com.tw/eng/support/index.shtml>.
@@ -40,28 +41,19 @@ config AGP_ALI
40 timing issues, this chipset cannot do AGP 2x with the G200. 41 timing issues, this chipset cannot do AGP 2x with the G200.
41 This is a hardware limitation. AGP 1x seems to be fine, though. 42 This is a hardware limitation. AGP 1x seems to be fine, though.
42 43
43 You should say Y here if you use XFree86 3.3.6 or 4.x and want to
44 use GLX or DRI. If unsure, say N.
45
46config AGP_ATI 44config AGP_ATI
47 tristate "ATI chipset support" 45 tristate "ATI chipset support"
48 depends on AGP && X86_32 46 depends on AGP && X86_32
49 ---help--- 47 ---help---
50 This option gives you AGP support for the GLX component of 48 This option gives you AGP support for the GLX component of
51 XFree86 4.x on the ATI RadeonIGP family of chipsets. 49 X on the ATI RadeonIGP family of chipsets.
52
53 You should say Y here if you use XFree86 3.3.6 or 4.x and want to
54 use GLX or DRI. If unsure, say N.
55 50
56config AGP_AMD 51config AGP_AMD
57 tristate "AMD Irongate, 761, and 762 chipset support" 52 tristate "AMD Irongate, 761, and 762 chipset support"
58 depends on AGP && X86_32 53 depends on AGP && X86_32
59 help 54 help
60 This option gives you AGP support for the GLX component of 55 This option gives you AGP support for the GLX component of
61 XFree86 4.x on AMD Irongate, 761, and 762 chipsets. 56 X on AMD Irongate, 761, and 762 chipsets.
62
63 You should say Y here if you use XFree86 3.3.6 or 4.x and want to
64 use GLX or DRI. If unsure, say N.
65 57
66config AGP_AMD64 58config AGP_AMD64
67 tristate "AMD Opteron/Athlon64 on-CPU GART support" if !GART_IOMMU 59 tristate "AMD Opteron/Athlon64 on-CPU GART support" if !GART_IOMMU
@@ -69,45 +61,38 @@ config AGP_AMD64
69 default y if GART_IOMMU 61 default y if GART_IOMMU
70 help 62 help
71 This option gives you AGP support for the GLX component of 63 This option gives you AGP support for the GLX component of
72 XFree86 4.x using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs. 64 X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs.
73 You still need an external AGP bridge like the AMD 8151, VIA 65 You still need an external AGP bridge like the AMD 8151, VIA
74 K8T400M, SiS755. It may also support other AGP bridges when loaded 66 K8T400M, SiS755. It may also support other AGP bridges when loaded
75 with agp_try_unsupported=1. 67 with agp_try_unsupported=1.
76 You should say Y here if you use XFree86 3.3.6 or 4.x and want to
77 use GLX or DRI. If unsure, say Y
78 68
79config AGP_INTEL 69config AGP_INTEL
80 tristate "Intel 440LX/BX/GX, I8xx and E7x05 chipset support" 70 tristate "Intel 440LX/BX/GX, I8xx and E7x05 chipset support"
81 depends on AGP && X86 71 depends on AGP && X86
82 help 72 help
83 This option gives you AGP support for the GLX component of XFree86 4.x 73 This option gives you AGP support for the GLX component of X
84 on Intel 440LX/BX/GX, 815, 820, 830, 840, 845, 850, 860, 875, 74 on Intel 440LX/BX/GX, 815, 820, 830, 840, 845, 850, 860, 875,
85 E7205 and E7505 chipsets and full support for the 810, 815, 830M, 845G, 75 E7205 and E7505 chipsets and full support for the 810, 815, 830M,
86 852GM, 855GM, 865G and I915 integrated graphics chipsets. 76 845G, 852GM, 855GM, 865G and I915 integrated graphics chipsets.
77
87 78
88 You should say Y here if you use XFree86 3.3.6 or 4.x and want to
89 use GLX or DRI, or if you have any Intel integrated graphics
90 chipsets. If unsure, say Y.
91 79
92config AGP_NVIDIA 80config AGP_NVIDIA
93 tristate "NVIDIA nForce/nForce2 chipset support" 81 tristate "NVIDIA nForce/nForce2 chipset support"
94 depends on AGP && X86_32 82 depends on AGP && X86_32
95 help 83 help
96 This option gives you AGP support for the GLX component of 84 This option gives you AGP support for the GLX component of
97 XFree86 4.x on the following NVIDIA chipsets. The supported chipsets 85 X on NVIDIA chipsets including nForce and nForce2
98 include nForce and nForce2
99 86
100config AGP_SIS 87config AGP_SIS
101 tristate "SiS chipset support" 88 tristate "SiS chipset support"
102 depends on AGP && X86_32 89 depends on AGP && X86_32
103 help 90 help
104 This option gives you AGP support for the GLX component of 91 This option gives you AGP support for the GLX component of
105 XFree86 4.x on Silicon Integrated Systems [SiS] chipsets. 92 X on Silicon Integrated Systems [SiS] chipsets.
106 93
107 Note that 5591/5592 AGP chipsets are NOT supported. 94 Note that 5591/5592 AGP chipsets are NOT supported.
108 95
109 You should say Y here if you use XFree86 3.3.6 or 4.x and want to
110 use GLX or DRI. If unsure, say N.
111 96
112config AGP_SWORKS 97config AGP_SWORKS
113 tristate "Serverworks LE/HE chipset support" 98 tristate "Serverworks LE/HE chipset support"
@@ -121,10 +106,7 @@ config AGP_VIA
121 depends on AGP && X86_32 106 depends on AGP && X86_32
122 help 107 help
123 This option gives you AGP support for the GLX component of 108 This option gives you AGP support for the GLX component of
124 XFree86 4.x on VIA MVP3/Apollo Pro chipsets. 109 X on VIA MVP3/Apollo Pro chipsets.
125
126 You should say Y here if you use XFree86 3.3.6 or 4.x and want to
127 use GLX or DRI. If unsure, say N.
128 110
129config AGP_I460 111config AGP_I460
130 tristate "Intel 460GX chipset support" 112 tristate "Intel 460GX chipset support"
@@ -159,9 +141,6 @@ config AGP_EFFICEON
159 This option gives you AGP support for the Transmeta Efficeon 141 This option gives you AGP support for the Transmeta Efficeon
160 series processors with integrated northbridges. 142 series processors with integrated northbridges.
161 143
162 You should say Y here if you use XFree86 3.3.6 or 4.x and want to
163 use GLX or DRI. If unsure, say Y.
164
165config AGP_SGI_TIOCA 144config AGP_SGI_TIOCA
166 tristate "SGI TIO chipset AGP support" 145 tristate "SGI TIO chipset AGP support"
167 depends on AGP && (IA64_SGI_SN2 || IA64_GENERIC) 146 depends on AGP && (IA64_SGI_SN2 || IA64_GENERIC)
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 9964c508c111..1251b2515bbe 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -516,8 +516,10 @@ static int __devinit nforce3_agp_init(struct pci_dev *pdev)
516 pci_read_config_dword (hammers[0], AMD64_GARTAPERTUREBASE, &apbase); 516 pci_read_config_dword (hammers[0], AMD64_GARTAPERTUREBASE, &apbase);
517 517
518 /* if x86-64 aperture base is beyond 4G, exit here */ 518 /* if x86-64 aperture base is beyond 4G, exit here */
519 if ( (apbase & 0x7fff) >> (32 - 25) ) 519 if ( (apbase & 0x7fff) >> (32 - 25) ) {
520 return -ENODEV; 520 printk(KERN_INFO PFX "aperture base > 4G\n");
521 return -ENODEV;
522 }
521 523
522 apbase = (apbase & 0x7fff) << 25; 524 apbase = (apbase & 0x7fff) << 25;
523 525
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c
index 268f78d926d3..efef9999f1cf 100644
--- a/drivers/char/agp/sworks-agp.c
+++ b/drivers/char/agp/sworks-agp.c
@@ -468,9 +468,7 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
468 468
469 switch (pdev->device) { 469 switch (pdev->device) {
470 case 0x0006: 470 case 0x0006:
471 /* ServerWorks CNB20HE 471 printk (KERN_ERR PFX "ServerWorks CNB20HE is unsupported due to lack of documentation.\n");
472 Fail silently.*/
473 printk (KERN_ERR PFX "Detected ServerWorks CNB20HE chipset: No AGP present.\n");
474 return -ENODEV; 472 return -ENODEV;
475 473
476 case PCI_DEVICE_ID_SERVERWORKS_HE: 474 case PCI_DEVICE_ID_SERVERWORKS_HE:
diff --git a/drivers/char/drm/i915_irq.c b/drivers/char/drm/i915_irq.c
index a1381c61aa63..d3879ac9970f 100644
--- a/drivers/char/drm/i915_irq.c
+++ b/drivers/char/drm/i915_irq.c
@@ -202,10 +202,15 @@ void i915_driver_irq_postinstall(drm_device_t * dev)
202void i915_driver_irq_uninstall(drm_device_t * dev) 202void i915_driver_irq_uninstall(drm_device_t * dev)
203{ 203{
204 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 204 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
205 u16 temp;
206
205 if (!dev_priv) 207 if (!dev_priv)
206 return; 208 return;
207 209
208 I915_WRITE16(I915REG_HWSTAM, 0xffff); 210 I915_WRITE16(I915REG_HWSTAM, 0xffff);
209 I915_WRITE16(I915REG_INT_MASK_R, 0xffff); 211 I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
210 I915_WRITE16(I915REG_INT_ENABLE_R, 0x0); 212 I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
213
214 temp = I915_READ16(I915REG_INT_IDENTITY_R);
215 I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
211} 216}
diff --git a/drivers/char/drm/r300_cmdbuf.c b/drivers/char/drm/r300_cmdbuf.c
index 291dbf4c8186..c08fa5076f05 100644
--- a/drivers/char/drm/r300_cmdbuf.c
+++ b/drivers/char/drm/r300_cmdbuf.c
@@ -161,6 +161,7 @@ void r300_init_reg_flags(void)
161 ADD_RANGE(R300_VAP_PVS_CNTL_1, 3); 161 ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
162 ADD_RANGE(R300_GB_ENABLE, 1); 162 ADD_RANGE(R300_GB_ENABLE, 1);
163 ADD_RANGE(R300_GB_MSPOS0, 5); 163 ADD_RANGE(R300_GB_MSPOS0, 5);
164 ADD_RANGE(R300_TX_CNTL, 1);
164 ADD_RANGE(R300_TX_ENABLE, 1); 165 ADD_RANGE(R300_TX_ENABLE, 1);
165 ADD_RANGE(0x4200, 4); 166 ADD_RANGE(0x4200, 4);
166 ADD_RANGE(0x4214, 1); 167 ADD_RANGE(0x4214, 1);
@@ -489,6 +490,50 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
489 490
490 return 0; 491 return 0;
491} 492}
493static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
494 drm_radeon_kcmd_buffer_t *cmdbuf)
495{
496 u32 *cmd = (u32 *) cmdbuf->buf;
497 int count, ret;
498 RING_LOCALS;
499
500 count=(cmd[0]>>16) & 0x3fff;
501
502 if (cmd[0] & 0x8000) {
503 u32 offset;
504
505 if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
506 | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
507 offset = cmd[2] << 10;
508 ret = r300_check_offset(dev_priv, offset);
509 if (ret) {
510 DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
511 return DRM_ERR(EINVAL);
512 }
513 }
514
515 if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
516 (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
517 offset = cmd[3] << 10;
518 ret = r300_check_offset(dev_priv, offset);
519 if (ret) {
520 DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
521 return DRM_ERR(EINVAL);
522 }
523
524 }
525 }
526
527 BEGIN_RING(count+2);
528 OUT_RING(cmd[0]);
529 OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
530 ADVANCE_RING();
531
532 cmdbuf->buf += (count+2)*4;
533 cmdbuf->bufsz -= (count+2)*4;
534
535 return 0;
536}
492 537
493static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv, 538static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
494 drm_radeon_kcmd_buffer_t *cmdbuf) 539 drm_radeon_kcmd_buffer_t *cmdbuf)
@@ -527,6 +572,9 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
527 case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */ 572 case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */
528 return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header); 573 return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header);
529 574
575 case RADEON_CNTL_BITBLT_MULTI:
576 return r300_emit_bitblt_multi(dev_priv, cmdbuf);
577
530 case RADEON_CP_3D_DRAW_IMMD_2: /* triggers drawing using in-packet vertex data */ 578 case RADEON_CP_3D_DRAW_IMMD_2: /* triggers drawing using in-packet vertex data */
531 case RADEON_CP_3D_DRAW_VBUF_2: /* triggers drawing of vertex buffers setup elsewhere */ 579 case RADEON_CP_3D_DRAW_VBUF_2: /* triggers drawing of vertex buffers setup elsewhere */
532 case RADEON_CP_3D_DRAW_INDX_2: /* triggers drawing using indices to vertex buffer */ 580 case RADEON_CP_3D_DRAW_INDX_2: /* triggers drawing using indices to vertex buffer */
diff --git a/drivers/char/drm/r300_reg.h b/drivers/char/drm/r300_reg.h
index a0ed20e25221..d1e19954406b 100644
--- a/drivers/char/drm/r300_reg.h
+++ b/drivers/char/drm/r300_reg.h
@@ -451,6 +451,9 @@ I am fairly certain that they are correct unless stated otherwise in comments.
451/* END */ 451/* END */
452 452
453/* gap */ 453/* gap */
454/* Zero to flush caches. */
455#define R300_TX_CNTL 0x4100
456
454/* The upper enable bits are guessed, based on fglrx reported limits. */ 457/* The upper enable bits are guessed, based on fglrx reported limits. */
455#define R300_TX_ENABLE 0x4104 458#define R300_TX_ENABLE 0x4104
456# define R300_TX_ENABLE_0 (1 << 0) 459# define R300_TX_ENABLE_0 (1 << 0)
diff --git a/drivers/char/drm/radeon_drv.h b/drivers/char/drm/radeon_drv.h
index 498b19b1d641..1f7d2ab8c4fc 100644
--- a/drivers/char/drm/radeon_drv.h
+++ b/drivers/char/drm/radeon_drv.h
@@ -90,9 +90,10 @@
90 * 1.19- Add support for gart table in FB memory and PCIE r300 90 * 1.19- Add support for gart table in FB memory and PCIE r300
91 * 1.20- Add support for r300 texrect 91 * 1.20- Add support for r300 texrect
92 * 1.21- Add support for card type getparam 92 * 1.21- Add support for card type getparam
93 * 1.22- Add support for texture cache flushes (R300_TX_CNTL)
93 */ 94 */
94#define DRIVER_MAJOR 1 95#define DRIVER_MAJOR 1
95#define DRIVER_MINOR 21 96#define DRIVER_MINOR 22
96#define DRIVER_PATCHLEVEL 0 97#define DRIVER_PATCHLEVEL 0
97 98
98/* 99/*
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 5765f672e853..d58f82318853 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -243,7 +243,7 @@ static struct sysrq_key_op sysrq_term_op = {
243 243
244static void moom_callback(void *ignored) 244static void moom_callback(void *ignored)
245{ 245{
246 out_of_memory(GFP_KERNEL, 0); 246 out_of_memory(&NODE_DATA(0)->node_zonelists[ZONE_NORMAL], GFP_KERNEL, 0);
247} 247}
248 248
249static DECLARE_WORK(moom_work, moom_callback, NULL); 249static DECLARE_WORK(moom_work, moom_callback, NULL);
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 64819aa7cac4..0c08c58252be 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -348,10 +348,10 @@ aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t
348 break; 348 break;
349 349
350 case 32: 350 case 32:
351 E_KEY[4] = le32_to_cpu(in_key[4]); 351 E_KEY[4] = le32_to_cpu(key[4]);
352 E_KEY[5] = le32_to_cpu(in_key[5]); 352 E_KEY[5] = le32_to_cpu(key[5]);
353 E_KEY[6] = le32_to_cpu(in_key[6]); 353 E_KEY[6] = le32_to_cpu(key[6]);
354 t = E_KEY[7] = le32_to_cpu(in_key[7]); 354 t = E_KEY[7] = le32_to_cpu(key[7]);
355 for (i = 0; i < 7; ++i) 355 for (i = 0; i < 7; ++i)
356 loop8 (i); 356 loop8 (i);
357 break; 357 break;
diff --git a/drivers/fc4/fc.c b/drivers/fc4/fc.c
index 5c8943509cc1..66d03f242d3c 100644
--- a/drivers/fc4/fc.c
+++ b/drivers/fc4/fc.c
@@ -1053,7 +1053,7 @@ static int fc_do_els(fc_channel *fc, unsigned int alpa, void *data, int len)
1053 int i; 1053 int i;
1054 1054
1055 fcmd = &_fcmd; 1055 fcmd = &_fcmd;
1056 memset(fcmd, 0, sizeof(fcmd)); 1056 memset(fcmd, 0, sizeof(fcp_cmnd));
1057 FCD(("PLOGI SID %d DID %d\n", fc->sid, alpa)) 1057 FCD(("PLOGI SID %d DID %d\n", fc->sid, alpa))
1058 fch = &fcmd->fch; 1058 fch = &fcmd->fch;
1059 FILL_FCHDR_RCTL_DID(fch, R_CTL_ELS_REQ, alpa); 1059 FILL_FCHDR_RCTL_DID(fch, R_CTL_ELS_REQ, alpa);
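The one-line fc.c fix above is the classic sizeof-on-a-pointer bug: fcmd points at a local fcp_cmnd (fcmd = &_fcmd), so sizeof(fcmd) is only the pointer size and most of the command was left uninitialized. A tiny stand-alone illustration of the bug class; struct els_frame is a made-up type, not the driver's:

#include <stdio.h>
#include <string.h>

struct els_frame {
	int rctl;
	char payload[60];
};

static void init_frame(struct els_frame *fcmd)
{
	/*
	 * The pre-patch code did the equivalent of
	 *	memset(fcmd, 0, sizeof(fcmd));
	 * which zeroes only sizeof(pointer) bytes (4 or 8) and leaves the
	 * rest of the frame uninitialized.  Sizing the memset by the
	 * pointed-to object (sizeof(*fcmd), or the struct type name as the
	 * patch uses) clears the whole thing.
	 */
	memset(fcmd, 0, sizeof(*fcmd));
}

int main(void)
{
	struct els_frame f;

	init_frame(&f);
	printf("payload[59] = %d\n", f.payload[59]);	/* now reliably 0 */
	return 0;
}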
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 18d7eda38851..c2c776fbda01 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -2082,9 +2082,7 @@ static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id,
2082 2082
2083 SBP2_DEBUG("sbp2_check_sbp2_response"); 2083 SBP2_DEBUG("sbp2_check_sbp2_response");
2084 2084
2085 switch (SCpnt->cmnd[0]) { 2085 if (SCpnt->cmnd[0] == INQUIRY && (SCpnt->cmnd[1] & 3) == 0) {
2086
2087 case INQUIRY:
2088 /* 2086 /*
2089 * Make sure data length is ok. Minimum length is 36 bytes 2087 * Make sure data length is ok. Minimum length is 36 bytes
2090 */ 2088 */
@@ -2097,13 +2095,7 @@ static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id,
2097 */ 2095 */
2098 scsi_buf[2] |= 2; 2096 scsi_buf[2] |= 2;
2099 scsi_buf[3] = (scsi_buf[3] & 0xf0) | 2; 2097 scsi_buf[3] = (scsi_buf[3] & 0xf0) | 2;
2100
2101 break;
2102
2103 default:
2104 break;
2105 } 2098 }
2106 return;
2107} 2099}
2108 2100
2109/* 2101/*
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 69c04945591f..ded2c33f5b85 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -1019,8 +1019,8 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
1019#define XIP_INVAL_CACHED_RANGE(map, from, size) \ 1019#define XIP_INVAL_CACHED_RANGE(map, from, size) \
1020 INVALIDATE_CACHED_RANGE(map, from, size) 1020 INVALIDATE_CACHED_RANGE(map, from, size)
1021 1021
1022#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \ 1022#define INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, adr, len, usec) \
1023 UDELAY(map, chip, adr, usec) 1023 UDELAY(map, chip, cmd_adr, usec)
1024 1024
1025/* 1025/*
1026 * Extra notes: 1026 * Extra notes:
@@ -1052,7 +1052,7 @@ do { \
1052 spin_lock(chip->mutex); \ 1052 spin_lock(chip->mutex); \
1053} while (0) 1053} while (0)
1054 1054
1055#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \ 1055#define INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, adr, len, usec) \
1056do { \ 1056do { \
1057 spin_unlock(chip->mutex); \ 1057 spin_unlock(chip->mutex); \
1058 INVALIDATE_CACHED_RANGE(map, adr, len); \ 1058 INVALIDATE_CACHED_RANGE(map, adr, len); \
@@ -1284,7 +1284,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1284 map_write(map, datum, adr); 1284 map_write(map, datum, adr);
1285 chip->state = mode; 1285 chip->state = mode;
1286 1286
1287 INVALIDATE_CACHE_UDELAY(map, chip, 1287 INVALIDATE_CACHE_UDELAY(map, chip, adr,
1288 adr, map_bankwidth(map), 1288 adr, map_bankwidth(map),
1289 chip->word_write_time); 1289 chip->word_write_time);
1290 1290
@@ -1572,8 +1572,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1572 map_write(map, CMD(0xd0), cmd_adr); 1572 map_write(map, CMD(0xd0), cmd_adr);
1573 chip->state = FL_WRITING; 1573 chip->state = FL_WRITING;
1574 1574
1575 INVALIDATE_CACHE_UDELAY(map, chip, 1575 INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr,
1576 cmd_adr, len, 1576 adr, len,
1577 chip->buffer_write_time); 1577 chip->buffer_write_time);
1578 1578
1579 timeo = jiffies + (HZ/2); 1579 timeo = jiffies + (HZ/2);
@@ -1744,7 +1744,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1744 chip->state = FL_ERASING; 1744 chip->state = FL_ERASING;
1745 chip->erase_suspended = 0; 1745 chip->erase_suspended = 0;
1746 1746
1747 INVALIDATE_CACHE_UDELAY(map, chip, 1747 INVALIDATE_CACHE_UDELAY(map, chip, adr,
1748 adr, len, 1748 adr, len,
1749 chip->erase_time*1000/2); 1749 chip->erase_time*1000/2);
1750 1750
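The cfi_cmdset_0001.c hunks above add a cmd_adr argument to INVALIDATE_CACHE_UDELAY so the status poll and the cache invalidation can use different addresses: the poll has to happen at the address the command was written to, while the invalidation covers the data range. A sketch of how the two build variants fit together, assuming the file's usual CONFIG_MTD_XIP split and the cfi_udelay() helper; the macro bodies are simplified, only the parameter usage matters here:

#ifdef CONFIG_MTD_XIP
/* XIP: poll flash status at cmd_adr; nothing to invalidate. */
#define INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, adr, len, usec)	\
	UDELAY(map, chip, cmd_adr, usec)
#else
/* Non-XIP: drop the chip lock, invalidate the data range, then delay. */
#define INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, adr, len, usec)	\
do {									\
	spin_unlock(chip->mutex);					\
	INVALIDATE_CACHED_RANGE(map, adr, len);				\
	cfi_udelay(usec);						\
	spin_lock(chip->mutex);						\
} while (0)
#endif

/* Buffered write: the confirm command goes to cmd_adr, the data lands at adr. */
INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr,
			adr, len,
			chip->buffer_write_time);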
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index fa176ffb4ad5..8936058a3cce 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -108,6 +108,7 @@ static void irda_usb_close(struct irda_usb_cb *self);
108static void speed_bulk_callback(struct urb *urb, struct pt_regs *regs); 108static void speed_bulk_callback(struct urb *urb, struct pt_regs *regs);
109static void write_bulk_callback(struct urb *urb, struct pt_regs *regs); 109static void write_bulk_callback(struct urb *urb, struct pt_regs *regs);
110static void irda_usb_receive(struct urb *urb, struct pt_regs *regs); 110static void irda_usb_receive(struct urb *urb, struct pt_regs *regs);
111static void irda_usb_rx_defer_expired(unsigned long data);
111static int irda_usb_net_open(struct net_device *dev); 112static int irda_usb_net_open(struct net_device *dev);
112static int irda_usb_net_close(struct net_device *dev); 113static int irda_usb_net_close(struct net_device *dev);
113static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 114static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
@@ -677,6 +678,12 @@ static void irda_usb_net_timeout(struct net_device *netdev)
677 * on the interrupt pipe and hang the Rx URB only when an interrupt is 678 * on the interrupt pipe and hang the Rx URB only when an interrupt is
678 * received. 679 * received.
679 * Jean II 680 * Jean II
681 *
682 * Note : don't read the above as what we are currently doing, but as
683 * something we could do with KC dongle. Also don't forget that the
684 * interrupt pipe is not part of the original standard, so this would
685 * need to be optional...
686 * Jean II
680 */ 687 */
681 688
682/*------------------------------------------------------------------*/ 689/*------------------------------------------------------------------*/
@@ -704,10 +711,8 @@ static void irda_usb_submit(struct irda_usb_cb *self, struct sk_buff *skb, struc
704 /* Reinitialize URB */ 711 /* Reinitialize URB */
705 usb_fill_bulk_urb(urb, self->usbdev, 712 usb_fill_bulk_urb(urb, self->usbdev,
706 usb_rcvbulkpipe(self->usbdev, self->bulk_in_ep), 713 usb_rcvbulkpipe(self->usbdev, self->bulk_in_ep),
707 skb->data, skb->truesize, 714 skb->data, IRDA_SKB_MAX_MTU,
708 irda_usb_receive, skb); 715 irda_usb_receive, skb);
709 /* Note : unlink *must* be synchronous because of the code in
710 * irda_usb_net_close() -> free the skb - Jean II */
711 urb->status = 0; 716 urb->status = 0;
712 717
713 /* Can be called from irda_usb_receive (irq handler) -> GFP_ATOMIC */ 718 /* Can be called from irda_usb_receive (irq handler) -> GFP_ATOMIC */
@@ -734,6 +739,7 @@ static void irda_usb_receive(struct urb *urb, struct pt_regs *regs)
734 struct irda_skb_cb *cb; 739 struct irda_skb_cb *cb;
735 struct sk_buff *newskb; 740 struct sk_buff *newskb;
736 struct sk_buff *dataskb; 741 struct sk_buff *dataskb;
742 struct urb *next_urb;
737 int docopy; 743 int docopy;
738 744
739 IRDA_DEBUG(2, "%s(), len=%d\n", __FUNCTION__, urb->actual_length); 745 IRDA_DEBUG(2, "%s(), len=%d\n", __FUNCTION__, urb->actual_length);
@@ -755,20 +761,37 @@ static void irda_usb_receive(struct urb *urb, struct pt_regs *regs)
755 if (urb->status != 0) { 761 if (urb->status != 0) {
756 switch (urb->status) { 762 switch (urb->status) {
757 case -EILSEQ: 763 case -EILSEQ:
758 self->stats.rx_errors++;
759 self->stats.rx_crc_errors++; 764 self->stats.rx_crc_errors++;
760 break; 765 /* Also precursor to a hot-unplug on UHCI. */
766 /* Fallthrough... */
761 case -ECONNRESET: /* -104 */ 767 case -ECONNRESET: /* -104 */
762 IRDA_DEBUG(0, "%s(), Connection Reset (-104), transfer_flags 0x%04X \n", __FUNCTION__, urb->transfer_flags); 768 /* Random error, if I remember correctly */
763 /* uhci_cleanup_unlink() is going to kill the Rx 769 /* uhci_cleanup_unlink() is going to kill the Rx
764 * URB just after we return. No problem, at this 770 * URB just after we return. No problem, at this
765 * point the URB will be idle ;-) - Jean II */ 771 * point the URB will be idle ;-) - Jean II */
766 break; 772 case -ESHUTDOWN: /* -108 */
773 /* That's usually a hot-unplug. Submit will fail... */
774 case -ETIMEDOUT: /* -110 */
775 /* Usually precursor to a hot-unplug on OHCI. */
767 default: 776 default:
768 IRDA_DEBUG(0, "%s(), RX status %d,transfer_flags 0x%04X \n", __FUNCTION__, urb->status, urb->transfer_flags); 777 self->stats.rx_errors++;
778 IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X \n", __FUNCTION__, urb->status, urb->transfer_flags);
769 break; 779 break;
770 } 780 }
771 goto done; 781 /* If we received an error, we don't want to resubmit the
782 * Rx URB straight away but to give the USB layer a little
783 * bit of breathing room.
784 * We are in the USB thread context, therefore there is a
785 * danger of recursion (new URB we submit fails, we come
786 * back here).
787 * With recent USB stack (2.6.15+), I'm seeing that on
788 * hot unplug of the dongle...
789 * Lowest effective timer is 10ms...
790 * Jean II */
791 self->rx_defer_timer.function = &irda_usb_rx_defer_expired;
792 self->rx_defer_timer.data = (unsigned long) urb;
793 mod_timer(&self->rx_defer_timer, jiffies + (10 * HZ / 1000));
794 return;
772 } 795 }
773 796
774 /* Check for empty frames */ 797 /* Check for empty frames */
@@ -845,13 +868,45 @@ done:
845 * idle slot.... 868 * idle slot....
846 * Jean II */ 869 * Jean II */
847 /* Note : with this scheme, we could submit the idle URB before 870 /* Note : with this scheme, we could submit the idle URB before
848 * processing the Rx URB. Another time... Jean II */ 871 * processing the Rx URB. I don't think it would buy us anything as
872 * we are running in the USB thread context. Jean II */
873 next_urb = self->idle_rx_urb;
849 874
850 /* Submit the idle URB to replace the URB we've just received */
851 irda_usb_submit(self, skb, self->idle_rx_urb);
852 /* Recycle Rx URB : Now, the idle URB is the present one */ 875 /* Recycle Rx URB : Now, the idle URB is the present one */
853 urb->context = NULL; 876 urb->context = NULL;
854 self->idle_rx_urb = urb; 877 self->idle_rx_urb = urb;
878
879 /* Submit the idle URB to replace the URB we've just received.
880 * Do it last to avoid race conditions... Jean II */
881 irda_usb_submit(self, skb, next_urb);
882}
883
884/*------------------------------------------------------------------*/
885/*
886 * In case of errors, we want the USB layer to have time to recover.
 887 * Now, it is time to resubmit our Rx URB...
888 */
889static void irda_usb_rx_defer_expired(unsigned long data)
890{
891 struct urb *urb = (struct urb *) data;
892 struct sk_buff *skb = (struct sk_buff *) urb->context;
893 struct irda_usb_cb *self;
894 struct irda_skb_cb *cb;
895 struct urb *next_urb;
896
897 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
898
899 /* Find ourselves */
900 cb = (struct irda_skb_cb *) skb->cb;
901 IRDA_ASSERT(cb != NULL, return;);
902 self = (struct irda_usb_cb *) cb->context;
903 IRDA_ASSERT(self != NULL, return;);
904
905 /* Same stuff as when Rx is done, see above... */
906 next_urb = self->idle_rx_urb;
907 urb->context = NULL;
908 self->idle_rx_urb = urb;
909 irda_usb_submit(self, skb, next_urb);
855} 910}
856 911
857/*------------------------------------------------------------------*/ 912/*------------------------------------------------------------------*/
@@ -990,6 +1045,9 @@ static int irda_usb_net_close(struct net_device *netdev)
990 /* Stop network Tx queue */ 1045 /* Stop network Tx queue */
991 netif_stop_queue(netdev); 1046 netif_stop_queue(netdev);
992 1047
 1048 /* Kill deferred Rx URB */
1049 del_timer(&self->rx_defer_timer);
1050
993 /* Deallocate all the Rx path buffers (URBs and skb) */ 1051 /* Deallocate all the Rx path buffers (URBs and skb) */
994 for (i = 0; i < IU_MAX_RX_URBS; i++) { 1052 for (i = 0; i < IU_MAX_RX_URBS; i++) {
995 struct urb *urb = self->rx_urb[i]; 1053 struct urb *urb = self->rx_urb[i];
@@ -1365,6 +1423,7 @@ static int irda_usb_probe(struct usb_interface *intf,
1365 self = net->priv; 1423 self = net->priv;
1366 self->netdev = net; 1424 self->netdev = net;
1367 spin_lock_init(&self->lock); 1425 spin_lock_init(&self->lock);
1426 init_timer(&self->rx_defer_timer);
1368 1427
1369 /* Create all of the needed urbs */ 1428 /* Create all of the needed urbs */
1370 for (i = 0; i < IU_MAX_RX_URBS; i++) { 1429 for (i = 0; i < IU_MAX_RX_URBS; i++) {
@@ -1498,6 +1557,9 @@ static void irda_usb_disconnect(struct usb_interface *intf)
1498 * This will stop/deactivate the Tx path. - Jean II */ 1557 * This will stop/deactivate the Tx path. - Jean II */
1499 self->present = 0; 1558 self->present = 0;
1500 1559
 1560 /* Kill deferred Rx URB */
1561 del_timer(&self->rx_defer_timer);
1562
1501 /* We need to have irq enabled to unlink the URBs. That's OK, 1563 /* We need to have irq enabled to unlink the URBs. That's OK,
1502 * at this point the Tx path is gone - Jean II */ 1564 * at this point the Tx path is gone - Jean II */
1503 spin_unlock_irqrestore(&self->lock, flags); 1565 spin_unlock_irqrestore(&self->lock, flags);
@@ -1507,11 +1569,11 @@ static void irda_usb_disconnect(struct usb_interface *intf)
1507 /* Accept no more transmissions */ 1569 /* Accept no more transmissions */
1508 /*netif_device_detach(self->netdev);*/ 1570 /*netif_device_detach(self->netdev);*/
1509 netif_stop_queue(self->netdev); 1571 netif_stop_queue(self->netdev);
1510 /* Stop all the receive URBs */ 1572 /* Stop all the receive URBs. Must be synchronous. */
1511 for (i = 0; i < IU_MAX_RX_URBS; i++) 1573 for (i = 0; i < IU_MAX_RX_URBS; i++)
1512 usb_kill_urb(self->rx_urb[i]); 1574 usb_kill_urb(self->rx_urb[i]);
1513 /* Cancel Tx and speed URB. 1575 /* Cancel Tx and speed URB.
1514 * Toggle flags to make sure it's synchronous. */ 1576 * Make sure it's synchronous to avoid races. */
1515 usb_kill_urb(self->tx_urb); 1577 usb_kill_urb(self->tx_urb);
1516 usb_kill_urb(self->speed_urb); 1578 usb_kill_urb(self->speed_urb);
1517 } 1579 }
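A minimal sketch of the deferred-resubmit pattern the irda-usb.c hunks above introduce, assuming the 2.6.15-era timer and URB completion APIs. The structure and function names here (rx_ctx, rx_complete, rx_defer_expired) are illustrative stand-ins, not the driver's own; the key point is that a failed Rx URB is rearmed from a timer roughly 10ms later instead of from inside the completion handler, which avoids recursing back into the handler if the immediate resubmission also fails:

#include <linux/timer.h>
#include <linux/usb.h>

struct rx_ctx {
	struct urb *urb;
	struct timer_list defer_timer;	/* init_timer()'d at probe time */
};

static void rx_defer_expired(unsigned long data)
{
	struct rx_ctx *ctx = (struct rx_ctx *) data;

	/* Timer context: the USB core has had time to recover, resubmit now. */
	usb_submit_urb(ctx->urb, GFP_ATOMIC);
}

static void rx_complete(struct urb *urb, struct pt_regs *regs)
{
	struct rx_ctx *ctx = urb->context;

	if (urb->status != 0) {
		/* Resubmitting here could fail and call us straight back;
		 * defer by ~10ms (the lowest effective timer) instead. */
		ctx->defer_timer.function = rx_defer_expired;
		ctx->defer_timer.data = (unsigned long) ctx;
		mod_timer(&ctx->defer_timer, jiffies + (10 * HZ / 1000));
		return;
	}

	/* ... hand the received frame up the stack ... */
	usb_submit_urb(urb, GFP_ATOMIC);
}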
diff --git a/drivers/net/irda/irda-usb.h b/drivers/net/irda/irda-usb.h
index bd8f66542322..4026af42dd47 100644
--- a/drivers/net/irda/irda-usb.h
+++ b/drivers/net/irda/irda-usb.h
@@ -136,8 +136,6 @@ struct irda_usb_cb {
136 __u16 bulk_out_mtu; /* Max Tx packet size in bytes */ 136 __u16 bulk_out_mtu; /* Max Tx packet size in bytes */
137 __u8 bulk_int_ep; /* Interrupt Endpoint assignments */ 137 __u8 bulk_int_ep; /* Interrupt Endpoint assignments */
138 138
139 wait_queue_head_t wait_q; /* for timeouts */
140
141 struct urb *rx_urb[IU_MAX_RX_URBS]; /* URBs used to receive data frames */ 139 struct urb *rx_urb[IU_MAX_RX_URBS]; /* URBs used to receive data frames */
142 struct urb *idle_rx_urb; /* Pointer to idle URB in Rx path */ 140 struct urb *idle_rx_urb; /* Pointer to idle URB in Rx path */
143 struct urb *tx_urb; /* URB used to send data frames */ 141 struct urb *tx_urb; /* URB used to send data frames */
@@ -147,17 +145,18 @@ struct irda_usb_cb {
147 struct net_device_stats stats; 145 struct net_device_stats stats;
148 struct irlap_cb *irlap; /* The link layer we are bound to */ 146 struct irlap_cb *irlap; /* The link layer we are bound to */
149 struct qos_info qos; 147 struct qos_info qos;
150 hashbin_t *tx_list; /* Queued transmit skb's */
151 char *speed_buff; /* Buffer for speed changes */ 148 char *speed_buff; /* Buffer for speed changes */
152 149
153 struct timeval stamp; 150 struct timeval stamp;
154 struct timeval now; 151 struct timeval now;
155 152
156 spinlock_t lock; /* For serializing operations */ 153 spinlock_t lock; /* For serializing Tx operations */
157 154
158 __u16 xbofs; /* Current xbofs setting */ 155 __u16 xbofs; /* Current xbofs setting */
159 __s16 new_xbofs; /* xbofs we need to set */ 156 __s16 new_xbofs; /* xbofs we need to set */
160 __u32 speed; /* Current speed */ 157 __u32 speed; /* Current speed */
161 __s32 new_speed; /* speed we need to set */ 158 __s32 new_speed; /* speed we need to set */
159
160 struct timer_list rx_defer_timer; /* Wait for Rx error to clear */
162}; 161};
163 162
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 14beab4bc91c..287676ad80df 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -4616,9 +4616,9 @@ static void ipw_rx_notification(struct ipw_priv *priv,
4616 } 4616 }
4617 4617
4618 default: 4618 default:
4619 IPW_ERROR("Unknown notification: " 4619 IPW_DEBUG_NOTIF("Unknown notification: "
4620 "subtype=%d,flags=0x%2x,size=%d\n", 4620 "subtype=%d,flags=0x%2x,size=%d\n",
4621 notif->subtype, notif->flags, notif->size); 4621 notif->subtype, notif->flags, notif->size);
4622 } 4622 }
4623} 4623}
4624 4624
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 6912399d0937..6f50cc9323d9 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -55,21 +55,13 @@ config DASD_DIAG
55 Disks under VM. If you are not running under VM or unsure what it is, 55 Disks under VM. If you are not running under VM or unsure what it is,
56 say "N". 56 say "N".
57 57
58config DASD_EER
59 tristate "Extended error reporting (EER)"
60 depends on DASD
61 help
62 This driver provides a character device interface to the
63 DASD extended error reporting. This is only needed if you want to
64 use applications written for the EER facility.
65
66config DASD_CMB 58config DASD_CMB
67 tristate "Compatibility interface for DASD channel measurement blocks" 59 tristate "Compatibility interface for DASD channel measurement blocks"
68 depends on DASD 60 depends on DASD
69 help 61 help
70 This driver provides an additional interface to the channel 62 This driver provides an additional interface to the channel measurement
71 measurement facility, which is normally accessed through sysfs, with 63 facility, which is normally accessed through sysfs, with a set of
72 a set of ioctl functions specific to the dasd driver. 64 ioctl functions specific to the dasd driver.
73 This is only needed if you want to use applications written for 65 This is only needed if you want to use applications written for
74 linux-2.4 dasd channel measurement facility interface. 66 linux-2.4 dasd channel measurement facility interface.
75 67
diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile
index 0c0d871e8f51..58c6780134f7 100644
--- a/drivers/s390/block/Makefile
+++ b/drivers/s390/block/Makefile
@@ -5,7 +5,6 @@
5dasd_eckd_mod-objs := dasd_eckd.o dasd_3990_erp.o dasd_9343_erp.o 5dasd_eckd_mod-objs := dasd_eckd.o dasd_3990_erp.o dasd_9343_erp.o
6dasd_fba_mod-objs := dasd_fba.o dasd_3370_erp.o dasd_9336_erp.o 6dasd_fba_mod-objs := dasd_fba.o dasd_3370_erp.o dasd_9336_erp.o
7dasd_diag_mod-objs := dasd_diag.o 7dasd_diag_mod-objs := dasd_diag.o
8dasd_eer_mod-objs := dasd_eer.o
9dasd_mod-objs := dasd.o dasd_ioctl.o dasd_proc.o dasd_devmap.o \ 8dasd_mod-objs := dasd.o dasd_ioctl.o dasd_proc.o dasd_devmap.o \
10 dasd_genhd.o dasd_erp.o 9 dasd_genhd.o dasd_erp.o
11 10
@@ -14,6 +13,5 @@ obj-$(CONFIG_DASD_DIAG) += dasd_diag_mod.o
14obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o 13obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o
15obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o 14obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o
16obj-$(CONFIG_DASD_CMB) += dasd_cmb.o 15obj-$(CONFIG_DASD_CMB) += dasd_cmb.o
17obj-$(CONFIG_DASD_EER) += dasd_eer.o
18obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o 16obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o
19obj-$(CONFIG_DCSSBLK) += dcssblk.o 17obj-$(CONFIG_DCSSBLK) += dcssblk.o
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 08c88fcd8963..af1d5b404cee 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -18,7 +18,6 @@
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/buffer_head.h> 19#include <linux/buffer_head.h>
20#include <linux/hdreg.h> 20#include <linux/hdreg.h>
21#include <linux/notifier.h>
22 21
23#include <asm/ccwdev.h> 22#include <asm/ccwdev.h>
24#include <asm/ebcdic.h> 23#include <asm/ebcdic.h>
@@ -58,7 +57,6 @@ static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
58static void dasd_flush_ccw_queue(struct dasd_device *, int); 57static void dasd_flush_ccw_queue(struct dasd_device *, int);
59static void dasd_tasklet(struct dasd_device *); 58static void dasd_tasklet(struct dasd_device *);
60static void do_kick_device(void *data); 59static void do_kick_device(void *data);
61static void dasd_disable_eer(struct dasd_device *device);
62 60
63/* 61/*
64 * SECTION: Operations on the device structure. 62 * SECTION: Operations on the device structure.
@@ -153,10 +151,13 @@ dasd_state_new_to_known(struct dasd_device *device)
153static inline void 151static inline void
154dasd_state_known_to_new(struct dasd_device * device) 152dasd_state_known_to_new(struct dasd_device * device)
155{ 153{
156 /* disable extended error reporting for this device */
157 dasd_disable_eer(device);
158 /* Forget the discipline information. */ 154 /* Forget the discipline information. */
155 if (device->discipline)
156 module_put(device->discipline->owner);
159 device->discipline = NULL; 157 device->discipline = NULL;
158 if (device->base_discipline)
159 module_put(device->base_discipline->owner);
160 device->base_discipline = NULL;
160 device->state = DASD_STATE_NEW; 161 device->state = DASD_STATE_NEW;
161 162
162 dasd_free_queue(device); 163 dasd_free_queue(device);
@@ -871,9 +872,6 @@ dasd_handle_state_change_pending(struct dasd_device *device)
871 struct dasd_ccw_req *cqr; 872 struct dasd_ccw_req *cqr;
872 struct list_head *l, *n; 873 struct list_head *l, *n;
873 874
874 /* first of all call extended error reporting */
875 dasd_write_eer_trigger(DASD_EER_STATECHANGE, device, NULL);
876
877 device->stopped &= ~DASD_STOPPED_PENDING; 875 device->stopped &= ~DASD_STOPPED_PENDING;
878 876
879 /* restart all 'running' IO on queue */ 877 /* restart all 'running' IO on queue */
@@ -1093,19 +1091,6 @@ restart:
1093 } 1091 }
1094 goto restart; 1092 goto restart;
1095 } 1093 }
1096
1097 /* first of all call extended error reporting */
1098 if (device->eer && cqr->status == DASD_CQR_FAILED) {
1099 dasd_write_eer_trigger(DASD_EER_FATALERROR,
1100 device, cqr);
1101
1102 /* restart request */
1103 cqr->status = DASD_CQR_QUEUED;
1104 cqr->retries = 255;
1105 device->stopped |= DASD_STOPPED_QUIESCE;
1106 goto restart;
1107 }
1108
1109 /* Process finished ERP request. */ 1094 /* Process finished ERP request. */
1110 if (cqr->refers) { 1095 if (cqr->refers) {
1111 __dasd_process_erp(device, cqr); 1096 __dasd_process_erp(device, cqr);
@@ -1243,8 +1228,7 @@ __dasd_start_head(struct dasd_device * device)
1243 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); 1228 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
1244 /* check FAILFAST */ 1229 /* check FAILFAST */
1245 if (device->stopped & ~DASD_STOPPED_PENDING && 1230 if (device->stopped & ~DASD_STOPPED_PENDING &&
1246 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 1231 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags)) {
1247 (!device->eer)) {
1248 cqr->status = DASD_CQR_FAILED; 1232 cqr->status = DASD_CQR_FAILED;
1249 dasd_schedule_bh(device); 1233 dasd_schedule_bh(device);
1250 } 1234 }
@@ -1880,9 +1864,10 @@ dasd_generic_remove (struct ccw_device *cdev)
1880 */ 1864 */
1881int 1865int
1882dasd_generic_set_online (struct ccw_device *cdev, 1866dasd_generic_set_online (struct ccw_device *cdev,
1883 struct dasd_discipline *discipline) 1867 struct dasd_discipline *base_discipline)
1884 1868
1885{ 1869{
1870 struct dasd_discipline *discipline;
1886 struct dasd_device *device; 1871 struct dasd_device *device;
1887 int rc; 1872 int rc;
1888 1873
@@ -1890,6 +1875,7 @@ dasd_generic_set_online (struct ccw_device *cdev,
1890 if (IS_ERR(device)) 1875 if (IS_ERR(device))
1891 return PTR_ERR(device); 1876 return PTR_ERR(device);
1892 1877
1878 discipline = base_discipline;
1893 if (device->features & DASD_FEATURE_USEDIAG) { 1879 if (device->features & DASD_FEATURE_USEDIAG) {
1894 if (!dasd_diag_discipline_pointer) { 1880 if (!dasd_diag_discipline_pointer) {
1895 printk (KERN_WARNING 1881 printk (KERN_WARNING
@@ -1901,6 +1887,16 @@ dasd_generic_set_online (struct ccw_device *cdev,
1901 } 1887 }
1902 discipline = dasd_diag_discipline_pointer; 1888 discipline = dasd_diag_discipline_pointer;
1903 } 1889 }
1890 if (!try_module_get(base_discipline->owner)) {
1891 dasd_delete_device(device);
1892 return -EINVAL;
1893 }
1894 if (!try_module_get(discipline->owner)) {
1895 module_put(base_discipline->owner);
1896 dasd_delete_device(device);
1897 return -EINVAL;
1898 }
1899 device->base_discipline = base_discipline;
1904 device->discipline = discipline; 1900 device->discipline = discipline;
1905 1901
1906 rc = discipline->check_device(device); 1902 rc = discipline->check_device(device);
@@ -1909,6 +1905,8 @@ dasd_generic_set_online (struct ccw_device *cdev,
1909 "dasd_generic couldn't online device %s " 1905 "dasd_generic couldn't online device %s "
1910 "with discipline %s rc=%i\n", 1906 "with discipline %s rc=%i\n",
1911 cdev->dev.bus_id, discipline->name, rc); 1907 cdev->dev.bus_id, discipline->name, rc);
1908 module_put(discipline->owner);
1909 module_put(base_discipline->owner);
1912 dasd_delete_device(device); 1910 dasd_delete_device(device);
1913 return rc; 1911 return rc;
1914 } 1912 }
@@ -1986,9 +1984,6 @@ dasd_generic_notify(struct ccw_device *cdev, int event)
1986 switch (event) { 1984 switch (event) {
1987 case CIO_GONE: 1985 case CIO_GONE:
1988 case CIO_NO_PATH: 1986 case CIO_NO_PATH:
1989 /* first of all call extended error reporting */
1990 dasd_write_eer_trigger(DASD_EER_NOPATH, device, NULL);
1991
1992 if (device->state < DASD_STATE_BASIC) 1987 if (device->state < DASD_STATE_BASIC)
1993 break; 1988 break;
1994 /* Device is active. We want to keep it. */ 1989 /* Device is active. We want to keep it. */
@@ -2046,51 +2041,6 @@ dasd_generic_auto_online (struct ccw_driver *dasd_discipline_driver)
2046 put_driver(drv); 2041 put_driver(drv);
2047} 2042}
2048 2043
2049/*
2050 * notifications for extended error reports
2051 */
2052static struct notifier_block *dasd_eer_chain;
2053
2054int
2055dasd_register_eer_notifier(struct notifier_block *nb)
2056{
2057 return notifier_chain_register(&dasd_eer_chain, nb);
2058}
2059
2060int
2061dasd_unregister_eer_notifier(struct notifier_block *nb)
2062{
2063 return notifier_chain_unregister(&dasd_eer_chain, nb);
2064}
2065
2066/*
2067 * Notify the registered error reporting module of a problem
2068 */
2069void
2070dasd_write_eer_trigger(unsigned int id, struct dasd_device *device,
2071 struct dasd_ccw_req *cqr)
2072{
2073 if (device->eer) {
2074 struct dasd_eer_trigger temp;
2075 temp.id = id;
2076 temp.device = device;
2077 temp.cqr = cqr;
2078 notifier_call_chain(&dasd_eer_chain, DASD_EER_TRIGGER,
2079 (void *)&temp);
2080 }
2081}
2082
2083/*
2084 * Tell the registered error reporting module to disable error reporting for
2085 * a given device and to cleanup any private data structures on that device.
2086 */
2087static void
2088dasd_disable_eer(struct dasd_device *device)
2089{
2090 notifier_call_chain(&dasd_eer_chain, DASD_EER_DISABLE, (void *)device);
2091}
2092
2093
2094static int __init 2044static int __init
2095dasd_init(void) 2045dasd_init(void)
2096{ 2046{
@@ -2172,11 +2122,6 @@ EXPORT_SYMBOL_GPL(dasd_generic_set_online);
2172EXPORT_SYMBOL_GPL(dasd_generic_set_offline); 2122EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
2173EXPORT_SYMBOL_GPL(dasd_generic_auto_online); 2123EXPORT_SYMBOL_GPL(dasd_generic_auto_online);
2174 2124
2175EXPORT_SYMBOL(dasd_register_eer_notifier);
2176EXPORT_SYMBOL(dasd_unregister_eer_notifier);
2177EXPORT_SYMBOL(dasd_write_eer_trigger);
2178
2179
2180/* 2125/*
2181 * Overrides for Emacs so that we follow Linus's tabbing style. 2126 * Overrides for Emacs so that we follow Linus's tabbing style.
2182 * Emacs will notice this stuff at the end of the file and automatically 2127 * Emacs will notice this stuff at the end of the file and automatically
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index c811380b9079..4ee0f934e325 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1108,9 +1108,6 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
1108 case 0x0B: 1108 case 0x0B:
1109 DEV_MESSAGE(KERN_WARNING, device, "%s", 1109 DEV_MESSAGE(KERN_WARNING, device, "%s",
1110 "FORMAT F - Volume is suspended duplex"); 1110 "FORMAT F - Volume is suspended duplex");
1111 /* call extended error reporting (EER) */
1112 dasd_write_eer_trigger(DASD_EER_PPRCSUSPEND, device,
1113 erp->refers);
1114 break; 1111 break;
1115 case 0x0C: 1112 case 0x0C:
1116 DEV_MESSAGE(KERN_WARNING, device, "%s", 1113 DEV_MESSAGE(KERN_WARNING, device, "%s",
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index e15dd7978050..bc3823d35223 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -29,7 +29,6 @@
29#define DASD_ECKD_CCW_PSF 0x27 29#define DASD_ECKD_CCW_PSF 0x27
30#define DASD_ECKD_CCW_RSSD 0x3e 30#define DASD_ECKD_CCW_RSSD 0x3e
31#define DASD_ECKD_CCW_LOCATE_RECORD 0x47 31#define DASD_ECKD_CCW_LOCATE_RECORD 0x47
32#define DASD_ECKD_CCW_SNSS 0x54
33#define DASD_ECKD_CCW_DEFINE_EXTENT 0x63 32#define DASD_ECKD_CCW_DEFINE_EXTENT 0x63
34#define DASD_ECKD_CCW_WRITE_MT 0x85 33#define DASD_ECKD_CCW_WRITE_MT 0x85
35#define DASD_ECKD_CCW_READ_MT 0x86 34#define DASD_ECKD_CCW_READ_MT 0x86
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
deleted file mode 100644
index f70cd7716b24..000000000000
--- a/drivers/s390/block/dasd_eer.c
+++ /dev/null
@@ -1,1090 +0,0 @@
1/*
2 * character device driver for extended error reporting
3 *
4 *
5 * Copyright (C) 2005 IBM Corporation
6 * extended error reporting for DASD ECKD devices
7 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
8 *
9 */
10
11#include <linux/init.h>
12#include <linux/fs.h>
13#include <linux/kernel.h>
14#include <linux/miscdevice.h>
15#include <linux/module.h>
16#include <linux/moduleparam.h>
17#include <linux/device.h>
18#include <linux/workqueue.h>
19#include <linux/poll.h>
20#include <linux/notifier.h>
21
22#include <asm/uaccess.h>
23#include <asm/semaphore.h>
24#include <asm/atomic.h>
25#include <asm/ebcdic.h>
26
27#include "dasd_int.h"
28#include "dasd_eckd.h"
29
30
31MODULE_LICENSE("GPL");
32
33MODULE_AUTHOR("Stefan Weinhuber <wein@de.ibm.com>");
34MODULE_DESCRIPTION("DASD extended error reporting module");
35
36
37#ifdef PRINTK_HEADER
38#undef PRINTK_HEADER
39#endif /* PRINTK_HEADER */
40#define PRINTK_HEADER "dasd(eer):"
41
42
43
44
45
46/*****************************************************************************/
47/* the internal buffer */
48/*****************************************************************************/
49
50/*
 51 * The internal buffer is meant to store opaque blobs of data, so it doesn't
52 * know of higher level concepts like triggers.
53 * It consists of a number of pages that are used as a ringbuffer. Each data
54 * blob is stored in a simple record that consists of an integer, which
 55 * contains the size of the following data, and the data bytes themselves.
56 *
57 * To allow for multiple independent readers we create one internal buffer
58 * each time the device is opened and destroy the buffer when the file is
59 * closed again.
60 *
61 * One record can be written to a buffer by using the functions
62 * - dasd_eer_start_record (one time per record to write the size to the buffer
63 * and reserve the space for the data)
64 * - dasd_eer_write_buffer (one or more times per record to write the data)
65 * The data can be written in several steps but you will have to compute
66 * the total size up front for the invocation of dasd_eer_start_record.
67 * If the ringbuffer is full, dasd_eer_start_record will remove the required
68 * number of old records.
69 *
70 * A record is typically read in two steps, first read the integer that
71 * specifies the size of the following data, then read the data.
72 * Both can be done by
73 * - dasd_eer_read_buffer
74 *
75 * For all mentioned functions you need to get the bufferlock first and keep it
76 * until a complete record is written or read.
77 */
78
79
80/*
 81 * All information necessary to keep track of an internal buffer is kept in
 82 * a struct eerbuffer. The buffer specific to a file pointer is stored in
83 * the private_data field of that file. To be able to write data to all
84 * existing buffers, each buffer is also added to the bufferlist.
85 * If the user doesn't want to read a complete record in one go, we have to
86 * keep track of the rest of the record. residual stores the number of bytes
87 * that are still to deliver. If the rest of the record is invalidated between
88 * two reads then residual will be set to -1 so that the next read will fail.
89 * All entries in the eerbuffer structure are protected with the bufferlock.
90 * To avoid races between writing to a buffer on the one side and creating
91 * and destroying buffers on the other side, the bufferlock must also be used
92 * to protect the bufferlist.
93 */
94
95struct eerbuffer {
96 struct list_head list;
97 char **buffer;
98 int buffersize;
99 int buffer_page_count;
100 int head;
101 int tail;
102 int residual;
103};
104
105LIST_HEAD(bufferlist);
106
107static spinlock_t bufferlock = SPIN_LOCK_UNLOCKED;
108
109DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue);
110
111/*
112 * How many free bytes are available on the buffer.
113 * needs to be called with bufferlock held
114 */
115static int
116dasd_eer_get_free_bytes(struct eerbuffer *eerb)
117{
118 if (eerb->head < eerb->tail) {
119 return eerb->tail - eerb->head - 1;
120 } else
121 return eerb->buffersize - eerb->head + eerb->tail -1;
122}
123
124/*
125 * How many bytes of buffer space are used.
126 * needs to be called with bufferlock held
127 */
128static int
129dasd_eer_get_filled_bytes(struct eerbuffer *eerb)
130{
131
132 if (eerb->head >= eerb->tail) {
133 return eerb->head - eerb->tail;
134 } else
135 return eerb->buffersize - eerb->tail + eerb->head;
136}
137
138/*
139 * The dasd_eer_write_buffer function just copies count bytes of data
140 * to the buffer. Make sure to call dasd_eer_start_record first, to
141 * make sure that enough free space is available.
142 * needs to be called with bufferlock held
143 */
144static void
145dasd_eer_write_buffer(struct eerbuffer *eerb, int count, char *data)
146{
147
148 unsigned long headindex,localhead;
149 unsigned long rest, len;
150 char *nextdata;
151
152 nextdata = data;
153 rest = count;
154 while (rest > 0) {
155 headindex = eerb->head / PAGE_SIZE;
156 localhead = eerb->head % PAGE_SIZE;
157 len = min(rest, (PAGE_SIZE - localhead));
158 memcpy(eerb->buffer[headindex]+localhead, nextdata, len);
159 nextdata += len;
160 rest -= len;
161 eerb->head += len;
162 if ( eerb->head == eerb->buffersize )
163 eerb->head = 0; /* wrap around */
164 if (eerb->head > eerb->buffersize) {
165 MESSAGE(KERN_ERR, "%s", "runaway buffer head.");
166 BUG();
167 }
168 }
169}
170
171/*
172 * needs to be called with bufferlock held
173 */
174static int
175dasd_eer_read_buffer(struct eerbuffer *eerb, int count, char *data)
176{
177
178 unsigned long tailindex,localtail;
179 unsigned long rest, len, finalcount;
180 char *nextdata;
181
182 finalcount = min(count, dasd_eer_get_filled_bytes(eerb));
183 nextdata = data;
184 rest = finalcount;
185 while (rest > 0) {
186 tailindex = eerb->tail / PAGE_SIZE;
187 localtail = eerb->tail % PAGE_SIZE;
188 len = min(rest, (PAGE_SIZE - localtail));
189 memcpy(nextdata, eerb->buffer[tailindex]+localtail, len);
190 nextdata += len;
191 rest -= len;
192 eerb->tail += len;
193 if ( eerb->tail == eerb->buffersize )
194 eerb->tail = 0; /* wrap around */
195 if (eerb->tail > eerb->buffersize) {
196 MESSAGE(KERN_ERR, "%s", "runaway buffer tail.");
197 BUG();
198 }
199 }
200 return finalcount;
201}
202
203/*
204 * Whenever you want to write a blob of data to the internal buffer you
205 * have to start by using this function first. It will write the number
206 * of bytes that will be written to the buffer. If necessary it will remove
207 * old records to make room for the new one.
208 * needs to be called with bufferlock held
209 */
210static int
211dasd_eer_start_record(struct eerbuffer *eerb, int count)
212{
213 int tailcount;
214 if (count + sizeof(count) > eerb->buffersize)
215 return -ENOMEM;
216 while (dasd_eer_get_free_bytes(eerb) < count + sizeof(count)) {
217 if (eerb->residual > 0) {
218 eerb->tail += eerb->residual;
219 if (eerb->tail >= eerb->buffersize)
220 eerb->tail -= eerb->buffersize;
221 eerb->residual = -1;
222 }
223 dasd_eer_read_buffer(eerb, sizeof(tailcount),
224 (char*)(&tailcount));
225 eerb->tail += tailcount;
226 if (eerb->tail >= eerb->buffersize)
227 eerb->tail -= eerb->buffersize;
228 }
229 dasd_eer_write_buffer(eerb, sizeof(count), (char*)(&count));
230
231 return 0;
232};
233
234/*
235 * release pages that are not used anymore
236 */
237static void
238dasd_eer_free_buffer_pages(char **buf, int no_pages)
239{
240 int i;
241
242 for (i = 0; i < no_pages; ++i) {
243 free_page((unsigned long)buf[i]);
244 }
245}
246
247/*
248 * allocate a new set of memory pages
249 */
250static int
251dasd_eer_allocate_buffer_pages(char **buf, int no_pages)
252{
253 int i;
254
255 for (i = 0; i < no_pages; ++i) {
256 buf[i] = (char *) get_zeroed_page(GFP_KERNEL);
257 if (!buf[i]) {
258 dasd_eer_free_buffer_pages(buf, i);
259 return -ENOMEM;
260 }
261 }
262 return 0;
263}
264
265/*
266 * empty the buffer by resetting head and tail
267 * In case there is a half read data blob in the buffer, we set residual
268 * to -1 to indicate that the remainder of the blob is lost.
269 */
270static void
271dasd_eer_purge_buffer(struct eerbuffer *eerb)
272{
273 unsigned long flags;
274
275 spin_lock_irqsave(&bufferlock, flags);
276 if (eerb->residual > 0)
277 eerb->residual = -1;
278 eerb->tail=0;
279 eerb->head=0;
280 spin_unlock_irqrestore(&bufferlock, flags);
281}
282
283/*
284 * set the size of the buffer, newsize is the new number of pages to be used
 285 * we don't try to copy any data back and forth, so any resize will also purge
286 * the buffer
287 */
288static int
289dasd_eer_resize_buffer(struct eerbuffer *eerb, int newsize)
290{
291 int i, oldcount, reuse;
292 char **new;
293 char **old;
294 unsigned long flags;
295
296 if (newsize < 1)
297 return -EINVAL;
298 if (eerb->buffer_page_count == newsize) {
 299 /* documented behaviour is that any successful invocation
300 * will purge all records */
301 dasd_eer_purge_buffer(eerb);
302 return 0;
303 }
304 new = kmalloc(newsize*sizeof(char*), GFP_KERNEL);
305 if (!new)
306 return -ENOMEM;
307
308 reuse=min(eerb->buffer_page_count, newsize);
309 for (i = 0; i < reuse; ++i) {
310 new[i] = eerb->buffer[i];
311 }
312 if (eerb->buffer_page_count < newsize) {
313 if (dasd_eer_allocate_buffer_pages(
314 &new[eerb->buffer_page_count],
315 newsize - eerb->buffer_page_count)) {
316 kfree(new);
317 return -ENOMEM;
318 }
319 }
320
321 spin_lock_irqsave(&bufferlock, flags);
322 old = eerb->buffer;
323 eerb->buffer = new;
324 if (eerb->residual > 0)
325 eerb->residual = -1;
326 eerb->tail = 0;
327 eerb->head = 0;
328 oldcount = eerb->buffer_page_count;
329 eerb->buffer_page_count = newsize;
330 spin_unlock_irqrestore(&bufferlock, flags);
331
332 if (oldcount > newsize) {
333 for (i = newsize; i < oldcount; ++i) {
334 free_page((unsigned long)old[i]);
335 }
336 }
337 kfree(old);
338
339 return 0;
340}
341
342
343/*****************************************************************************/
344/* The extended error reporting functionality */
345/*****************************************************************************/
346
347/*
348 * When a DASD device driver wants to report an error, it calls the
349 * function dasd_eer_write_trigger (via a notifier mechanism) and gives the
350 * respective trigger ID as parameter.
351 * Currently there are four kinds of triggers:
352 *
353 * DASD_EER_FATALERROR: all kinds of unrecoverable I/O problems
354 * DASD_EER_PPRCSUSPEND: PPRC was suspended
355 * DASD_EER_NOPATH: There is no path to the device left.
356 * DASD_EER_STATECHANGE: The state of the device has changed.
357 *
358 * For the first three triggers all required information can be supplied by
359 * the caller. For these triggers a record is written by the function
360 * dasd_eer_write_standard_trigger.
361 *
362 * When dasd_eer_write_trigger is called to write a DASD_EER_STATECHANGE
363 * trigger, we have to gather the necessary sense data first. We cannot queue
 364 * the necessary SNSS (sense subsystem status) request immediately, since we
 365 * are likely to run into a deadlock situation. Instead, we schedule a
366 * work_struct that calls the function dasd_eer_sense_subsystem_status to
367 * create and start an SNSS request asynchronously.
368 *
369 * To avoid memory allocations at runtime, the necessary memory is allocated
370 * when the extended error reporting is enabled for a device (by
371 * dasd_eer_probe). There is one private eer data structure for each eer
372 * enabled DASD device. It contains memory for the work_struct, one SNSS cqr
373 * and a flags field that is used to coordinate the use of the cqr. The call
374 * to write a state change trigger can come in at any time, so we have one flag
375 * CQR_IN_USE that protects the cqr itself. When this flag indicates that the
376 * cqr is currently in use, dasd_eer_sense_subsystem_status cannot start a
377 * second request but sets the SNSS_REQUESTED flag instead.
378 *
379 * When the request is finished, the callback function dasd_eer_SNSS_cb
380 * is called. This function will invoke the function
381 * dasd_eer_write_SNSS_trigger to finally write the trigger. It will also
382 * check the SNSS_REQUESTED flag and if it is set it will call
383 * dasd_eer_sense_subsystem_status again.
384 *
385 * To avoid race conditions during the handling of the lock, the flags must
386 * be protected by the snsslock.
387 */
388
389struct dasd_eer_private {
390 struct dasd_ccw_req *cqr;
391 unsigned long flags;
392 struct work_struct worker;
393};
394
395static void dasd_eer_destroy(struct dasd_device *device,
396 struct dasd_eer_private *eer);
397static int
398dasd_eer_write_trigger(struct dasd_eer_trigger *trigger);
399static void dasd_eer_sense_subsystem_status(void *data);
400static int dasd_eer_notify(struct notifier_block *self,
401 unsigned long action, void *data);
402
403struct workqueue_struct *dasd_eer_workqueue;
404
405#define SNSS_DATA_SIZE 44
406static spinlock_t snsslock = SPIN_LOCK_UNLOCKED;
407
408#define DASD_EER_BUSID_SIZE 10
409struct dasd_eer_header {
410 __u32 total_size;
411 __u32 trigger;
412 __u64 tv_sec;
413 __u64 tv_usec;
414 char busid[DASD_EER_BUSID_SIZE];
415} __attribute__ ((packed));
416
417static struct notifier_block dasd_eer_nb = {
418 .notifier_call = dasd_eer_notify,
419};
420
421/*
422 * flags for use with dasd_eer_private
423 */
424#define CQR_IN_USE 0
425#define SNSS_REQUESTED 1
426
427/*
428 * This function checks if extended error reporting is available for a given
429 * dasd_device. If yes, then it creates and returns a struct dasd_eer,
430 * otherwise it returns an -EPERM error pointer.
431 */
432struct dasd_eer_private *
433dasd_eer_probe(struct dasd_device *device)
434{
435 struct dasd_eer_private *private;
436
437 if (!(device && device->discipline
438 && !strcmp(device->discipline->name, "ECKD"))) {
439 return ERR_PTR(-EPERM);
440 }
441 /* allocate the private data structure */
442 private = (struct dasd_eer_private *)kmalloc(
443 sizeof(struct dasd_eer_private), GFP_KERNEL);
444 if (!private) {
445 return ERR_PTR(-ENOMEM);
446 }
447 INIT_WORK(&private->worker, dasd_eer_sense_subsystem_status,
448 (void *)device);
449 private->cqr = dasd_kmalloc_request("ECKD",
450 1 /* SNSS */ ,
451 SNSS_DATA_SIZE ,
452 device);
453 if (!private->cqr) {
454 kfree(private);
455 return ERR_PTR(-ENOMEM);
456 }
457 private->flags = 0;
458 return private;
459};
460
461/*
462 * If our private SNSS request is queued, remove it from the
463 * dasd ccw queue so we can free the requests memory.
464 */
465static void
466dasd_eer_dequeue_SNSS_request(struct dasd_device *device,
467 struct dasd_eer_private *eer)
468{
469 struct list_head *lst, *nxt;
470 struct dasd_ccw_req *cqr, *erpcqr;
471 dasd_erp_fn_t erp_fn;
472
473 spin_lock_irq(get_ccwdev_lock(device->cdev));
474 list_for_each_safe(lst, nxt, &device->ccw_queue) {
475 cqr = list_entry(lst, struct dasd_ccw_req, list);
 476 /* we are looking for two kinds of requests */
477 /* first kind: our SNSS request: */
478 if (cqr == eer->cqr) {
479 if (cqr->status == DASD_CQR_IN_IO)
480 device->discipline->term_IO(cqr);
481 list_del(&cqr->list);
482 break;
483 }
484 /* second kind: ERP requests for our SNSS request */
485 if (cqr->refers) {
486 /* If this erp request chain ends in our cqr, then */
 487 /* call the erp_postaction to clean it up */
488 erpcqr = cqr;
489 while (erpcqr->refers) {
490 erpcqr = erpcqr->refers;
491 }
492 if (erpcqr == eer->cqr) {
493 erp_fn = device->discipline->erp_postaction(
494 cqr);
495 erp_fn(cqr);
496 }
497 continue;
498 }
499 }
500 spin_unlock_irq(get_ccwdev_lock(device->cdev));
501}
502
503/*
504 * This function dismantles a struct dasd_eer that was created by
505 * dasd_eer_probe. Since we want to free our private data structure,
506 * we must make sure that the memory is not in use anymore.
507 * We have to flush the work queue and remove a possible SNSS request
508 * from the dasd queue.
509 */
510static void
511dasd_eer_destroy(struct dasd_device *device, struct dasd_eer_private *eer)
512{
513 flush_workqueue(dasd_eer_workqueue);
514 dasd_eer_dequeue_SNSS_request(device, eer);
515 dasd_kfree_request(eer->cqr, device);
516 kfree(eer);
517};
518
519/*
520 * enable the extended error reporting for a particular device
521 */
522static int
523dasd_eer_enable_on_device(struct dasd_device *device)
524{
525 void *eer;
526 if (!device)
527 return -ENODEV;
528 if (device->eer)
529 return 0;
530 if (!try_module_get(THIS_MODULE)) {
531 return -EINVAL;
532 }
533 eer = (void *)dasd_eer_probe(device);
534 if (IS_ERR(eer)) {
535 module_put(THIS_MODULE);
536 return PTR_ERR(eer);
537 }
538 device->eer = eer;
539 return 0;
540}
541
542/*
 543 * disable the extended error reporting for a particular device
544 */
545static int
546dasd_eer_disable_on_device(struct dasd_device *device)
547{
548 struct dasd_eer_private *eer = device->eer;
549
550 if (!device)
551 return -ENODEV;
552 if (!device->eer)
553 return 0;
554 device->eer = NULL;
555 dasd_eer_destroy(device,eer);
556 module_put(THIS_MODULE);
557
558 return 0;
559}
560
561/*
562 * Set extended error reporting (eer)
563 * Note: This will be registered as a DASD ioctl, to be called on DASD devices.
564 */
565static int
566dasd_ioctl_set_eer(struct block_device *bdev, int no, long args)
567{
568 struct dasd_device *device;
569 int intval;
570
571 if (!capable(CAP_SYS_ADMIN))
572 return -EACCES;
573 if (bdev != bdev->bd_contains)
574 /* Error-reporting is not allowed for partitions */
575 return -EINVAL;
576 if (get_user(intval, (int __user *) args))
577 return -EFAULT;
578 device = bdev->bd_disk->private_data;
579 if (device == NULL)
580 return -ENODEV;
581
582 intval = (intval != 0);
583 DEV_MESSAGE (KERN_DEBUG, device,
584 "set eer on device to %d", intval);
585 if (intval)
586 return dasd_eer_enable_on_device(device);
587 else
588 return dasd_eer_disable_on_device(device);
589}
590
591/*
592 * Get value of extended error reporting.
593 * Note: This will be registered as a DASD ioctl, to be called on DASD devices.
594 */
595static int
596dasd_ioctl_get_eer(struct block_device *bdev, int no, long args)
597{
598 struct dasd_device *device;
599
600 device = bdev->bd_disk->private_data;
601 if (device == NULL)
602 return -ENODEV;
603 return put_user((device->eer != NULL), (int __user *) args);
604}
605
606/*
607 * The following function can be used for those triggers that have
608 * all necessary data available when the function is called.
609 * If the parameter cqr is not NULL, the chain of requests will be searched
610 * for valid sense data, and all valid sense data sets will be added to
611 * the triggers data.
612 */
613static int
614dasd_eer_write_standard_trigger(int trigger, struct dasd_device *device,
615 struct dasd_ccw_req *cqr)
616{
617 struct dasd_ccw_req *temp_cqr;
618 int data_size;
619 struct timeval tv;
620 struct dasd_eer_header header;
621 unsigned long flags;
622 struct eerbuffer *eerb;
623
624 /* go through cqr chain and count the valid sense data sets */
625 temp_cqr = cqr;
626 data_size = 0;
627 while (temp_cqr) {
628 if (temp_cqr->irb.esw.esw0.erw.cons)
629 data_size += 32;
630 temp_cqr = temp_cqr->refers;
631 }
632
633 header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
634 header.trigger = trigger;
635 do_gettimeofday(&tv);
636 header.tv_sec = tv.tv_sec;
637 header.tv_usec = tv.tv_usec;
638 strncpy(header.busid, device->cdev->dev.bus_id, DASD_EER_BUSID_SIZE);
639
640 spin_lock_irqsave(&bufferlock, flags);
641 list_for_each_entry(eerb, &bufferlist, list) {
642 dasd_eer_start_record(eerb, header.total_size);
643 dasd_eer_write_buffer(eerb, sizeof(header), (char*)(&header));
644 temp_cqr = cqr;
645 while (temp_cqr) {
646 if (temp_cqr->irb.esw.esw0.erw.cons)
647 dasd_eer_write_buffer(eerb, 32, cqr->irb.ecw);
648 temp_cqr = temp_cqr->refers;
649 }
650 dasd_eer_write_buffer(eerb, 4,"EOR");
651 }
652 spin_unlock_irqrestore(&bufferlock, flags);
653
654 wake_up_interruptible(&dasd_eer_read_wait_queue);
655
656 return 0;
657}
658
659/*
660 * This function writes a DASD_EER_STATECHANGE trigger.
661 */
662static void
663dasd_eer_write_SNSS_trigger(struct dasd_device *device,
664 struct dasd_ccw_req *cqr)
665{
666 int data_size;
667 int snss_rc;
668 struct timeval tv;
669 struct dasd_eer_header header;
670 unsigned long flags;
671 struct eerbuffer *eerb;
672
673 snss_rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
674 if (snss_rc)
675 data_size = 0;
676 else
677 data_size = SNSS_DATA_SIZE;
678
679 header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
680 header.trigger = DASD_EER_STATECHANGE;
681 do_gettimeofday(&tv);
682 header.tv_sec = tv.tv_sec;
683 header.tv_usec = tv.tv_usec;
684 strncpy(header.busid, device->cdev->dev.bus_id, DASD_EER_BUSID_SIZE);
685
686 spin_lock_irqsave(&bufferlock, flags);
687 list_for_each_entry(eerb, &bufferlist, list) {
688 dasd_eer_start_record(eerb, header.total_size);
689 dasd_eer_write_buffer(eerb, sizeof(header),(char*)(&header));
690 if (!snss_rc)
691 dasd_eer_write_buffer(eerb, SNSS_DATA_SIZE, cqr->data);
692 dasd_eer_write_buffer(eerb, 4,"EOR");
693 }
694 spin_unlock_irqrestore(&bufferlock, flags);
695
696 wake_up_interruptible(&dasd_eer_read_wait_queue);
697}
698
699/*
700 * callback function for use with SNSS request
701 */
702static void
703dasd_eer_SNSS_cb(struct dasd_ccw_req *cqr, void *data)
704{
705 struct dasd_device *device;
706 struct dasd_eer_private *private;
707 unsigned long irqflags;
708
709 device = (struct dasd_device *)data;
710 private = (struct dasd_eer_private *)device->eer;
711 dasd_eer_write_SNSS_trigger(device, cqr);
712 spin_lock_irqsave(&snsslock, irqflags);
713 if(!test_and_clear_bit(SNSS_REQUESTED, &private->flags)) {
714 clear_bit(CQR_IN_USE, &private->flags);
715 spin_unlock_irqrestore(&snsslock, irqflags);
716 return;
717 };
718 clear_bit(CQR_IN_USE, &private->flags);
719 spin_unlock_irqrestore(&snsslock, irqflags);
720 dasd_eer_sense_subsystem_status(device);
721 return;
722}
723
724/*
725 * clean a used cqr before using it again
726 */
727static void
728dasd_eer_clean_SNSS_request(struct dasd_ccw_req *cqr)
729{
730 struct ccw1 *cpaddr = cqr->cpaddr;
731 void *data = cqr->data;
732
733 memset(cqr, 0, sizeof(struct dasd_ccw_req));
734 memset(cpaddr, 0, sizeof(struct ccw1));
735 memset(data, 0, SNSS_DATA_SIZE);
736 cqr->cpaddr = cpaddr;
737 cqr->data = data;
738 strncpy((char *) &cqr->magic, "ECKD", 4);
739 ASCEBC((char *) &cqr->magic, 4);
740 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
741}
742
743/*
744 * build and start an SNSS request
745 * This function is called from a work queue so we have to
746 * pass the dasd_device pointer as a void pointer.
747 */
748static void
749dasd_eer_sense_subsystem_status(void *data)
750{
751 struct dasd_device *device;
752 struct dasd_eer_private *private;
753 struct dasd_ccw_req *cqr;
754 struct ccw1 *ccw;
755 unsigned long irqflags;
756
757 device = (struct dasd_device *)data;
758 private = (struct dasd_eer_private *)device->eer;
759 if (!private) /* device not eer enabled any more */
760 return;
761 cqr = private->cqr;
762 spin_lock_irqsave(&snsslock, irqflags);
763 if(test_and_set_bit(CQR_IN_USE, &private->flags)) {
764 set_bit(SNSS_REQUESTED, &private->flags);
765 spin_unlock_irqrestore(&snsslock, irqflags);
766 return;
767 };
768 spin_unlock_irqrestore(&snsslock, irqflags);
769 dasd_eer_clean_SNSS_request(cqr);
770 cqr->device = device;
771 cqr->retries = 255;
772 cqr->expires = 10 * HZ;
773
774 ccw = cqr->cpaddr;
775 ccw->cmd_code = DASD_ECKD_CCW_SNSS;
776 ccw->count = SNSS_DATA_SIZE;
777 ccw->flags = 0;
778 ccw->cda = (__u32)(addr_t)cqr->data;
779
780 cqr->buildclk = get_clock();
781 cqr->status = DASD_CQR_FILLED;
782 cqr->callback = dasd_eer_SNSS_cb;
783 cqr->callback_data = (void *)device;
784 dasd_add_request_head(cqr);
785
786 return;
787}
788
789/*
790 * This function is called for all triggers. It calls the appropriate
791 * function that writes the actual trigger records.
792 */
793static int
794dasd_eer_write_trigger(struct dasd_eer_trigger *trigger)
795{
796 int rc;
797 struct dasd_eer_private *private = trigger->device->eer;
798
799 switch (trigger->id) {
800 case DASD_EER_FATALERROR:
801 case DASD_EER_PPRCSUSPEND:
802 rc = dasd_eer_write_standard_trigger(
803 trigger->id, trigger->device, trigger->cqr);
804 break;
805 case DASD_EER_NOPATH:
806 rc = dasd_eer_write_standard_trigger(
807 trigger->id, trigger->device, NULL);
808 break;
809 case DASD_EER_STATECHANGE:
810 if (queue_work(dasd_eer_workqueue, &private->worker)) {
811 rc=0;
812 } else {
813 /* If the work_struct was already queued, it can't
814 * be queued again. But this is OK since we don't
815 * need to have it queued twice.
816 */
817 rc = -EBUSY;
818 }
819 break;
820 default: /* unknown trigger, so we write it without any sense data */
821 rc = dasd_eer_write_standard_trigger(
822 trigger->id, trigger->device, NULL);
823 break;
824 }
825 return rc;
826}
827
828/*
829 * This function is registered with the dasd device driver and gets called
830 * for all dasd eer notifications.
831 */
832static int dasd_eer_notify(struct notifier_block *self,
833 unsigned long action, void *data)
834{
835 switch (action) {
836 case DASD_EER_DISABLE:
837 dasd_eer_disable_on_device((struct dasd_device *)data);
838 break;
839 case DASD_EER_TRIGGER:
840 dasd_eer_write_trigger((struct dasd_eer_trigger *)data);
841 break;
842 }
843 return NOTIFY_OK;
844}
845
846
847/*****************************************************************************/
848/* the device operations */
849/*****************************************************************************/
850
851/*
852 * On the one side we need a lock to access our internal buffer, on the
853 * other side a copy_to_user can sleep. So we need to copy the data we have
854 * to transfer in a readbuffer, which is protected by the readbuffer_mutex.
855 */
856static char readbuffer[PAGE_SIZE];
857DECLARE_MUTEX(readbuffer_mutex);
858
859
860static int
861dasd_eer_open(struct inode *inp, struct file *filp)
862{
863 struct eerbuffer *eerb;
864 unsigned long flags;
865
866 eerb = kmalloc(sizeof(struct eerbuffer), GFP_KERNEL);
867 eerb->head = 0;
868 eerb->tail = 0;
869 eerb->residual = 0;
870 eerb->buffer_page_count = 1;
871 eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
872 eerb->buffer = kmalloc(eerb->buffer_page_count*sizeof(char*),
873 GFP_KERNEL);
874 if (!eerb->buffer)
875 return -ENOMEM;
876 if (dasd_eer_allocate_buffer_pages(eerb->buffer,
877 eerb->buffer_page_count)) {
878 kfree(eerb->buffer);
879 return -ENOMEM;
880 }
881 filp->private_data = eerb;
882 spin_lock_irqsave(&bufferlock, flags);
883 list_add(&eerb->list, &bufferlist);
884 spin_unlock_irqrestore(&bufferlock, flags);
885
886 return nonseekable_open(inp,filp);
887}
888
889static int
890dasd_eer_close(struct inode *inp, struct file *filp)
891{
892 struct eerbuffer *eerb;
893 unsigned long flags;
894
895 eerb = (struct eerbuffer *)filp->private_data;
896 spin_lock_irqsave(&bufferlock, flags);
897 list_del(&eerb->list);
898 spin_unlock_irqrestore(&bufferlock, flags);
899 dasd_eer_free_buffer_pages(eerb->buffer, eerb->buffer_page_count);
900 kfree(eerb->buffer);
901 kfree(eerb);
902
903 return 0;
904}
905
906static long
907dasd_eer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
908{
909 int intval;
910 struct eerbuffer *eerb;
911
912 eerb = (struct eerbuffer *)filp->private_data;
913 switch (cmd) {
914 case DASD_EER_PURGE:
915 dasd_eer_purge_buffer(eerb);
916 return 0;
917 case DASD_EER_SETBUFSIZE:
918 if (get_user(intval, (int __user *)arg))
919 return -EFAULT;
920 return dasd_eer_resize_buffer(eerb, intval);
921 default:
922 return -ENOIOCTLCMD;
923 }
924}
925
926static ssize_t
927dasd_eer_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
928{
929 int tc,rc;
930 int tailcount,effective_count;
931 unsigned long flags;
932 struct eerbuffer *eerb;
933
934 eerb = (struct eerbuffer *)filp->private_data;
935 if(down_interruptible(&readbuffer_mutex))
936 return -ERESTARTSYS;
937
938 spin_lock_irqsave(&bufferlock, flags);
939
940 if (eerb->residual < 0) { /* the remainder of this record */
941 /* has been deleted */
942 eerb->residual = 0;
943 spin_unlock_irqrestore(&bufferlock, flags);
944 up(&readbuffer_mutex);
945 return -EIO;
946 } else if (eerb->residual > 0) {
947 /* OK we still have a second half of a record to deliver */
948 effective_count = min(eerb->residual, (int)count);
949 eerb->residual -= effective_count;
950 } else {
951 tc = 0;
952 while (!tc) {
953 tc = dasd_eer_read_buffer(eerb,
954 sizeof(tailcount), (char*)(&tailcount));
955 if (!tc) {
956 /* no data available */
957 spin_unlock_irqrestore(&bufferlock, flags);
958 up(&readbuffer_mutex);
959 if (filp->f_flags & O_NONBLOCK)
960 return -EAGAIN;
961 rc = wait_event_interruptible(
962 dasd_eer_read_wait_queue,
963 eerb->head != eerb->tail);
964 if (rc) {
965 return rc;
966 }
967 if(down_interruptible(&readbuffer_mutex))
968 return -ERESTARTSYS;
969 spin_lock_irqsave(&bufferlock, flags);
970 }
971 }
972 WARN_ON(tc != sizeof(tailcount));
973 effective_count = min(tailcount,(int)count);
974 eerb->residual = tailcount - effective_count;
975 }
976
977 tc = dasd_eer_read_buffer(eerb, effective_count, readbuffer);
978 WARN_ON(tc != effective_count);
979
980 spin_unlock_irqrestore(&bufferlock, flags);
981
982 if (copy_to_user(buf, readbuffer, effective_count)) {
983 up(&readbuffer_mutex);
984 return -EFAULT;
985 }
986
987 up(&readbuffer_mutex);
988 return effective_count;
989}
990
991static unsigned int
992dasd_eer_poll (struct file *filp, poll_table *ptable)
993{
994 unsigned int mask;
995 unsigned long flags;
996 struct eerbuffer *eerb;
997
998 eerb = (struct eerbuffer *)filp->private_data;
999 poll_wait(filp, &dasd_eer_read_wait_queue, ptable);
1000 spin_lock_irqsave(&bufferlock, flags);
1001 if (eerb->head != eerb->tail)
1002 mask = POLLIN | POLLRDNORM ;
1003 else
1004 mask = 0;
1005 spin_unlock_irqrestore(&bufferlock, flags);
1006 return mask;
1007}
1008
1009static struct file_operations dasd_eer_fops = {
1010 .open = &dasd_eer_open,
1011 .release = &dasd_eer_close,
1012 .unlocked_ioctl = &dasd_eer_ioctl,
1013 .compat_ioctl = &dasd_eer_ioctl,
1014 .read = &dasd_eer_read,
1015 .poll = &dasd_eer_poll,
1016 .owner = THIS_MODULE,
1017};
1018
1019static struct miscdevice dasd_eer_dev = {
1020 .minor = MISC_DYNAMIC_MINOR,
1021 .name = "dasd_eer",
1022 .fops = &dasd_eer_fops,
1023};
1024
1025
1026/*****************************************************************************/
1027/* Init and exit */
1028/*****************************************************************************/
1029
1030static int
1031__init dasd_eer_init(void)
1032{
1033 int rc;
1034
1035 dasd_eer_workqueue = create_singlethread_workqueue("dasd_eer");
1036 if (!dasd_eer_workqueue) {
1037 MESSAGE(KERN_ERR , "%s", "dasd_eer_init could not "
1038 "create workqueue \n");
1039 rc = -ENOMEM;
1040 goto out;
1041 }
1042
1043 rc = dasd_register_eer_notifier(&dasd_eer_nb);
1044 if (rc) {
1045 MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not "
1046 "register error reporting");
1047 goto queue;
1048 }
1049
1050 dasd_ioctl_no_register(THIS_MODULE, BIODASDEERSET, dasd_ioctl_set_eer);
1051 dasd_ioctl_no_register(THIS_MODULE, BIODASDEERGET, dasd_ioctl_get_eer);
1052
1053	/* we don't need our own character device,
1054	 * so we just register as a misc device */
1055 rc = misc_register(&dasd_eer_dev);
1056 if (rc) {
1057 MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not "
1058 "register misc device");
1059 goto unregister;
1060 }
1061
1062 return 0;
1063
1064unregister:
1065 dasd_unregister_eer_notifier(&dasd_eer_nb);
1066 dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERSET,
1067 dasd_ioctl_set_eer);
1068 dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERGET,
1069 dasd_ioctl_get_eer);
1070queue:
1071 destroy_workqueue(dasd_eer_workqueue);
1072out:
1073 return rc;
1074
1075}
1076module_init(dasd_eer_init);
1077
1078static void
1079__exit dasd_eer_exit(void)
1080{
1081 dasd_unregister_eer_notifier(&dasd_eer_nb);
1082 dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERSET,
1083 dasd_ioctl_set_eer);
1084 dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERGET,
1085 dasd_ioctl_get_eer);
1086 destroy_workqueue(dasd_eer_workqueue);
1087
1088 WARN_ON(misc_deregister(&dasd_eer_dev) != 0);
1089}
1090module_exit(dasd_eer_exit);
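
The file operations above give user space a simple consumer model: open the misc device, poll for new records, and read them; with O_NONBLOCK an empty buffer yields -EAGAIN instead of blocking. The following is a minimal illustrative sketch only, assuming the node appears as /dev/dasd_eer (derived from the miscdevice name) and treating the returned error-report records as opaque bytes.

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	struct pollfd pfd;
	ssize_t n;

	/* assumed device node, created for the "dasd_eer" misc device */
	pfd.fd = open("/dev/dasd_eer", O_RDONLY);
	if (pfd.fd < 0) {
		perror("open /dev/dasd_eer");
		return 1;
	}
	pfd.events = POLLIN;

	for (;;) {
		/* poll() maps to dasd_eer_poll(): POLLIN once head != tail */
		if (poll(&pfd, 1, -1) < 0)
			break;
		/* each read() delivers up to one record (or its remainder) */
		n = read(pfd.fd, buf, sizeof(buf));
		if (n <= 0)
			break;
		printf("got %zd bytes of extended error report data\n", n);
	}
	close(pfd.fd);
	return 0;
}

A non-blocking variant would open with O_RDONLY | O_NONBLOCK and treat -EAGAIN from read() as "no record available yet", matching the O_NONBLOCK branch in dasd_eer_read() above.
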
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index d1b08fa13fd2..0592354cc604 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -275,34 +275,6 @@ struct dasd_discipline {
275 275
276extern struct dasd_discipline *dasd_diag_discipline_pointer; 276extern struct dasd_discipline *dasd_diag_discipline_pointer;
277 277
278
279/*
280 * Notification numbers for extended error reporting notifications:
281 * The DASD_EER_DISABLE notification is sent before a dasd_device (and its
282 * eer pointer) is freed. The error reporting module needs to do all necessary
283 * cleanup steps.
284 * The DASD_EER_TRIGGER notification sends the actual error reports (triggers).
285 */
286#define DASD_EER_DISABLE 0
287#define DASD_EER_TRIGGER 1
288
289/* Trigger IDs for extended error reporting DASD_EER_TRIGGER notification */
290#define DASD_EER_FATALERROR 1
291#define DASD_EER_NOPATH 2
292#define DASD_EER_STATECHANGE 3
293#define DASD_EER_PPRCSUSPEND 4
294
295/*
296 * The dasd_eer_trigger structure contains all data that we need to send
297 * along with an DASD_EER_TRIGGER notification.
298 */
299struct dasd_eer_trigger {
300 unsigned int id;
301 struct dasd_device *device;
302 struct dasd_ccw_req *cqr;
303};
304
305
306struct dasd_device { 278struct dasd_device {
307 /* Block device stuff. */ 279 /* Block device stuff. */
308 struct gendisk *gdp; 280 struct gendisk *gdp;
@@ -316,11 +288,9 @@ struct dasd_device {
316 unsigned long flags; /* per device flags */ 288 unsigned long flags; /* per device flags */
317 unsigned short features; /* copy of devmap-features (read-only!) */ 289 unsigned short features; /* copy of devmap-features (read-only!) */
318 290
319 /* extended error reporting stuff (eer) */
320 void *eer;
321
322 /* Device discipline stuff. */ 291 /* Device discipline stuff. */
323 struct dasd_discipline *discipline; 292 struct dasd_discipline *discipline;
293 struct dasd_discipline *base_discipline;
324 char *private; 294 char *private;
325 295
326 /* Device state and target state. */ 296 /* Device state and target state. */
@@ -519,12 +489,6 @@ int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
519int dasd_generic_set_offline (struct ccw_device *cdev); 489int dasd_generic_set_offline (struct ccw_device *cdev);
520int dasd_generic_notify(struct ccw_device *, int); 490int dasd_generic_notify(struct ccw_device *, int);
521void dasd_generic_auto_online (struct ccw_driver *); 491void dasd_generic_auto_online (struct ccw_driver *);
522int dasd_register_eer_notifier(struct notifier_block *);
523int dasd_unregister_eer_notifier(struct notifier_block *);
524void dasd_write_eer_trigger(unsigned int , struct dasd_device *,
525 struct dasd_ccw_req *);
526
527
528 492
529/* externals in dasd_devmap.c */ 493/* externals in dasd_devmap.c */
530extern int dasd_max_devindex; 494extern int dasd_max_devindex;
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 45ce032772f4..9ed37dc9a1b0 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -165,8 +165,13 @@ qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
165 q_no = q->q_no; 165 q_no = q->q_no;
166 if(!q->is_input_q) 166 if(!q->is_input_q)
167 q_no += irq->no_input_qs; 167 q_no += irq->no_input_qs;
168again:
168 ccq = do_eqbs(irq->sch_token, state, q_no, start, cnt); 169 ccq = do_eqbs(irq->sch_token, state, q_no, start, cnt);
169 rc = qdio_check_ccq(q, ccq); 170 rc = qdio_check_ccq(q, ccq);
171 if (rc == 1) {
172 QDIO_DBF_TEXT5(1,trace,"eqAGAIN");
173 goto again;
174 }
170 if (rc < 0) { 175 if (rc < 0) {
171 QDIO_DBF_TEXT2(1,trace,"eqberr"); 176 QDIO_DBF_TEXT2(1,trace,"eqberr");
172 sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt, *cnt, ccq, q_no); 177 sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt, *cnt, ccq, q_no);
@@ -195,8 +200,13 @@ qdio_do_sqbs(struct qdio_q *q, unsigned char state,
195 q_no = q->q_no; 200 q_no = q->q_no;
196 if(!q->is_input_q) 201 if(!q->is_input_q)
197 q_no += irq->no_input_qs; 202 q_no += irq->no_input_qs;
203again:
198 ccq = do_sqbs(irq->sch_token, state, q_no, start, cnt); 204 ccq = do_sqbs(irq->sch_token, state, q_no, start, cnt);
199 rc = qdio_check_ccq(q, ccq); 205 rc = qdio_check_ccq(q, ccq);
206 if (rc == 1) {
207 QDIO_DBF_TEXT5(1,trace,"sqAGAIN");
208 goto again;
209 }
200 if (rc < 0) { 210 if (rc < 0) {
201 QDIO_DBF_TEXT3(1,trace,"sqberr"); 211 QDIO_DBF_TEXT3(1,trace,"sqberr");
202 sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt,*cnt,ccq,q_no); 212 sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt,*cnt,ccq,q_no);
@@ -1187,8 +1197,7 @@ tiqdio_is_inbound_q_done(struct qdio_q *q)
1187 1197
1188 if (!no_used) 1198 if (!no_used)
1189 return 1; 1199 return 1;
1190 1200 if (!q->siga_sync && !irq->is_qebsm)
1191 if (!q->siga_sync)
1192 /* we'll check for more primed buffers in qeth_stop_polling */ 1201 /* we'll check for more primed buffers in qeth_stop_polling */
1193 return 0; 1202 return 0;
1194 if (irq->is_qebsm) { 1203 if (irq->is_qebsm) {
diff --git a/drivers/scsi/esp.c b/drivers/scsi/esp.c
index f6900538be90..87a8c3d2072c 100644
--- a/drivers/scsi/esp.c
+++ b/drivers/scsi/esp.c
@@ -2068,14 +2068,12 @@ static int esp_reset(struct scsi_cmnd *SCptr)
2068{ 2068{
2069 struct esp *esp = (struct esp *) SCptr->device->host->hostdata; 2069 struct esp *esp = (struct esp *) SCptr->device->host->hostdata;
2070 2070
2071 spin_lock_irq(esp->ehost->host_lock);
2071 (void) esp_do_resetbus(esp); 2072 (void) esp_do_resetbus(esp);
2072
2073 spin_unlock_irq(esp->ehost->host_lock); 2073 spin_unlock_irq(esp->ehost->host_lock);
2074 2074
2075 wait_event(esp->reset_queue, (esp->resetting_bus == 0)); 2075 wait_event(esp->reset_queue, (esp->resetting_bus == 0));
2076 2076
2077 spin_lock_irq(esp->ehost->host_lock);
2078
2079 return SUCCESS; 2077 return SUCCESS;
2080} 2078}
2081 2079
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 7ddd5a69352a..5f1d7580218d 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -2514,7 +2514,7 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
2514 assert(sg != NULL); 2514 assert(sg != NULL);
2515 2515
2516 if (qc->flags & ATA_QCFLAG_SINGLE) 2516 if (qc->flags & ATA_QCFLAG_SINGLE)
2517 assert(qc->n_elem == 1); 2517 assert(qc->n_elem <= 1);
2518 2518
2519 VPRINTK("unmapping %u sg elements\n", qc->n_elem); 2519 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
2520 2520
@@ -2537,7 +2537,7 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
2537 kunmap_atomic(addr, KM_IRQ0); 2537 kunmap_atomic(addr, KM_IRQ0);
2538 } 2538 }
2539 } else { 2539 } else {
2540 if (sg_dma_len(&sg[0]) > 0) 2540 if (qc->n_elem)
2541 dma_unmap_single(ap->host_set->dev, 2541 dma_unmap_single(ap->host_set->dev,
2542 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]), 2542 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
2543 dir); 2543 dir);
@@ -2570,7 +2570,7 @@ static void ata_fill_sg(struct ata_queued_cmd *qc)
2570 unsigned int idx; 2570 unsigned int idx;
2571 2571
2572 assert(qc->__sg != NULL); 2572 assert(qc->__sg != NULL);
2573 assert(qc->n_elem > 0); 2573 assert(qc->n_elem > 0 || qc->pad_len > 0);
2574 2574
2575 idx = 0; 2575 idx = 0;
2576 ata_for_each_sg(sg, qc) { 2576 ata_for_each_sg(sg, qc) {
@@ -2715,6 +2715,7 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2715 int dir = qc->dma_dir; 2715 int dir = qc->dma_dir;
2716 struct scatterlist *sg = qc->__sg; 2716 struct scatterlist *sg = qc->__sg;
2717 dma_addr_t dma_address; 2717 dma_addr_t dma_address;
2718 int trim_sg = 0;
2718 2719
2719 /* we must lengthen transfers to end on a 32-bit boundary */ 2720 /* we must lengthen transfers to end on a 32-bit boundary */
2720 qc->pad_len = sg->length & 3; 2721 qc->pad_len = sg->length & 3;
@@ -2734,13 +2735,15 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2734 sg_dma_len(psg) = ATA_DMA_PAD_SZ; 2735 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
2735 /* trim sg */ 2736 /* trim sg */
2736 sg->length -= qc->pad_len; 2737 sg->length -= qc->pad_len;
2738 if (sg->length == 0)
2739 trim_sg = 1;
2737 2740
2738 DPRINTK("padding done, sg->length=%u pad_len=%u\n", 2741 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
2739 sg->length, qc->pad_len); 2742 sg->length, qc->pad_len);
2740 } 2743 }
2741 2744
2742 if (!sg->length) { 2745 if (trim_sg) {
2743 sg_dma_address(sg) = 0; 2746 qc->n_elem--;
2744 goto skip_map; 2747 goto skip_map;
2745 } 2748 }
2746 2749
@@ -2753,9 +2756,9 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2753 } 2756 }
2754 2757
2755 sg_dma_address(sg) = dma_address; 2758 sg_dma_address(sg) = dma_address;
2756skip_map:
2757 sg_dma_len(sg) = sg->length; 2759 sg_dma_len(sg) = sg->length;
2758 2760
2761skip_map:
2759 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg), 2762 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
2760 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); 2763 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2761 2764
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index de05e2883f9c..80480f0fb2b8 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -277,7 +277,7 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
277 u8 *prd = pp->pkt + QS_CPB_BYTES; 277 u8 *prd = pp->pkt + QS_CPB_BYTES;
278 278
279 assert(qc->__sg != NULL); 279 assert(qc->__sg != NULL);
280 assert(qc->n_elem > 0); 280 assert(qc->n_elem > 0 || qc->pad_len > 0);
281 281
282 nelem = 0; 282 nelem = 0;
283 ata_for_each_sg(sg, qc) { 283 ata_for_each_sg(sg, qc) {
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 791c4dc550ae..94f5e8ed83a7 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -90,7 +90,7 @@ static int spi_suspend(struct device *dev, pm_message_t message)
90 int value; 90 int value;
91 struct spi_driver *drv = to_spi_driver(dev->driver); 91 struct spi_driver *drv = to_spi_driver(dev->driver);
92 92
93 if (!drv->suspend) 93 if (!drv || !drv->suspend)
94 return 0; 94 return 0;
95 95
96 /* suspend will stop irqs and dma; no more i/o */ 96 /* suspend will stop irqs and dma; no more i/o */
@@ -105,7 +105,7 @@ static int spi_resume(struct device *dev)
105 int value; 105 int value;
106 struct spi_driver *drv = to_spi_driver(dev->driver); 106 struct spi_driver *drv = to_spi_driver(dev->driver);
107 107
108 if (!drv->resume) 108 if (!drv || !drv->resume)
109 return 0; 109 return 0;
110 110
111 /* resume may restart the i/o queue */ 111 /* resume may restart the i/o queue */
@@ -449,7 +449,6 @@ void spi_unregister_master(struct spi_master *master)
449{ 449{
450 (void) device_for_each_child(master->cdev.dev, NULL, __unregister); 450 (void) device_for_each_child(master->cdev.dev, NULL, __unregister);
451 class_device_unregister(&master->cdev); 451 class_device_unregister(&master->cdev);
452 master->cdev.dev = NULL;
453} 452}
454EXPORT_SYMBOL_GPL(spi_unregister_master); 453EXPORT_SYMBOL_GPL(spi_unregister_master);
455 454
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c
index 556895e99645..1f8d805c61e5 100644
--- a/drivers/video/aty/radeon_pm.c
+++ b/drivers/video/aty/radeon_pm.c
@@ -1321,8 +1321,6 @@ static void radeon_pm_full_reset_sdram(struct radeonfb_info *rinfo)
1321 mdelay( 15); 1321 mdelay( 15);
1322} 1322}
1323 1323
1324#ifdef CONFIG_PPC_OF
1325
1326static void radeon_pm_reset_pad_ctlr_strength(struct radeonfb_info *rinfo) 1324static void radeon_pm_reset_pad_ctlr_strength(struct radeonfb_info *rinfo)
1327{ 1325{
1328 u32 tmp, tmp2; 1326 u32 tmp, tmp2;
@@ -1836,6 +1834,8 @@ static void radeon_reinitialize_M10(struct radeonfb_info *rinfo)
1836 radeon_pm_m10_enable_lvds_spread_spectrum(rinfo); 1834 radeon_pm_m10_enable_lvds_spread_spectrum(rinfo);
1837} 1835}
1838 1836
1837#ifdef CONFIG_PPC_OF
1838
1839static void radeon_pm_m9p_reconfigure_mc(struct radeonfb_info *rinfo) 1839static void radeon_pm_m9p_reconfigure_mc(struct radeonfb_info *rinfo)
1840{ 1840{
1841 OUTREG(MC_CNTL, rinfo->save_regs[46]); 1841 OUTREG(MC_CNTL, rinfo->save_regs[46]);
@@ -2728,13 +2728,23 @@ void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk)
2728 printk("radeonfb: Dynamic Clock Power Management disabled\n"); 2728 printk("radeonfb: Dynamic Clock Power Management disabled\n");
2729 } 2729 }
2730 2730
2731#if defined(CONFIG_PM)
2731 /* Check if we can power manage on suspend/resume. We can do 2732 /* Check if we can power manage on suspend/resume. We can do
2732 * D2 on M6, M7 and M9, and we can resume from D3 cold a few other 2733 * D2 on M6, M7 and M9, and we can resume from D3 cold a few other
2733 * "Mac" cards, but that's all. We need more info about what the	2734 * "Mac" cards, but that's all. We need more info about what the
2734 * BIOS does though. Right now, all this PM stuff is pmac-only for that	2735 * BIOS does though. Right now, all this PM stuff is pmac-only for that
2735 * reason. --BenH 2736 * reason. --BenH
2736 */ 2737 */
2737#if defined(CONFIG_PM) && defined(CONFIG_PPC_PMAC) 2738 /* Special case for Samsung P35 laptops
2739 */
2740 if ((rinfo->pdev->vendor == PCI_VENDOR_ID_ATI) &&
2741 (rinfo->pdev->device == PCI_CHIP_RV350_NP) &&
2742 (rinfo->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG) &&
2743 (rinfo->pdev->subsystem_device == 0xc00c)) {
2744 rinfo->reinit_func = radeon_reinitialize_M10;
2745 rinfo->pm_mode |= radeon_pm_off;
2746 }
2747#if defined(CONFIG_PPC_PMAC)
2738 if (_machine == _MACH_Pmac && rinfo->of_node) { 2748 if (_machine == _MACH_Pmac && rinfo->of_node) {
2739 if (rinfo->is_mobility && rinfo->pm_reg && 2749 if (rinfo->is_mobility && rinfo->pm_reg &&
2740 rinfo->family <= CHIP_FAMILY_RV250) 2750 rinfo->family <= CHIP_FAMILY_RV250)
@@ -2778,7 +2788,8 @@ void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk)
2778 OUTREG(TV_DAC_CNTL, INREG(TV_DAC_CNTL) | 0x07000000); 2788 OUTREG(TV_DAC_CNTL, INREG(TV_DAC_CNTL) | 0x07000000);
2779#endif 2789#endif
2780 } 2790 }
2781#endif /* defined(CONFIG_PM) && defined(CONFIG_PPC_PMAC) */ 2791#endif /* defined(CONFIG_PPC_PMAC) */
2792#endif /* defined(CONFIG_PM) */
2782} 2793}
2783 2794
2784void radeonfb_pm_exit(struct radeonfb_info *rinfo) 2795void radeonfb_pm_exit(struct radeonfb_info *rinfo)
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index 5250c428fc1f..ef3386549140 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -66,7 +66,7 @@ static match_table_t tokens = {
66 {Opt_afid, "afid=%u"}, 66 {Opt_afid, "afid=%u"},
67 {Opt_rfdno, "rfdno=%u"}, 67 {Opt_rfdno, "rfdno=%u"},
68 {Opt_wfdno, "wfdno=%u"}, 68 {Opt_wfdno, "wfdno=%u"},
69 {Opt_debug, "debug=%u"}, 69 {Opt_debug, "debug=%x"},
70 {Opt_name, "name=%s"}, 70 {Opt_name, "name=%s"},
71 {Opt_remotename, "aname=%s"}, 71 {Opt_remotename, "aname=%s"},
72 {Opt_unix, "proto=unix"}, 72 {Opt_unix, "proto=unix"},
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 38ab9f67c5f4..9d7bbd225eff 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1076,13 +1076,14 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
1076 cifs_small_buf_release(iov[0].iov_base); 1076 cifs_small_buf_release(iov[0].iov_base);
1077 else if(resp_buf_type == CIFS_LARGE_BUFFER) 1077 else if(resp_buf_type == CIFS_LARGE_BUFFER)
1078 cifs_buf_release(iov[0].iov_base); 1078 cifs_buf_release(iov[0].iov_base);
1079 } else /* return buffer to caller to free */ /* BB FIXME how do we tell caller if it is not a large buffer */ { 1079 } else if(resp_buf_type != CIFS_NO_BUFFER) {
1080 *buf = iov[0].iov_base; 1080 /* return buffer to caller to free */
1081 *buf = iov[0].iov_base;
1081 if(resp_buf_type == CIFS_SMALL_BUFFER) 1082 if(resp_buf_type == CIFS_SMALL_BUFFER)
1082 *pbuf_type = CIFS_SMALL_BUFFER; 1083 *pbuf_type = CIFS_SMALL_BUFFER;
1083 else if(resp_buf_type == CIFS_LARGE_BUFFER) 1084 else if(resp_buf_type == CIFS_LARGE_BUFFER)
1084 *pbuf_type = CIFS_LARGE_BUFFER; 1085 *pbuf_type = CIFS_LARGE_BUFFER;
1085 } 1086 } /* else no valid buffer on return - leave as null */
1086 1087
1087 /* Note: On -EAGAIN error only caller can retry on handle based calls 1088 /* Note: On -EAGAIN error only caller can retry on handle based calls
1088 since file handle passed in no longer valid */ 1089 since file handle passed in no longer valid */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 0e1560ac5ad7..16535b510a96 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1795,10 +1795,10 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
1795 conjunction with 52K kvec constraint on arch with 4K 1795 conjunction with 52K kvec constraint on arch with 4K
1796 page size */ 1796 page size */
1797 1797
1798 if(cifs_sb->rsize < PAGE_CACHE_SIZE) { 1798 if(cifs_sb->rsize < 2048) {
1799 cifs_sb->rsize = PAGE_CACHE_SIZE; 1799 cifs_sb->rsize = 2048;
1800 /* Windows ME does this */ 1800 /* Windows ME may prefer this */
1801 cFYI(1,("Attempt to set readsize for mount to less than one page (4096)")); 1801 cFYI(1,("readsize set to minimum 2048"));
1802 } 1802 }
1803 cifs_sb->mnt_uid = volume_info.linux_uid; 1803 cifs_sb->mnt_uid = volume_info.linux_uid;
1804 cifs_sb->mnt_gid = volume_info.linux_gid; 1804 cifs_sb->mnt_gid = volume_info.linux_gid;
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 6573f31f1fd9..075d3e945602 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -204,10 +204,6 @@ int proc_fill_super(struct super_block *s, void *data, int silent)
204 root_inode = proc_get_inode(s, PROC_ROOT_INO, &proc_root); 204 root_inode = proc_get_inode(s, PROC_ROOT_INO, &proc_root);
205 if (!root_inode) 205 if (!root_inode)
206 goto out_no_root; 206 goto out_no_root;
207 /*
208 * Fixup the root inode's nlink value
209 */
210 root_inode->i_nlink += nr_processes();
211 root_inode->i_uid = 0; 207 root_inode->i_uid = 0;
212 root_inode->i_gid = 0; 208 root_inode->i_gid = 0;
213 s->s_root = d_alloc_root(root_inode); 209 s->s_root = d_alloc_root(root_inode);
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 68896283c8ae..c3fd3611112f 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -80,16 +80,16 @@ void __init proc_root_init(void)
80 proc_bus = proc_mkdir("bus", NULL); 80 proc_bus = proc_mkdir("bus", NULL);
81} 81}
82 82
83static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentry, struct nameidata *nd) 83static int proc_root_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat
84)
84{ 85{
85 /* 86 generic_fillattr(dentry->d_inode, stat);
86 * nr_threads is actually protected by the tasklist_lock; 87 stat->nlink = proc_root.nlink + nr_processes();
87 * however, it's conventional to do reads, especially for 88 return 0;
88 * reporting, without any locking whatsoever. 89}
89 */
90 if (dir->i_ino == PROC_ROOT_INO) /* check for safety... */
91 dir->i_nlink = proc_root.nlink + nr_threads;
92 90
91static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentry, struct nameidata *nd)
92{
93 if (!proc_lookup(dir, dentry, nd)) { 93 if (!proc_lookup(dir, dentry, nd)) {
94 return NULL; 94 return NULL;
95 } 95 }
@@ -134,6 +134,7 @@ static struct file_operations proc_root_operations = {
134 */ 134 */
135static struct inode_operations proc_root_inode_operations = { 135static struct inode_operations proc_root_inode_operations = {
136 .lookup = proc_root_lookup, 136 .lookup = proc_root_lookup,
137 .getattr = proc_root_getattr,
137}; 138};
138 139
139/* 140/*
diff --git a/fs/super.c b/fs/super.c
index 30294218fa63..e20b5580afd5 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -666,6 +666,16 @@ static int test_bdev_super(struct super_block *s, void *data)
666 return (void *)s->s_bdev == data; 666 return (void *)s->s_bdev == data;
667} 667}
668 668
669static void bdev_uevent(struct block_device *bdev, enum kobject_action action)
670{
671 if (bdev->bd_disk) {
672 if (bdev->bd_part)
673 kobject_uevent(&bdev->bd_part->kobj, action);
674 else
675 kobject_uevent(&bdev->bd_disk->kobj, action);
676 }
677}
678
669struct super_block *get_sb_bdev(struct file_system_type *fs_type, 679struct super_block *get_sb_bdev(struct file_system_type *fs_type,
670 int flags, const char *dev_name, void *data, 680 int flags, const char *dev_name, void *data,
671 int (*fill_super)(struct super_block *, void *, int)) 681 int (*fill_super)(struct super_block *, void *, int))
@@ -707,8 +717,10 @@ struct super_block *get_sb_bdev(struct file_system_type *fs_type,
707 up_write(&s->s_umount); 717 up_write(&s->s_umount);
708 deactivate_super(s); 718 deactivate_super(s);
709 s = ERR_PTR(error); 719 s = ERR_PTR(error);
710 } else 720 } else {
711 s->s_flags |= MS_ACTIVE; 721 s->s_flags |= MS_ACTIVE;
722 bdev_uevent(bdev, KOBJ_MOUNT);
723 }
712 } 724 }
713 725
714 return s; 726 return s;
@@ -724,6 +736,7 @@ void kill_block_super(struct super_block *sb)
724{ 736{
725 struct block_device *bdev = sb->s_bdev; 737 struct block_device *bdev = sb->s_bdev;
726 738
739 bdev_uevent(bdev, KOBJ_UMOUNT);
727 generic_shutdown_super(sb); 740 generic_shutdown_super(sb);
728 sync_blockdev(bdev); 741 sync_blockdev(bdev);
729 close_bdev_excl(bdev); 742 close_bdev_excl(bdev);
diff --git a/include/asm-arm/arch-at91rm9200/gpio.h b/include/asm-arm/arch-at91rm9200/gpio.h
index 0f0a61e2f129..6176ab2dc417 100644
--- a/include/asm-arm/arch-at91rm9200/gpio.h
+++ b/include/asm-arm/arch-at91rm9200/gpio.h
@@ -183,6 +183,7 @@ extern int at91_set_B_periph(unsigned pin, int use_pullup);
183extern int at91_set_gpio_input(unsigned pin, int use_pullup); 183extern int at91_set_gpio_input(unsigned pin, int use_pullup);
184extern int at91_set_gpio_output(unsigned pin, int value); 184extern int at91_set_gpio_output(unsigned pin, int value);
185extern int at91_set_deglitch(unsigned pin, int is_on); 185extern int at91_set_deglitch(unsigned pin, int is_on);
186extern int at91_set_multi_drive(unsigned pin, int is_on);
186 187
187/* callable at any time */ 188/* callable at any time */
188extern int at91_set_gpio_value(unsigned pin, int value); 189extern int at91_set_gpio_value(unsigned pin, int value);
diff --git a/include/asm-arm/arch-ixp4xx/nas100d.h b/include/asm-arm/arch-ixp4xx/nas100d.h
index 51ac0180427c..84467a5190d0 100644
--- a/include/asm-arm/arch-ixp4xx/nas100d.h
+++ b/include/asm-arm/arch-ixp4xx/nas100d.h
@@ -19,8 +19,8 @@
19#error "Do not include this directly, instead #include <asm/hardware.h>" 19#error "Do not include this directly, instead #include <asm/hardware.h>"
20#endif 20#endif
21 21
22#define NAS100D_SDA_PIN 6 22#define NAS100D_SDA_PIN 5
23#define NAS100D_SCL_PIN 5 23#define NAS100D_SCL_PIN 6
24 24
25/* 25/*
26 * NAS100D PCI IRQs 26 * NAS100D PCI IRQs
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
index 06c12a037cba..d6a2c613be68 100644
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -239,7 +239,7 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
239 " bra 2f; \n" 239 " bra 2f; \n"
240 " .fillinsn \n" 240 " .fillinsn \n"
241 "1:" 241 "1:"
242 M32R_UNLOCK" %2, @%1; \n" 242 M32R_UNLOCK" %0, @%1; \n"
243 " .fillinsn \n" 243 " .fillinsn \n"
244 "2:" 244 "2:"
245 : "=&r" (retval) 245 : "=&r" (retval)
diff --git a/include/asm-m68k/irq.h b/include/asm-m68k/irq.h
index 325c86f8512d..9ac047c400c4 100644
--- a/include/asm-m68k/irq.h
+++ b/include/asm-m68k/irq.h
@@ -79,7 +79,7 @@ static __inline__ int irq_canonicalize(int irq)
79 79
80extern void (*enable_irq)(unsigned int); 80extern void (*enable_irq)(unsigned int);
81extern void (*disable_irq)(unsigned int); 81extern void (*disable_irq)(unsigned int);
82#define enable_irq_nosync enable_irq 82#define disable_irq_nosync disable_irq
83 83
84struct pt_regs; 84struct pt_regs;
85 85
diff --git a/include/asm-m68k/raw_io.h b/include/asm-m68k/raw_io.h
index 5439bcaa57c6..811ccd25d4a6 100644
--- a/include/asm-m68k/raw_io.h
+++ b/include/asm-m68k/raw_io.h
@@ -336,6 +336,7 @@ static inline void raw_outsw_swapw(volatile u16 __iomem *port, const u16 *buf,
336 : "d0", "a0", "a1", "d6"); 336 : "d0", "a0", "a1", "d6");
337} 337}
338 338
339#define __raw_writel raw_outl
339 340
340#endif /* __KERNEL__ */ 341#endif /* __KERNEL__ */
341 342
diff --git a/include/asm-mips/uaccess.h b/include/asm-mips/uaccess.h
index 7a553e9d44d3..b96f3e0f3933 100644
--- a/include/asm-mips/uaccess.h
+++ b/include/asm-mips/uaccess.h
@@ -233,7 +233,7 @@ do { \
233#define __get_user_check(x,ptr,size) \ 233#define __get_user_check(x,ptr,size) \
234({ \ 234({ \
235 long __gu_err = -EFAULT; \ 235 long __gu_err = -EFAULT; \
236 const void __user * __gu_ptr = (ptr); \ 236 const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
237 \ 237 \
238 if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \ 238 if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
239 __get_user_common((x), size, __gu_ptr); \ 239 __get_user_common((x), size, __gu_ptr); \
@@ -258,7 +258,7 @@ do { \
258 : "=r" (__gu_err), "=r" (__gu_tmp) \ 258 : "=r" (__gu_err), "=r" (__gu_tmp) \
259 : "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \ 259 : "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \
260 \ 260 \
261 (val) = (__typeof__(val)) __gu_tmp; \ 261 (val) = (__typeof__(*(addr))) __gu_tmp; \
262} 262}
263 263
264/* 264/*
@@ -284,7 +284,7 @@ do { \
284 " .previous \n" \ 284 " .previous \n" \
285 : "=r" (__gu_err), "=&r" (__gu_tmp) \ 285 : "=r" (__gu_err), "=&r" (__gu_tmp) \
286 : "0" (0), "r" (addr), "i" (-EFAULT)); \ 286 : "0" (0), "r" (addr), "i" (-EFAULT)); \
287 (val) = __gu_tmp; \ 287 (val) = (__typeof__(*(addr))) __gu_tmp; \
288} 288}
289 289
290/* 290/*
diff --git a/include/asm-mips/unistd.h b/include/asm-mips/unistd.h
index 769305d20108..b5c78a4a0192 100644
--- a/include/asm-mips/unistd.h
+++ b/include/asm-mips/unistd.h
@@ -313,7 +313,7 @@
313#define __NR_mknodat (__NR_Linux + 290) 313#define __NR_mknodat (__NR_Linux + 290)
314#define __NR_fchownat (__NR_Linux + 291) 314#define __NR_fchownat (__NR_Linux + 291)
315#define __NR_futimesat (__NR_Linux + 292) 315#define __NR_futimesat (__NR_Linux + 292)
316#define __NR_newfstatat (__NR_Linux + 293) 316#define __NR_fstatat (__NR_Linux + 293)
317#define __NR_unlinkat (__NR_Linux + 294) 317#define __NR_unlinkat (__NR_Linux + 294)
318#define __NR_renameat (__NR_Linux + 295) 318#define __NR_renameat (__NR_Linux + 295)
319#define __NR_linkat (__NR_Linux + 296) 319#define __NR_linkat (__NR_Linux + 296)
@@ -593,7 +593,7 @@
593#define __NR_mknodat (__NR_Linux + 249) 593#define __NR_mknodat (__NR_Linux + 249)
594#define __NR_fchownat (__NR_Linux + 250) 594#define __NR_fchownat (__NR_Linux + 250)
595#define __NR_futimesat (__NR_Linux + 251) 595#define __NR_futimesat (__NR_Linux + 251)
596#define __NR_newfstatat (__NR_Linux + 252) 596#define __NR_fstatat (__NR_Linux + 252)
597#define __NR_unlinkat (__NR_Linux + 253) 597#define __NR_unlinkat (__NR_Linux + 253)
598#define __NR_renameat (__NR_Linux + 254) 598#define __NR_renameat (__NR_Linux + 254)
599#define __NR_linkat (__NR_Linux + 255) 599#define __NR_linkat (__NR_Linux + 255)
@@ -877,7 +877,7 @@
877#define __NR_mknodat (__NR_Linux + 253) 877#define __NR_mknodat (__NR_Linux + 253)
878#define __NR_fchownat (__NR_Linux + 254) 878#define __NR_fchownat (__NR_Linux + 254)
879#define __NR_futimesat (__NR_Linux + 255) 879#define __NR_futimesat (__NR_Linux + 255)
880#define __NR_newfstatat (__NR_Linux + 256) 880#define __NR_fstatat (__NR_Linux + 256)
881#define __NR_unlinkat (__NR_Linux + 257) 881#define __NR_unlinkat (__NR_Linux + 257)
882#define __NR_renameat (__NR_Linux + 258) 882#define __NR_renameat (__NR_Linux + 258)
883#define __NR_linkat (__NR_Linux + 259) 883#define __NR_linkat (__NR_Linux + 259)
diff --git a/include/asm-ppc/machdep.h b/include/asm-ppc/machdep.h
index 39200def8d11..a3e8a45e45a9 100644
--- a/include/asm-ppc/machdep.h
+++ b/include/asm-ppc/machdep.h
@@ -154,19 +154,6 @@ extern char cmd_line[COMMAND_LINE_SIZE];
154 154
155extern void setup_pci_ptrs(void); 155extern void setup_pci_ptrs(void);
156 156
157/*
158 * Power macintoshes have either a CUDA or a PMU controlling
159 * system reset, power, NVRAM, RTC.
160 */
161typedef enum sys_ctrler_kind {
162 SYS_CTRLER_UNKNOWN = 0,
163 SYS_CTRLER_CUDA = 1,
164 SYS_CTRLER_PMU = 2,
165 SYS_CTRLER_SMU = 3,
166} sys_ctrler_t;
167
168extern sys_ctrler_t sys_ctrler;
169
170#ifdef CONFIG_SMP 157#ifdef CONFIG_SMP
171struct smp_ops_t { 158struct smp_ops_t {
172 void (*message_pass)(int target, int msg); 159 void (*message_pass)(int target, int msg);
diff --git a/include/asm-s390/dasd.h b/include/asm-s390/dasd.h
index c744ff33b1df..1630c26e8f45 100644
--- a/include/asm-s390/dasd.h
+++ b/include/asm-s390/dasd.h
@@ -204,8 +204,7 @@ typedef struct attrib_data_t {
204 * 204 *
205 * Here is how the ioctl-nr should be used:	205 * Here is how the ioctl-nr should be used:
206 * 0 - 31 DASD driver itself 206 * 0 - 31 DASD driver itself
207 * 32 - 229 still open 207 * 32 - 239 still open
208 * 230 - 239 DASD extended error reporting
209 * 240 - 255 reserved for EMC 208 * 240 - 255 reserved for EMC
210 *******************************************************************************/ 209 *******************************************************************************/
211 210
@@ -237,22 +236,12 @@ typedef struct attrib_data_t {
237#define BIODASDPSRD _IOR(DASD_IOCTL_LETTER,4,dasd_rssd_perf_stats_t) 236#define BIODASDPSRD _IOR(DASD_IOCTL_LETTER,4,dasd_rssd_perf_stats_t)
238/* Get Attributes (cache operations) */ 237/* Get Attributes (cache operations) */
239#define BIODASDGATTR _IOR(DASD_IOCTL_LETTER,5,attrib_data_t) 238#define BIODASDGATTR _IOR(DASD_IOCTL_LETTER,5,attrib_data_t)
240/* retrieve extended error-reporting value */
241#define BIODASDEERGET _IOR(DASD_IOCTL_LETTER,6,int)
242 239
243 240
244/* #define BIODASDFORMAT _IOW(IOCTL_LETTER,0,format_data_t) , deprecated */ 241/* #define BIODASDFORMAT _IOW(IOCTL_LETTER,0,format_data_t) , deprecated */
245#define BIODASDFMT _IOW(DASD_IOCTL_LETTER,1,format_data_t) 242#define BIODASDFMT _IOW(DASD_IOCTL_LETTER,1,format_data_t)
246/* Set Attributes (cache operations) */ 243/* Set Attributes (cache operations) */
247#define BIODASDSATTR _IOW(DASD_IOCTL_LETTER,2,attrib_data_t) 244#define BIODASDSATTR _IOW(DASD_IOCTL_LETTER,2,attrib_data_t)
248/* retrieve extended error-reporting value */
249#define BIODASDEERSET _IOW(DASD_IOCTL_LETTER,3,int)
250
251
252/* remove all records from the eer buffer */
253#define DASD_EER_PURGE _IO(DASD_IOCTL_LETTER,230)
254/* set the number of pages that are used for the internal eer buffer */
255#define DASD_EER_SETBUFSIZE _IOW(DASD_IOCTL_LETTER,230,int)
256 245
257 246
258#endif /* DASD_H */ 247#endif /* DASD_H */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 84d3d9f034ce..d3bc25e6d27d 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -427,7 +427,8 @@ extern int acpi_mp_config;
427extern struct acpi_table_mcfg_config *pci_mmcfg_config; 427extern struct acpi_table_mcfg_config *pci_mmcfg_config;
428extern int pci_mmcfg_config_num; 428extern int pci_mmcfg_config_num;
429 429
430extern int sbf_port ; 430extern int sbf_port;
431extern unsigned long acpi_video_flags;
431 432
432#else /* !CONFIG_ACPI */ 433#else /* !CONFIG_ACPI */
433 434
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 2a8d8da70961..c374b5fa8d3b 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -41,8 +41,10 @@ enum kobject_action {
41 KOBJ_ADD = (__force kobject_action_t) 0x01, /* exclusive to core */ 41 KOBJ_ADD = (__force kobject_action_t) 0x01, /* exclusive to core */
42 KOBJ_REMOVE = (__force kobject_action_t) 0x02, /* exclusive to core */ 42 KOBJ_REMOVE = (__force kobject_action_t) 0x02, /* exclusive to core */
43 KOBJ_CHANGE = (__force kobject_action_t) 0x03, /* device state change */ 43 KOBJ_CHANGE = (__force kobject_action_t) 0x03, /* device state change */
44 KOBJ_OFFLINE = (__force kobject_action_t) 0x04, /* device offline */ 44 KOBJ_MOUNT = (__force kobject_action_t) 0x04, /* mount event for block devices (broken) */
45 KOBJ_ONLINE = (__force kobject_action_t) 0x05, /* device online */ 45 KOBJ_UMOUNT = (__force kobject_action_t) 0x05, /* umount event for block devices (broken) */
46 KOBJ_OFFLINE = (__force kobject_action_t) 0x06, /* device offline */
47 KOBJ_ONLINE = (__force kobject_action_t) 0x07, /* device online */
46}; 48};
47 49
48struct kobject { 50struct kobject {
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 9e5db2949c58..c91be5e64ede 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -557,17 +557,29 @@ ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
557} 557}
558 558
559static inline struct scatterlist * 559static inline struct scatterlist *
560ata_qc_first_sg(struct ata_queued_cmd *qc)
561{
562 if (qc->n_elem)
563 return qc->__sg;
564 if (qc->pad_len)
565 return &qc->pad_sgent;
566 return NULL;
567}
568
569static inline struct scatterlist *
560ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc) 570ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc)
561{ 571{
562 if (sg == &qc->pad_sgent) 572 if (sg == &qc->pad_sgent)
563 return NULL; 573 return NULL;
564 if (++sg - qc->__sg < qc->n_elem) 574 if (++sg - qc->__sg < qc->n_elem)
565 return sg; 575 return sg;
566 return qc->pad_len ? &qc->pad_sgent : NULL; 576 if (qc->pad_len)
577 return &qc->pad_sgent;
578 return NULL;
567} 579}
568 580
569#define ata_for_each_sg(sg, qc) \ 581#define ata_for_each_sg(sg, qc) \
570 for (sg = qc->__sg; sg; sg = ata_qc_next_sg(sg, qc)) 582 for (sg = ata_qc_first_sg(qc); sg; sg = ata_qc_next_sg(sg, qc))
571 583
572static inline unsigned int ata_tag_valid(unsigned int tag) 584static inline unsigned int ata_tag_valid(unsigned int tag)
573{ 585{
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 26e1663a5cbe..498ff8778fb6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1051,7 +1051,11 @@ int shrink_slab(unsigned long scanned, gfp_t gfp_mask,
1051void drop_pagecache(void); 1051void drop_pagecache(void);
1052void drop_slab(void); 1052void drop_slab(void);
1053 1053
1054#ifndef CONFIG_MMU
1055#define randomize_va_space 0
1056#else
1054extern int randomize_va_space; 1057extern int randomize_va_space;
1058#endif
1055 1059
1056#endif /* __KERNEL__ */ 1060#endif /* __KERNEL__ */
1057#endif /* _LINUX_MM_H */ 1061#endif /* _LINUX_MM_H */
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index f38872abc126..bdc556d88498 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -49,7 +49,7 @@ struct mmc_command {
49/* 49/*
50 * These are the command types. 50 * These are the command types.
51 */ 51 */
52#define mmc_cmd_type(cmd) ((cmd)->flags & MMC_CMD_TYPE) 52#define mmc_cmd_type(cmd) ((cmd)->flags & MMC_CMD_MASK)
53 53
54 unsigned int retries; /* max number of retries */ 54 unsigned int retries; /* max number of retries */
55 unsigned int error; /* command error */ 55 unsigned int error; /* command error */
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 547d649b274e..b4dc6e2e10c9 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -398,7 +398,7 @@ extern struct inode_operations nfs_symlink_inode_operations;
398extern int nfs_register_sysctl(void); 398extern int nfs_register_sysctl(void);
399extern void nfs_unregister_sysctl(void); 399extern void nfs_unregister_sysctl(void);
400#else 400#else
401#define nfs_register_sysctl() do { } while(0) 401#define nfs_register_sysctl() 0
402#define nfs_unregister_sysctl() do { } while(0) 402#define nfs_unregister_sysctl() do { } while(0)
403#endif 403#endif
404 404
diff --git a/include/linux/swap.h b/include/linux/swap.h
index f3e17d5963c3..d572b19afb7d 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -147,7 +147,7 @@ struct swap_list_t {
147#define vm_swap_full() (nr_swap_pages*2 < total_swap_pages) 147#define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)
148 148
149/* linux/mm/oom_kill.c */ 149/* linux/mm/oom_kill.c */
150extern void out_of_memory(gfp_t gfp_mask, int order); 150extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order);
151 151
152/* linux/mm/memory.c */ 152/* linux/mm/memory.c */
153extern void swapin_readahead(swp_entry_t, unsigned long, struct vm_area_struct *); 153extern void swapin_readahead(swp_entry_t, unsigned long, struct vm_area_struct *);
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 32a4139c4ad8..0e92bf7ec28e 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -146,6 +146,7 @@ enum
146 KERN_RANDOMIZE=68, /* int: randomize virtual address space */ 146 KERN_RANDOMIZE=68, /* int: randomize virtual address space */
147 KERN_SETUID_DUMPABLE=69, /* int: behaviour of dumps for setuid core */ 147 KERN_SETUID_DUMPABLE=69, /* int: behaviour of dumps for setuid core */
148 KERN_SPIN_RETRY=70, /* int: number of spinlock retries */ 148 KERN_SPIN_RETRY=70, /* int: number of spinlock retries */
149 KERN_ACPI_VIDEO_FLAGS=71, /* int: flags for setting up video after ACPI sleep */
149}; 150};
150 151
151 152
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 685c25175d96..d7e7e637b92a 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -841,7 +841,7 @@ static void audit_log_exit(struct audit_context *context, gfp_t gfp_mask)
841 841
842 for (aux = context->aux; aux; aux = aux->next) { 842 for (aux = context->aux; aux; aux = aux->next) {
843 843
844 ab = audit_log_start(context, GFP_KERNEL, aux->type); 844 ab = audit_log_start(context, gfp_mask, aux->type);
845 if (!ab) 845 if (!ab)
846 continue; /* audit_panic has been called */ 846 continue; /* audit_panic has been called */
847 847
@@ -878,14 +878,14 @@ static void audit_log_exit(struct audit_context *context, gfp_t gfp_mask)
878 } 878 }
879 879
880 if (context->pwd && context->pwdmnt) { 880 if (context->pwd && context->pwdmnt) {
881 ab = audit_log_start(context, GFP_KERNEL, AUDIT_CWD); 881 ab = audit_log_start(context, gfp_mask, AUDIT_CWD);
882 if (ab) { 882 if (ab) {
883 audit_log_d_path(ab, "cwd=", context->pwd, context->pwdmnt); 883 audit_log_d_path(ab, "cwd=", context->pwd, context->pwdmnt);
884 audit_log_end(ab); 884 audit_log_end(ab);
885 } 885 }
886 } 886 }
887 for (i = 0; i < context->name_count; i++) { 887 for (i = 0; i < context->name_count; i++) {
888 ab = audit_log_start(context, GFP_KERNEL, AUDIT_PATH); 888 ab = audit_log_start(context, gfp_mask, AUDIT_PATH);
889 if (!ab) 889 if (!ab)
890 continue; /* audit_panic has been called */ 890 continue; /* audit_panic has been called */
891 891
diff --git a/kernel/exit.c b/kernel/exit.c
index 93cee3671332..531aadca5530 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -360,6 +360,9 @@ void daemonize(const char *name, ...)
360 fs = init_task.fs; 360 fs = init_task.fs;
361 current->fs = fs; 361 current->fs = fs;
362 atomic_inc(&fs->count); 362 atomic_inc(&fs->count);
363 exit_namespace(current);
364 current->namespace = init_task.namespace;
365 get_namespace(current->namespace);
363 exit_files(current); 366 exit_files(current);
364 current->files = init_task.files; 367 current->files = init_task.files;
365 atomic_inc(&current->files->count); 368 atomic_inc(&current->files->count);
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 17313b99e53d..1067090db6b1 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -104,6 +104,8 @@ cond_syscall(sys_setreuid16);
104cond_syscall(sys_setuid16); 104cond_syscall(sys_setuid16);
105cond_syscall(sys_vm86old); 105cond_syscall(sys_vm86old);
106cond_syscall(sys_vm86); 106cond_syscall(sys_vm86);
107cond_syscall(compat_sys_ipc);
108cond_syscall(compat_sys_sysctl);
107 109
108/* arch-specific weak syscall entries */ 110/* arch-specific weak syscall entries */
109cond_syscall(sys_pciconfig_read); 111cond_syscall(sys_pciconfig_read);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 7654d55c47f5..c05a2b7125e1 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -44,14 +44,12 @@
44#include <linux/limits.h> 44#include <linux/limits.h>
45#include <linux/dcache.h> 45#include <linux/dcache.h>
46#include <linux/syscalls.h> 46#include <linux/syscalls.h>
47#include <linux/nfs_fs.h>
48#include <linux/acpi.h>
47 49
48#include <asm/uaccess.h> 50#include <asm/uaccess.h>
49#include <asm/processor.h> 51#include <asm/processor.h>
50 52
51#ifdef CONFIG_ROOT_NFS
52#include <linux/nfs_fs.h>
53#endif
54
55#if defined(CONFIG_SYSCTL) 53#if defined(CONFIG_SYSCTL)
56 54
57/* External variables not in a header file. */ 55/* External variables not in a header file. */
@@ -638,6 +636,7 @@ static ctl_table kern_table[] = {
638 .proc_handler = &proc_dointvec, 636 .proc_handler = &proc_dointvec,
639 }, 637 },
640#endif 638#endif
639#if defined(CONFIG_MMU)
641 { 640 {
642 .ctl_name = KERN_RANDOMIZE, 641 .ctl_name = KERN_RANDOMIZE,
643 .procname = "randomize_va_space", 642 .procname = "randomize_va_space",
@@ -646,6 +645,7 @@ static ctl_table kern_table[] = {
646 .mode = 0644, 645 .mode = 0644,
647 .proc_handler = &proc_dointvec, 646 .proc_handler = &proc_dointvec,
648 }, 647 },
648#endif
649#if defined(CONFIG_S390) && defined(CONFIG_SMP) 649#if defined(CONFIG_S390) && defined(CONFIG_SMP)
650 { 650 {
651 .ctl_name = KERN_SPIN_RETRY, 651 .ctl_name = KERN_SPIN_RETRY,
@@ -656,6 +656,16 @@ static ctl_table kern_table[] = {
656 .proc_handler = &proc_dointvec, 656 .proc_handler = &proc_dointvec,
657 }, 657 },
658#endif 658#endif
659#ifdef CONFIG_ACPI_SLEEP
660 {
661 .ctl_name = KERN_ACPI_VIDEO_FLAGS,
662 .procname = "acpi_video_flags",
663 .data = &acpi_video_flags,
664 .maxlen = sizeof (unsigned long),
665 .mode = 0644,
666 .proc_handler = &proc_dointvec,
667 },
668#endif
659 { .ctl_name = 0 } 669 { .ctl_name = 0 }
660}; 670};
661 671
diff --git a/lib/iomap_copy.c b/lib/iomap_copy.c
index a6b1e271d53c..351045f4f63c 100644
--- a/lib/iomap_copy.c
+++ b/lib/iomap_copy.c
@@ -15,8 +15,8 @@
15 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. 15 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
16 */ 16 */
17 17
18#include <linux/io.h>
19#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/io.h>
20 20
21/** 21/**
22 * __iowrite32_copy - copy data to MMIO space, in 32-bit units 22 * __iowrite32_copy - copy data to MMIO space, in 32-bit units
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 1b1985c136ec..086a0c6e888e 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -38,6 +38,10 @@ static char *action_to_string(enum kobject_action action)
38 return "remove"; 38 return "remove";
39 case KOBJ_CHANGE: 39 case KOBJ_CHANGE:
40 return "change"; 40 return "change";
41 case KOBJ_MOUNT:
42 return "mount";
43 case KOBJ_UMOUNT:
44 return "umount";
41 case KOBJ_OFFLINE: 45 case KOBJ_OFFLINE:
42 return "offline"; 46 return "offline";
43 case KOBJ_ONLINE: 47 case KOBJ_ONLINE:
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index bedfa4f09c80..880831bd3003 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -587,7 +587,7 @@ redo:
587 } 587 }
588 list_add(&page->lru, &newlist); 588 list_add(&page->lru, &newlist);
589 nr_pages++; 589 nr_pages++;
590 if (nr_pages > MIGRATE_CHUNK_SIZE); 590 if (nr_pages > MIGRATE_CHUNK_SIZE)
591 break; 591 break;
592 } 592 }
593 err = migrate_pages(pagelist, &newlist, &moved, &failed); 593 err = migrate_pages(pagelist, &newlist, &moved, &failed);
@@ -808,7 +808,7 @@ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
808 nodes_clear(*nodes); 808 nodes_clear(*nodes);
809 if (maxnode == 0 || !nmask) 809 if (maxnode == 0 || !nmask)
810 return 0; 810 return 0;
811 if (maxnode > PAGE_SIZE) 811 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
812 return -EINVAL; 812 return -EINVAL;
813 813
814 nlongs = BITS_TO_LONGS(maxnode); 814 nlongs = BITS_TO_LONGS(maxnode);
diff --git a/mm/nommu.c b/mm/nommu.c
index c10262d68232..99d21020ec9d 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -57,6 +57,8 @@ EXPORT_SYMBOL(vmalloc);
57EXPORT_SYMBOL(vfree); 57EXPORT_SYMBOL(vfree);
58EXPORT_SYMBOL(vmalloc_to_page); 58EXPORT_SYMBOL(vmalloc_to_page);
59EXPORT_SYMBOL(vmalloc_32); 59EXPORT_SYMBOL(vmalloc_32);
60EXPORT_SYMBOL(vmap);
61EXPORT_SYMBOL(vunmap);
60 62
61/* 63/*
62 * Handle all mappings that got truncated by a "truncate()" 64 * Handle all mappings that got truncated by a "truncate()"
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index b05ab8f2a562..8123fad5a485 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -58,15 +58,17 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
58 58
59 /* 59 /*
60 * Processes which fork a lot of child processes are likely 60 * Processes which fork a lot of child processes are likely
61 * a good choice. We add the vmsize of the children if they 61 * a good choice. We add half the vmsize of the children if they
62 * have their own mm. This prevents forking servers from flooding the	62 * have their own mm. This prevents forking servers from flooding the
63 * machine with an endless number of children	63 * machine with an endless number of children. In case a single
64 * child is eating the vast majority of memory, adding only half
65 * to the parent will make the child our kill candidate of choice.
64 */ 66 */
65 list_for_each(tsk, &p->children) { 67 list_for_each(tsk, &p->children) {
66 struct task_struct *chld; 68 struct task_struct *chld;
67 chld = list_entry(tsk, struct task_struct, sibling); 69 chld = list_entry(tsk, struct task_struct, sibling);
68 if (chld->mm != p->mm && chld->mm) 70 if (chld->mm != p->mm && chld->mm)
69 points += chld->mm->total_vm; 71 points += chld->mm->total_vm/2 + 1;
70 } 72 }
71 73
72 /* 74 /*
@@ -131,17 +133,47 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
131} 133}
132 134
133/* 135/*
136 * Types of limitations to the nodes from which allocations may occur
137 */
138#define CONSTRAINT_NONE 1
139#define CONSTRAINT_MEMORY_POLICY 2
140#define CONSTRAINT_CPUSET 3
141
142/*
143 * Determine the type of allocation constraint.
144 */
145static inline int constrained_alloc(struct zonelist *zonelist, gfp_t gfp_mask)
146{
147#ifdef CONFIG_NUMA
148 struct zone **z;
149 nodemask_t nodes = node_online_map;
150
151 for (z = zonelist->zones; *z; z++)
152 if (cpuset_zone_allowed(*z, gfp_mask))
153 node_clear((*z)->zone_pgdat->node_id,
154 nodes);
155 else
156 return CONSTRAINT_CPUSET;
157
158 if (!nodes_empty(nodes))
159 return CONSTRAINT_MEMORY_POLICY;
160#endif
161
162 return CONSTRAINT_NONE;
163}
164
165/*
134 * Simple selection loop. We chose the process with the highest 166 * Simple selection loop. We chose the process with the highest
135 * number of 'points'. We expect the caller will lock the tasklist. 167 * number of 'points'. We expect the caller will lock the tasklist.
136 * 168 *
137 * (not docbooked, we don't want this one cluttering up the manual) 169 * (not docbooked, we don't want this one cluttering up the manual)
138 */ 170 */
139static struct task_struct * select_bad_process(void) 171static struct task_struct *select_bad_process(unsigned long *ppoints)
140{ 172{
141 unsigned long maxpoints = 0;
142 struct task_struct *g, *p; 173 struct task_struct *g, *p;
143 struct task_struct *chosen = NULL; 174 struct task_struct *chosen = NULL;
144 struct timespec uptime; 175 struct timespec uptime;
176 *ppoints = 0;
145 177
146 do_posix_clock_monotonic_gettime(&uptime); 178 do_posix_clock_monotonic_gettime(&uptime);
147 do_each_thread(g, p) { 179 do_each_thread(g, p) {
@@ -169,9 +201,9 @@ static struct task_struct * select_bad_process(void)
169 return p; 201 return p;
170 202
171 points = badness(p, uptime.tv_sec); 203 points = badness(p, uptime.tv_sec);
172 if (points > maxpoints || !chosen) { 204 if (points > *ppoints || !chosen) {
173 chosen = p; 205 chosen = p;
174 maxpoints = points; 206 *ppoints = points;
175 } 207 }
176 } while_each_thread(g, p); 208 } while_each_thread(g, p);
177 return chosen; 209 return chosen;
@@ -182,7 +214,7 @@ static struct task_struct * select_bad_process(void)
182 * CAP_SYS_RAW_IO set, send SIGTERM instead (but it's unlikely that 214 * CAP_SYS_RAW_IO set, send SIGTERM instead (but it's unlikely that
183 * we select a process with CAP_SYS_RAW_IO set). 215 * we select a process with CAP_SYS_RAW_IO set).
184 */ 216 */
185static void __oom_kill_task(task_t *p) 217static void __oom_kill_task(task_t *p, const char *message)
186{ 218{
187 if (p->pid == 1) { 219 if (p->pid == 1) {
188 WARN_ON(1); 220 WARN_ON(1);
@@ -198,8 +230,8 @@ static void __oom_kill_task(task_t *p)
198 return; 230 return;
199 } 231 }
200 task_unlock(p); 232 task_unlock(p);
201 printk(KERN_ERR "Out of Memory: Killed process %d (%s).\n", 233 printk(KERN_ERR "%s: Killed process %d (%s).\n",
202 p->pid, p->comm); 234 message, p->pid, p->comm);
203 235
204 /* 236 /*
205 * We give our sacrificial lamb high priority and access to 237 * We give our sacrificial lamb high priority and access to
@@ -212,7 +244,7 @@ static void __oom_kill_task(task_t *p)
212 force_sig(SIGKILL, p); 244 force_sig(SIGKILL, p);
213} 245}
214 246
215static struct mm_struct *oom_kill_task(task_t *p) 247static struct mm_struct *oom_kill_task(task_t *p, const char *message)
216{ 248{
217 struct mm_struct *mm = get_task_mm(p); 249 struct mm_struct *mm = get_task_mm(p);
218 task_t * g, * q; 250 task_t * g, * q;
@@ -224,35 +256,38 @@ static struct mm_struct *oom_kill_task(task_t *p)
224 return NULL; 256 return NULL;
225 } 257 }
226 258
227 __oom_kill_task(p); 259 __oom_kill_task(p, message);
228 /* 260 /*
229 * kill all processes that share the ->mm (i.e. all threads), 261 * kill all processes that share the ->mm (i.e. all threads),
230 * but are in a different thread group 262 * but are in a different thread group
231 */ 263 */
232 do_each_thread(g, q) 264 do_each_thread(g, q)
233 if (q->mm == mm && q->tgid != p->tgid) 265 if (q->mm == mm && q->tgid != p->tgid)
234 __oom_kill_task(q); 266 __oom_kill_task(q, message);
235 while_each_thread(g, q); 267 while_each_thread(g, q);
236 268
237 return mm; 269 return mm;
238} 270}
239 271
240static struct mm_struct *oom_kill_process(struct task_struct *p) 272static struct mm_struct *oom_kill_process(struct task_struct *p,
273 unsigned long points, const char *message)
241{ 274{
242 struct mm_struct *mm; 275 struct mm_struct *mm;
243 struct task_struct *c; 276 struct task_struct *c;
244 struct list_head *tsk; 277 struct list_head *tsk;
245 278
279	printk(KERN_ERR "Out of Memory: Kill process %d (%s) score %lu and "
280 "children.\n", p->pid, p->comm, points);
246 /* Try to kill a child first */ 281 /* Try to kill a child first */
247 list_for_each(tsk, &p->children) { 282 list_for_each(tsk, &p->children) {
248 c = list_entry(tsk, struct task_struct, sibling); 283 c = list_entry(tsk, struct task_struct, sibling);
249 if (c->mm == p->mm) 284 if (c->mm == p->mm)
250 continue; 285 continue;
251 mm = oom_kill_task(c); 286 mm = oom_kill_task(c, message);
252 if (mm) 287 if (mm)
253 return mm; 288 return mm;
254 } 289 }
255 return oom_kill_task(p); 290 return oom_kill_task(p, message);
256} 291}
257 292
258/** 293/**
@@ -263,10 +298,11 @@ static struct mm_struct *oom_kill_process(struct task_struct *p)
263 * OR try to be smart about which process to kill. Note that we 298 * OR try to be smart about which process to kill. Note that we
264 * don't have to be perfect here, we just have to be good. 299 * don't have to be perfect here, we just have to be good.
265 */ 300 */
266void out_of_memory(gfp_t gfp_mask, int order) 301void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
267{ 302{
268 struct mm_struct *mm = NULL; 303 struct mm_struct *mm = NULL;
269 task_t * p; 304 task_t *p;
305 unsigned long points;
270 306
271 if (printk_ratelimit()) { 307 if (printk_ratelimit()) {
272 printk("oom-killer: gfp_mask=0x%x, order=%d\n", 308 printk("oom-killer: gfp_mask=0x%x, order=%d\n",
@@ -277,25 +313,48 @@ void out_of_memory(gfp_t gfp_mask, int order)
277 313
278 cpuset_lock(); 314 cpuset_lock();
279 read_lock(&tasklist_lock); 315 read_lock(&tasklist_lock);
316
317 /*
318 * Check if there were limitations on the allocation (only relevant for
319 * NUMA) that may require different handling.
320 */
321 switch (constrained_alloc(zonelist, gfp_mask)) {
322 case CONSTRAINT_MEMORY_POLICY:
323 mm = oom_kill_process(current, points,
324 "No available memory (MPOL_BIND)");
325 break;
326
327 case CONSTRAINT_CPUSET:
328 mm = oom_kill_process(current, points,
329 "No available memory in cpuset");
330 break;
331
332 case CONSTRAINT_NONE:
280retry: 333retry:
281 p = select_bad_process(); 334 /*
335 * Rambo mode: Shoot down a process and hope it solves whatever
336 * issues we may have.
337 */
338 p = select_bad_process(&points);
282 339
283 if (PTR_ERR(p) == -1UL) 340 if (PTR_ERR(p) == -1UL)
284 goto out; 341 goto out;
285 342
286 /* Found nothing?!?! Either we hang forever, or we panic. */ 343 /* Found nothing?!?! Either we hang forever, or we panic. */
287 if (!p) { 344 if (!p) {
288 read_unlock(&tasklist_lock); 345 read_unlock(&tasklist_lock);
289 cpuset_unlock(); 346 cpuset_unlock();
290 panic("Out of memory and no killable processes...\n"); 347 panic("Out of memory and no killable processes...\n");
291 } 348 }
292 349
293 mm = oom_kill_process(p); 350 mm = oom_kill_process(p, points, "Out of memory");
294 if (!mm) 351 if (!mm)
295 goto retry; 352 goto retry;
353
354 break;
355 }
296 356
297 out: 357out:
298 read_unlock(&tasklist_lock);
299 cpuset_unlock(); 358 cpuset_unlock();
300 if (mm) 359 if (mm)
301 mmput(mm); 360 mmput(mm);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 208812b25597..791690d7d3fa 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1015,7 +1015,7 @@ rebalance:
1015 if (page) 1015 if (page)
1016 goto got_pg; 1016 goto got_pg;
1017 1017
1018 out_of_memory(gfp_mask, order); 1018 out_of_memory(zonelist, gfp_mask, order);
1019 goto restart; 1019 goto restart;
1020 } 1020 }
1021 1021
diff --git a/mm/shmem.c b/mm/shmem.c
index f7ac7b812f92..7c455fbaff7b 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -45,6 +45,7 @@
45#include <linux/swapops.h> 45#include <linux/swapops.h>
46#include <linux/mempolicy.h> 46#include <linux/mempolicy.h>
47#include <linux/namei.h> 47#include <linux/namei.h>
48#include <linux/ctype.h>
48#include <asm/uaccess.h> 49#include <asm/uaccess.h>
49#include <asm/div64.h> 50#include <asm/div64.h>
50#include <asm/pgtable.h> 51#include <asm/pgtable.h>
@@ -874,6 +875,51 @@ redirty:
874} 875}
875 876
876#ifdef CONFIG_NUMA 877#ifdef CONFIG_NUMA
878static int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
879{
880 char *nodelist = strchr(value, ':');
881 int err = 1;
882
883 if (nodelist) {
884 /* NUL-terminate policy string */
885 *nodelist++ = '\0';
886 if (nodelist_parse(nodelist, *policy_nodes))
887 goto out;
888 }
889 if (!strcmp(value, "default")) {
890 *policy = MPOL_DEFAULT;
891 /* Don't allow a nodelist */
892 if (!nodelist)
893 err = 0;
894 } else if (!strcmp(value, "prefer")) {
895 *policy = MPOL_PREFERRED;
896 /* Insist on a nodelist of one node only */
897 if (nodelist) {
898 char *rest = nodelist;
899 while (isdigit(*rest))
900 rest++;
901 if (!*rest)
902 err = 0;
903 }
904 } else if (!strcmp(value, "bind")) {
905 *policy = MPOL_BIND;
906 /* Insist on a nodelist */
907 if (nodelist)
908 err = 0;
909 } else if (!strcmp(value, "interleave")) {
910 *policy = MPOL_INTERLEAVE;
911 /* Default to nodes online if no nodelist */
912 if (!nodelist)
913 *policy_nodes = node_online_map;
914 err = 0;
915 }
916out:
917 /* Restore string for error message */
918 if (nodelist)
919 *--nodelist = ':';
920 return err;
921}
922
877static struct page *shmem_swapin_async(struct shared_policy *p, 923static struct page *shmem_swapin_async(struct shared_policy *p,
878 swp_entry_t entry, unsigned long idx) 924 swp_entry_t entry, unsigned long idx)
879{ 925{
@@ -926,6 +972,11 @@ shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
926 return page; 972 return page;
927} 973}
928#else 974#else
975static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
976{
977 return 1;
978}
979
929static inline struct page * 980static inline struct page *
930shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx) 981shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
931{ 982{
@@ -1859,7 +1910,23 @@ static int shmem_parse_options(char *options, int *mode, uid_t *uid,
1859{ 1910{
1860 char *this_char, *value, *rest; 1911 char *this_char, *value, *rest;
1861 1912
1862 while ((this_char = strsep(&options, ",")) != NULL) { 1913 while (options != NULL) {
1914 this_char = options;
1915 for (;;) {
1916 /*
1917 * NUL-terminate this option: unfortunately,
1918 * mount options form a comma-separated list,
1919 * but mpol's nodelist may also contain commas.
1920 */
1921 options = strchr(options, ',');
1922 if (options == NULL)
1923 break;
1924 options++;
1925 if (!isdigit(*options)) {
1926 options[-1] = '\0';
1927 break;
1928 }
1929 }
1863 if (!*this_char) 1930 if (!*this_char)
1864 continue; 1931 continue;
1865 if ((value = strchr(this_char,'=')) != NULL) { 1932 if ((value = strchr(this_char,'=')) != NULL) {
@@ -1910,18 +1977,8 @@ static int shmem_parse_options(char *options, int *mode, uid_t *uid,
1910 if (*rest) 1977 if (*rest)
1911 goto bad_val; 1978 goto bad_val;
1912 } else if (!strcmp(this_char,"mpol")) { 1979 } else if (!strcmp(this_char,"mpol")) {
1913 if (!strcmp(value,"default")) 1980 if (shmem_parse_mpol(value,policy,policy_nodes))
1914 *policy = MPOL_DEFAULT;
1915 else if (!strcmp(value,"preferred"))
1916 *policy = MPOL_PREFERRED;
1917 else if (!strcmp(value,"bind"))
1918 *policy = MPOL_BIND;
1919 else if (!strcmp(value,"interleave"))
1920 *policy = MPOL_INTERLEAVE;
1921 else
1922 goto bad_val; 1981 goto bad_val;
1923 } else if (!strcmp(this_char,"mpol_nodelist")) {
1924 nodelist_parse(value, *policy_nodes);
1925 } else { 1982 } else {
1926 printk(KERN_ERR "tmpfs: Bad mount option %s\n", 1983 printk(KERN_ERR "tmpfs: Bad mount option %s\n",
1927 this_char); 1984 this_char);
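
The tmpfs hunks above fold the old mpol=/mpol_nodelist= pair of mount options into a single mpol=<policy>:<nodelist> form parsed by the new shmem_parse_mpol(), which is why shmem_parse_options() can no longer split blindly on every comma: a nodelist such as 1,3,5 contains commas of its own. The following user-space sketch reproduces just the splitting rule from the hunk (a comma ends an option only when the character after it is not a digit); the option string itself is invented for illustration.

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* A comma is an option separator only if it is not followed by a digit,
 * so "mpol=interleave:1,3,5" survives intact as one option. */
static void split_options(char *options)
{
    while (options != NULL) {
        char *this_opt = options;

        for (;;) {
            options = strchr(options, ',');
            if (options == NULL)
                break;
            options++;
            if (!isdigit((unsigned char)*options)) {
                options[-1] = '\0';
                break;
            }
        }
        if (*this_opt)
            printf("option: %s\n", this_opt);
    }
}

int main(void)
{
    char opts[] = "mpol=interleave:1,3,5,size=10M,mode=1777";
    split_options(opts);
    return 0;
}
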
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 6766f118f070..2144952d1c6c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -411,6 +411,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
411 C(pkt_type); 411 C(pkt_type);
412 C(ip_summed); 412 C(ip_summed);
413 C(priority); 413 C(priority);
414#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
415 C(ipvs_property);
416#endif
414 C(protocol); 417 C(protocol);
415 n->destructor = NULL; 418 n->destructor = NULL;
416#ifdef CONFIG_NETFILTER 419#ifdef CONFIG_NETFILTER
@@ -422,13 +425,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
422 C(nfct_reasm); 425 C(nfct_reasm);
423 nf_conntrack_get_reasm(skb->nfct_reasm); 426 nf_conntrack_get_reasm(skb->nfct_reasm);
424#endif 427#endif
425#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
426 C(ipvs_property);
427#endif
428#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
429 C(nfct_reasm);
430 nf_conntrack_get_reasm(skb->nfct_reasm);
431#endif
432#ifdef CONFIG_BRIDGE_NETFILTER 428#ifdef CONFIG_BRIDGE_NETFILTER
433 C(nf_bridge); 429 C(nf_bridge);
434 nf_bridge_get(skb->nf_bridge); 430 nf_bridge_get(skb->nf_bridge);
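
In skb_clone() the ipvs_property copy appears to be hoisted above the CONFIG_NETFILTER section, so the field is duplicated regardless of the netfilter configuration, and a second, duplicated block of ipvs_property/nfct_reasm copies is dropped, leaving each field copied exactly once. The C() helper used there is a plain field-copy macro; a stand-alone sketch of the pattern, with a simplified stand-in struct rather than the real sk_buff:

#include <stdio.h>
#include <string.h>

struct buf {
    int pkt_type;
    int priority;
    int ipvs_property;   /* conditional on CONFIG_IP_VS in the kernel */
    int protocol;
};

#define C(x) (n->x = skb->x)

static void buf_clone(struct buf *n, const struct buf *skb)
{
    memset(n, 0, sizeof(*n));
    C(pkt_type);
    C(priority);
    C(ipvs_property);   /* after the patch: copied once, ahead of the netfilter block */
    C(protocol);
}

int main(void)
{
    struct buf a = { 1, 2, 3, 4 }, b;
    buf_clone(&b, &a);
    printf("ipvs_property=%d protocol=%d\n", b.ipvs_property, b.protocol);
    return 0;
}
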
diff --git a/net/ipv4/netfilter/ip_nat_core.c b/net/ipv4/netfilter/ip_nat_core.c
index c1a61462507f..1741d555ad0d 100644
--- a/net/ipv4/netfilter/ip_nat_core.c
+++ b/net/ipv4/netfilter/ip_nat_core.c
@@ -434,6 +434,7 @@ int ip_nat_icmp_reply_translation(struct sk_buff **pskb,
434 } *inside; 434 } *inside;
435 struct ip_conntrack_tuple inner, target; 435 struct ip_conntrack_tuple inner, target;
436 int hdrlen = (*pskb)->nh.iph->ihl * 4; 436 int hdrlen = (*pskb)->nh.iph->ihl * 4;
437 unsigned long statusbit;
437 438
438 if (!skb_make_writable(pskb, hdrlen + sizeof(*inside))) 439 if (!skb_make_writable(pskb, hdrlen + sizeof(*inside)))
439 return 0; 440 return 0;
@@ -495,17 +496,16 @@ int ip_nat_icmp_reply_translation(struct sk_buff **pskb,
495 496
496 /* Change outer to look the reply to an incoming packet 497 /* Change outer to look the reply to an incoming packet
497 * (proto 0 means don't invert per-proto part). */ 498 * (proto 0 means don't invert per-proto part). */
499 if (manip == IP_NAT_MANIP_SRC)
500 statusbit = IPS_SRC_NAT;
501 else
502 statusbit = IPS_DST_NAT;
498 503
499 /* Obviously, we need to NAT destination IP, but source IP 504 /* Invert if this is reply dir. */
500 should be NAT'ed only if it is from a NAT'd host. 505 if (dir == IP_CT_DIR_REPLY)
506 statusbit ^= IPS_NAT_MASK;
501 507
502 Explanation: some people use NAT for anonymizing. Also, 508 if (ct->status & statusbit) {
503 CERT recommends dropping all packets from private IP
504 addresses (although ICMP errors from internal links with
505 such addresses are not too uncommon, as Alan Cox points
506 out) */
507 if (manip != IP_NAT_MANIP_SRC
508 || ((*pskb)->nh.iph->saddr == ct->tuplehash[dir].tuple.src.ip)) {
509 invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); 509 invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
510 if (!manip_pkt(0, pskb, 0, &target, manip)) 510 if (!manip_pkt(0, pskb, 0, &target, manip))
511 return 0; 511 return 0;
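
The ICMP-error translation above replaces the old source-address heuristic with a check on the conntrack status bits: choose IPS_SRC_NAT or IPS_DST_NAT according to the manipulation type, flip it with IPS_NAT_MASK when the packet travels in the reply direction, and rewrite the outer header only if that bit is set on the connection. A user-space sketch of just that bit logic; the constants are stand-ins (chosen so the two bits together form the mask, which is the property the XOR relies on), not the kernel's real values.

#include <stdbool.h>
#include <stdio.h>

#define IPS_SRC_NAT  0x1
#define IPS_DST_NAT  0x2
#define IPS_NAT_MASK (IPS_SRC_NAT | IPS_DST_NAT)

enum manip_type { IP_NAT_MANIP_SRC, IP_NAT_MANIP_DST };
enum ct_dir     { IP_CT_DIR_ORIGINAL, IP_CT_DIR_REPLY };

static bool needs_translation(unsigned long ct_status,
                              enum manip_type manip, enum ct_dir dir)
{
    unsigned long statusbit = (manip == IP_NAT_MANIP_SRC) ? IPS_SRC_NAT
                                                          : IPS_DST_NAT;
    /* In the reply direction the roles of source and destination swap,
     * so flip the bit within the NAT mask. */
    if (dir == IP_CT_DIR_REPLY)
        statusbit ^= IPS_NAT_MASK;

    return (ct_status & statusbit) != 0;
}

int main(void)
{
    /* A DNAT'ed connection: the destination is rewritten in the original
     * direction, the source in the reply direction. */
    unsigned long status = IPS_DST_NAT;

    printf("%d\n", needs_translation(status, IP_NAT_MANIP_DST, IP_CT_DIR_ORIGINAL));
    printf("%d\n", needs_translation(status, IP_NAT_MANIP_SRC, IP_CT_DIR_REPLY));
    return 0;
}
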
diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c
index 7c3f7d380240..ab1f88fa21ec 100644
--- a/net/ipv4/netfilter/ip_nat_standalone.c
+++ b/net/ipv4/netfilter/ip_nat_standalone.c
@@ -200,20 +200,14 @@ ip_nat_in(unsigned int hooknum,
200 const struct net_device *out, 200 const struct net_device *out,
201 int (*okfn)(struct sk_buff *)) 201 int (*okfn)(struct sk_buff *))
202{ 202{
203 struct ip_conntrack *ct;
204 enum ip_conntrack_info ctinfo;
205 unsigned int ret; 203 unsigned int ret;
204 u_int32_t daddr = (*pskb)->nh.iph->daddr;
206 205
207 ret = ip_nat_fn(hooknum, pskb, in, out, okfn); 206 ret = ip_nat_fn(hooknum, pskb, in, out, okfn);
208 if (ret != NF_DROP && ret != NF_STOLEN 207 if (ret != NF_DROP && ret != NF_STOLEN
209 && (ct = ip_conntrack_get(*pskb, &ctinfo)) != NULL) { 208 && daddr != (*pskb)->nh.iph->daddr) {
210 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 209 dst_release((*pskb)->dst);
211 210 (*pskb)->dst = NULL;
212 if (ct->tuplehash[dir].tuple.dst.ip !=
213 ct->tuplehash[!dir].tuple.src.ip) {
214 dst_release((*pskb)->dst);
215 (*pskb)->dst = NULL;
216 }
217 } 211 }
218 return ret; 212 return ret;
219} 213}
@@ -276,7 +270,7 @@ ip_nat_local_fn(unsigned int hooknum,
276 ct->tuplehash[!dir].tuple.src.ip 270 ct->tuplehash[!dir].tuple.src.ip
277#ifdef CONFIG_XFRM 271#ifdef CONFIG_XFRM
278 || ct->tuplehash[dir].tuple.dst.u.all != 272 || ct->tuplehash[dir].tuple.dst.u.all !=
279 ct->tuplehash[dir].tuple.src.u.all 273 ct->tuplehash[!dir].tuple.src.u.all
280#endif 274#endif
281 ) 275 )
282 return ip_route_me_harder(pskb) == 0 ? ret : NF_DROP; 276 return ip_route_me_harder(pskb) == 0 ? ret : NF_DROP;
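
Two separate fixes sit in the ip_nat_standalone.c hunk: ip_nat_in() now simply records the destination address before running the NAT hook and releases the cached route if the address came back different, instead of consulting the conntrack tuples; and ip_nat_local_fn() compares against the reply-direction tuple (tuplehash[!dir]) in its CONFIG_XFRM check, which reads like a typo fix. A small user-space sketch of the first change, with stand-in packet and route types:

#include <stdint.h>
#include <stdio.h>

struct pkt { uint32_t daddr; void *dst; };

/* Stand-in for ip_nat_fn(): pretend DNAT rewrote the destination. */
static void nat_hook(struct pkt *p)
{
    p->daddr = 0x0a000002;
}

static void ip_nat_in(struct pkt *p)
{
    uint32_t daddr = p->daddr;

    nat_hook(p);
    if (daddr != p->daddr) {
        /* Destination changed: the route looked up earlier no longer
         * applies, so drop it and force a fresh lookup later. */
        p->dst = NULL;
    }
}

int main(void)
{
    struct pkt p = { 0x0a000001, (void *)0x1 };
    ip_nat_in(&p);
    printf("dst %s\n", p.dst ? "kept" : "released");
    return 0;
}
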
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 92ead3cf956b..faea8a120ee2 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -458,7 +458,7 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
458 mtu = IPV6_MIN_MTU; 458 mtu = IPV6_MIN_MTU;
459 t->dev->mtu = mtu; 459 t->dev->mtu = mtu;
460 460
461 if ((len = sizeof (*ipv6h) + ipv6h->payload_len) > mtu) { 461 if ((len = sizeof (*ipv6h) + ntohs(ipv6h->payload_len)) > mtu) {
462 rel_type = ICMPV6_PKT_TOOBIG; 462 rel_type = ICMPV6_PKT_TOOBIG;
463 rel_code = 0; 463 rel_code = 0;
464 rel_info = mtu; 464 rel_info = mtu;
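
The ip6_tunnel.c change is a byte-order fix: payload_len sits in the IPv6 header in network order, so it must pass through ntohs() before being added to the header size and compared against the MTU. A small sketch of why the unconverted value misbehaves on a little-endian host; the lengths are made up for illustration:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t wire_payload_len = htons(1240);   /* as found in the header */
    unsigned int hdr = 40;                     /* sizeof(struct ipv6hdr) */
    unsigned int mtu = 1280;

    printf("without ntohs: %u > %u ? %d\n",
           hdr + wire_payload_len, mtu, hdr + wire_payload_len > mtu);
    printf("with    ntohs: %u > %u ? %d\n",
           hdr + ntohs(wire_payload_len), mtu,
           hdr + ntohs(wire_payload_len) > mtu);
    return 0;
}
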
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 98ec53bd3ac7..5e6b05ac1260 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -885,8 +885,6 @@ restart:
885 * We can't enlist stable bundles either. 885 * We can't enlist stable bundles either.
886 */ 886 */
887 write_unlock_bh(&policy->lock); 887 write_unlock_bh(&policy->lock);
888
889 xfrm_pol_put(policy);
890 if (dst) 888 if (dst)
891 dst_free(dst); 889 dst_free(dst);
892 890
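
The xfrm_policy.c hunk removes an xfrm_pol_put() on this restart path, which looks like a fix for a reference being dropped one time more than it was taken. A generic user-space illustration of that hazard with a stand-in refcount, not the xfrm code itself:

#include <stdio.h>

struct policy { int refcnt; };

static void pol_put(struct policy *p)
{
    if (--p->refcnt == 0)
        printf("policy freed\n");
    else if (p->refcnt < 0)
        printf("BUG: refcount underflow (%d)\n", p->refcnt);
}

int main(void)
{
    struct policy pol = { .refcnt = 1 };

    /* error path before the fix: put here ... */
    pol_put(&pol);
    /* ... and put again on the shared exit path */
    pol_put(&pol);
    return 0;
}
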
diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c
index 418c6d4e5daf..a529b62972b4 100644
--- a/sound/core/control_compat.c
+++ b/sound/core/control_compat.c
@@ -167,7 +167,7 @@ static int get_ctl_type(struct snd_card *card, struct snd_ctl_elem_id *id,
167 int *countp) 167 int *countp)
168{ 168{
169 struct snd_kcontrol *kctl; 169 struct snd_kcontrol *kctl;
170 struct snd_ctl_elem_info info; 170 struct snd_ctl_elem_info *info;
171 int err; 171 int err;
172 172
173 down_read(&card->controls_rwsem); 173 down_read(&card->controls_rwsem);
@@ -176,13 +176,19 @@ static int get_ctl_type(struct snd_card *card, struct snd_ctl_elem_id *id,
176 up_read(&card->controls_rwsem); 176 up_read(&card->controls_rwsem);
177 return -ENXIO; 177 return -ENXIO;
178 } 178 }
179 info.id = *id; 179 info = kzalloc(sizeof(*info), GFP_KERNEL);
180 err = kctl->info(kctl, &info); 180 if (info == NULL) {
181 up_read(&card->controls_rwsem);
182 return -ENOMEM;
183 }
184 info->id = *id;
185 err = kctl->info(kctl, info);
181 up_read(&card->controls_rwsem); 186 up_read(&card->controls_rwsem);
182 if (err >= 0) { 187 if (err >= 0) {
183 err = info.type; 188 err = info->type;
184 *countp = info.count; 189 *countp = info->count;
185 } 190 }
191 kfree(info);
186 return err; 192 return err;
187} 193}
188 194
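
Finally, get_ctl_type() in control_compat.c switches from an on-stack struct snd_ctl_elem_info to a kzalloc()'d one, presumably to keep a large structure off the kernel stack, and gains the matching -ENOMEM failure path plus the kfree() before returning. A user-space sketch of the same pattern, with calloc()/free() standing in for kzalloc()/kfree() and an invented info structure:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct elem_info {
    int type;
    int count;
    char value[512];   /* illustrative: large enough to matter on a kernel stack */
};

/* Stand-in for kctl->info(): fills in the element description. */
static int query_info(struct elem_info *info)
{
    info->type = 1;
    info->count = 2;
    return 0;
}

static int get_ctl_type(int *countp)
{
    struct elem_info *info;
    int err;

    info = calloc(1, sizeof(*info));   /* kzalloc(..., GFP_KERNEL) in the patch */
    if (info == NULL)
        return -ENOMEM;

    err = query_info(info);
    if (err >= 0) {
        err = info->type;
        *countp = info->count;
    }
    free(info);                        /* kfree(info) in the patch */
    return err;
}

int main(void)
{
    int count = 0;
    int type = get_ctl_type(&count);
    printf("type=%d count=%d\n", type, count);
    return 0;
}
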