-rw-r--r--  Documentation/sparse.txt | 10
-rw-r--r--  arch/i386/kernel/acpi/earlyquirk.c | 7
-rw-r--r--  arch/mips/kernel/linux32.c | 46
-rw-r--r--  arch/um/drivers/daemon_user.c | 17
-rw-r--r--  arch/um/drivers/line.c | 6
-rw-r--r--  arch/um/drivers/mcast_user.c | 10
-rw-r--r--  arch/um/drivers/ssl.c | 2
-rw-r--r--  arch/um/drivers/stdio_console.c | 2
-rw-r--r--  arch/um/include/os.h | 3
-rw-r--r--  arch/um/kernel/irq.c | 1
-rw-r--r--  arch/um/os-Linux/process.c | 3
-rw-r--r--  arch/um/os-Linux/signal.c | 5
-rw-r--r--  arch/um/sys-x86_64/syscalls.c | 6
-rw-r--r--  arch/x86_64/ia32/ptrace32.c | 1
-rw-r--r--  arch/x86_64/kernel/early-quirks.c | 9
-rw-r--r--  drivers/ata/ata_piix.c | 11
-rw-r--r--  drivers/ata/libata-acpi.c | 17
-rw-r--r--  drivers/ata/libata-core.c | 19
-rw-r--r--  drivers/ata/sata_nv.c | 8
-rw-r--r--  drivers/base/core.c | 7
-rw-r--r--  drivers/infiniband/core/cma.c | 2
-rw-r--r--  drivers/infiniband/core/ucma.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.c | 1
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c | 19
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_ev.c | 12
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c | 40
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.h | 33
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_qp.c | 2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h | 6
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_cq.c | 16
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c | 59
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c | 4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c | 10
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 5
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_verbs.c | 13
-rw-r--r--  drivers/input/serio/i8042.c | 10
-rw-r--r--  drivers/net/3c59x.c | 28
-rw-r--r--  drivers/net/mv643xx_eth.c | 10
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 42
-rw-r--r--  drivers/net/netxen/netxen_nic_hw.c | 5
-rw-r--r--  drivers/net/r8169.c | 14
-rw-r--r--  drivers/net/sky2.c | 24
-rw-r--r--  drivers/net/tokenring/ibmtr.c | 25
-rw-r--r--  drivers/net/via-rhine.c | 32
-rw-r--r--  drivers/net/wan/z85230.c | 2
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.c | 6
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_phy.c | 10
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_wx.c | 10
-rw-r--r--  drivers/serial/sn_console.c | 52
-rw-r--r--  fs/compat.c | 100
-rw-r--r--  fs/ecryptfs/inode.c | 2
-rw-r--r--  fs/hostfs/hostfs_kern.c | 17
-rw-r--r--  fs/partitions/check.c | 2
-rw-r--r--  include/asm-sparc64/parport.h | 11
-rw-r--r--  include/linux/compat.h | 19
-rw-r--r--  net/bluetooth/hci_sock.c | 4
-rw-r--r--  net/sctp/ipv6.c | 4
57 files changed, 527 insertions(+), 316 deletions(-)
diff --git a/Documentation/sparse.txt b/Documentation/sparse.txt
index f9c99c9a54f9..1a3bdc27d95e 100644
--- a/Documentation/sparse.txt
+++ b/Documentation/sparse.txt
@@ -45,11 +45,15 @@ special.
45Getting sparse 45Getting sparse
46~~~~~~~~~~~~~~ 46~~~~~~~~~~~~~~
47 47
48With git, you can just get it from 48You can get latest released versions from the Sparse homepage at
49http://www.kernel.org/pub/linux/kernel/people/josh/sparse/
49 50
50 rsync://rsync.kernel.org/pub/scm/devel/sparse/sparse.git 51Alternatively, you can get snapshots of the latest development version
52of sparse using git to clone..
51 53
52and DaveJ has tar-balls at 54 git://git.kernel.org/pub/scm/linux/kernel/git/josh/sparse.git
55
56DaveJ has hourly generated tarballs of the git tree available at..
53 57
54 http://www.codemonkey.org.uk/projects/git-snapshots/sparse/ 58 http://www.codemonkey.org.uk/projects/git-snapshots/sparse/
55 59
diff --git a/arch/i386/kernel/acpi/earlyquirk.c b/arch/i386/kernel/acpi/earlyquirk.c
index bf86f7662d8b..a7d22d9f3d7e 100644
--- a/arch/i386/kernel/acpi/earlyquirk.c
+++ b/arch/i386/kernel/acpi/earlyquirk.c
@@ -14,11 +14,8 @@
14 14
15#ifdef CONFIG_ACPI 15#ifdef CONFIG_ACPI
16 16
17static int nvidia_hpet_detected __initdata;
18
19static int __init nvidia_hpet_check(struct acpi_table_header *header) 17static int __init nvidia_hpet_check(struct acpi_table_header *header)
20{ 18{
21 nvidia_hpet_detected = 1;
22 return 0; 19 return 0;
23} 20}
24#endif 21#endif
@@ -29,9 +26,7 @@ static int __init check_bridge(int vendor, int device)
29 /* According to Nvidia all timer overrides are bogus unless HPET 26 /* According to Nvidia all timer overrides are bogus unless HPET
30 is enabled. */ 27 is enabled. */
31 if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) { 28 if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) {
32 nvidia_hpet_detected = 0; 29 if (acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check)) {
33 acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check);
34 if (nvidia_hpet_detected == 0) {
35 acpi_skip_timer_override = 1; 30 acpi_skip_timer_override = 1;
36 printk(KERN_INFO "Nvidia board " 31 printk(KERN_INFO "Nvidia board "
37 "detected. Ignoring ACPI " 32 "detected. Ignoring ACPI "
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 30d433f14f93..1df544c1f966 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -564,49 +564,3 @@ _sys32_clone(nabi_no_regargs struct pt_regs regs)
564 return do_fork(clone_flags, newsp, &regs, 0, 564 return do_fork(clone_flags, newsp, &regs, 0,
565 parent_tidptr, child_tidptr); 565 parent_tidptr, child_tidptr);
566} 566}
567
568/*
569 * Implement the event wait interface for the eventpoll file. It is the kernel
570 * part of the user space epoll_pwait(2).
571 */
572asmlinkage long compat_sys_epoll_pwait(int epfd,
573 struct epoll_event __user *events, int maxevents, int timeout,
574 const compat_sigset_t __user *sigmask, size_t sigsetsize)
575{
576 int error;
577 sigset_t ksigmask, sigsaved;
578
579 /*
580 * If the caller wants a certain signal mask to be set during the wait,
581 * we apply it here.
582 */
583 if (sigmask) {
584 if (sigsetsize != sizeof(sigset_t))
585 return -EINVAL;
586 if (!access_ok(VERIFY_READ, sigmask, sizeof(ksigmask)))
587 return -EFAULT;
588 if (__copy_conv_sigset_from_user(&ksigmask, sigmask))
589 return -EFAULT;
590 sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
591 sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
592 }
593
594 error = sys_epoll_wait(epfd, events, maxevents, timeout);
595
596 /*
597 * If we changed the signal mask, we need to restore the original one.
598 * In case we've got a signal while waiting, we do not restore the
599 * signal mask yet, and we allow do_signal() to deliver the signal on
600 * the way back to userspace, before the signal mask is restored.
601 */
602 if (sigmask) {
603 if (error == -EINTR) {
604 memcpy(&current->saved_sigmask, &sigsaved,
605 sizeof(sigsaved));
606 set_thread_flag(TIF_RESTORE_SIGMASK);
607 } else
608 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
609 }
610
611 return error;
612}
diff --git a/arch/um/drivers/daemon_user.c b/arch/um/drivers/daemon_user.c
index 310af0f1e49e..021b82c7a759 100644
--- a/arch/um/drivers/daemon_user.c
+++ b/arch/um/drivers/daemon_user.c
@@ -56,30 +56,31 @@ static int connect_to_switch(struct daemon_data *pri)
56 56
57 pri->control = socket(AF_UNIX, SOCK_STREAM, 0); 57 pri->control = socket(AF_UNIX, SOCK_STREAM, 0);
58 if(pri->control < 0){ 58 if(pri->control < 0){
59 err = -errno;
59 printk("daemon_open : control socket failed, errno = %d\n", 60 printk("daemon_open : control socket failed, errno = %d\n",
60 errno); 61 -err);
61 return(-errno); 62 return err;
62 } 63 }
63 64
64 if(connect(pri->control, (struct sockaddr *) ctl_addr, 65 if(connect(pri->control, (struct sockaddr *) ctl_addr,
65 sizeof(*ctl_addr)) < 0){ 66 sizeof(*ctl_addr)) < 0){
66 printk("daemon_open : control connect failed, errno = %d\n",
67 errno);
68 err = -errno; 67 err = -errno;
68 printk("daemon_open : control connect failed, errno = %d\n",
69 -err);
69 goto out; 70 goto out;
70 } 71 }
71 72
72 fd = socket(AF_UNIX, SOCK_DGRAM, 0); 73 fd = socket(AF_UNIX, SOCK_DGRAM, 0);
73 if(fd < 0){ 74 if(fd < 0){
74 printk("daemon_open : data socket failed, errno = %d\n",
75 errno);
76 err = -errno; 75 err = -errno;
76 printk("daemon_open : data socket failed, errno = %d\n",
77 -err);
77 goto out; 78 goto out;
78 } 79 }
79 if(bind(fd, (struct sockaddr *) local_addr, sizeof(*local_addr)) < 0){ 80 if(bind(fd, (struct sockaddr *) local_addr, sizeof(*local_addr)) < 0){
80 printk("daemon_open : data bind failed, errno = %d\n",
81 errno);
82 err = -errno; 81 err = -errno;
82 printk("daemon_open : data bind failed, errno = %d\n",
83 -err);
83 goto out_close; 84 goto out_close;
84 } 85 }
85 86
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 01d4ab6b0ef1..f75d7b05c481 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -370,10 +370,10 @@ static irqreturn_t line_write_interrupt(int irq, void *data)
370 struct tty_struct *tty = line->tty; 370 struct tty_struct *tty = line->tty;
371 int err; 371 int err;
372 372
373 /* Interrupts are enabled here because we registered the interrupt with 373 /* Interrupts are disabled here because we registered the interrupt with
374 * IRQF_DISABLED (see line_setup_irq).*/ 374 * IRQF_DISABLED (see line_setup_irq).*/
375 375
376 spin_lock_irq(&line->lock); 376 spin_lock(&line->lock);
377 err = flush_buffer(line); 377 err = flush_buffer(line);
378 if (err == 0) { 378 if (err == 0) {
379 return IRQ_NONE; 379 return IRQ_NONE;
@@ -381,7 +381,7 @@ static irqreturn_t line_write_interrupt(int irq, void *data)
381 line->head = line->buffer; 381 line->head = line->buffer;
382 line->tail = line->buffer; 382 line->tail = line->buffer;
383 } 383 }
384 spin_unlock_irq(&line->lock); 384 spin_unlock(&line->lock);
385 385
386 if(tty == NULL) 386 if(tty == NULL)
387 return IRQ_NONE; 387 return IRQ_NONE;
diff --git a/arch/um/drivers/mcast_user.c b/arch/um/drivers/mcast_user.c
index 8138f5ea1bf7..b827e82884c9 100644
--- a/arch/um/drivers/mcast_user.c
+++ b/arch/um/drivers/mcast_user.c
@@ -50,6 +50,14 @@ static void mcast_user_init(void *data, void *dev)
50 pri->dev = dev; 50 pri->dev = dev;
51} 51}
52 52
53static void mcast_remove(void *data)
54{
55 struct mcast_data *pri = data;
56
57 kfree(pri->mcast_addr);
58 pri->mcast_addr = NULL;
59}
60
53static int mcast_open(void *data) 61static int mcast_open(void *data)
54{ 62{
55 struct mcast_data *pri = data; 63 struct mcast_data *pri = data;
@@ -157,7 +165,7 @@ const struct net_user_info mcast_user_info = {
157 .init = mcast_user_init, 165 .init = mcast_user_init,
158 .open = mcast_open, 166 .open = mcast_open,
159 .close = mcast_close, 167 .close = mcast_close,
160 .remove = NULL, 168 .remove = mcast_remove,
161 .set_mtu = mcast_set_mtu, 169 .set_mtu = mcast_set_mtu,
162 .add_address = NULL, 170 .add_address = NULL,
163 .delete_address = NULL, 171 .delete_address = NULL,
diff --git a/arch/um/drivers/ssl.c b/arch/um/drivers/ssl.c
index fc22b9bd9153..4b382a6e710f 100644
--- a/arch/um/drivers/ssl.c
+++ b/arch/um/drivers/ssl.c
@@ -179,7 +179,7 @@ static struct console ssl_cons = {
179 .write = ssl_console_write, 179 .write = ssl_console_write,
180 .device = ssl_console_device, 180 .device = ssl_console_device,
181 .setup = ssl_console_setup, 181 .setup = ssl_console_setup,
182 .flags = CON_PRINTBUFFER, 182 .flags = CON_PRINTBUFFER|CON_ANYTIME,
183 .index = -1, 183 .index = -1,
184}; 184};
185 185
diff --git a/arch/um/drivers/stdio_console.c b/arch/um/drivers/stdio_console.c
index 7ff0b0fc37e7..76d1f1c980ef 100644
--- a/arch/um/drivers/stdio_console.c
+++ b/arch/um/drivers/stdio_console.c
@@ -153,7 +153,7 @@ static struct console stdiocons = {
153 .write = uml_console_write, 153 .write = uml_console_write,
154 .device = uml_console_device, 154 .device = uml_console_device,
155 .setup = uml_console_setup, 155 .setup = uml_console_setup,
156 .flags = CON_PRINTBUFFER, 156 .flags = CON_PRINTBUFFER|CON_ANYTIME,
157 .index = -1, 157 .index = -1,
158}; 158};
159 159
diff --git a/arch/um/include/os.h b/arch/um/include/os.h
index 8629bd191492..5c74da410451 100644
--- a/arch/um/include/os.h
+++ b/arch/um/include/os.h
@@ -192,7 +192,9 @@ extern int os_process_parent(int pid);
192extern void os_stop_process(int pid); 192extern void os_stop_process(int pid);
193extern void os_kill_process(int pid, int reap_child); 193extern void os_kill_process(int pid, int reap_child);
194extern void os_kill_ptraced_process(int pid, int reap_child); 194extern void os_kill_ptraced_process(int pid, int reap_child);
195#ifdef UML_CONFIG_MODE_TT
195extern void os_usr1_process(int pid); 196extern void os_usr1_process(int pid);
197#endif
196extern long os_ptrace_ldt(long pid, long addr, long data); 198extern long os_ptrace_ldt(long pid, long addr, long data);
197 199
198extern int os_getpid(void); 200extern int os_getpid(void);
@@ -261,7 +263,6 @@ extern void block_signals(void);
261extern void unblock_signals(void); 263extern void unblock_signals(void);
262extern int get_signals(void); 264extern int get_signals(void);
263extern int set_signals(int enable); 265extern int set_signals(int enable);
264extern void os_usr1_signal(int on);
265 266
266/* trap.c */ 267/* trap.c */
267extern void os_fill_handlinfo(struct kern_handlers h); 268extern void os_fill_handlinfo(struct kern_handlers h);
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 50a288bb875a..dbf2f5bc842f 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -142,6 +142,7 @@ int activate_fd(int irq, int fd, int type, void *dev_id)
142 .events = events, 142 .events = events,
143 .current_events = 0 } ); 143 .current_events = 0 } );
144 144
145 err = -EBUSY;
145 spin_lock_irqsave(&irq_lock, flags); 146 spin_lock_irqsave(&irq_lock, flags);
146 for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) { 147 for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
147 if ((irq_fd->fd == fd) && (irq_fd->type == type)) { 148 if ((irq_fd->fd == fd) && (irq_fd->type == type)) {
diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c
index c692a192957a..76bdd6712417 100644
--- a/arch/um/os-Linux/process.c
+++ b/arch/um/os-Linux/process.c
@@ -21,6 +21,7 @@
21#include "longjmp.h" 21#include "longjmp.h"
22#include "skas_ptrace.h" 22#include "skas_ptrace.h"
23#include "kern_constants.h" 23#include "kern_constants.h"
24#include "uml-config.h"
24 25
25#define ARBITRARY_ADDR -1 26#define ARBITRARY_ADDR -1
26#define FAILURE_PID -1 27#define FAILURE_PID -1
@@ -131,10 +132,12 @@ void os_kill_ptraced_process(int pid, int reap_child)
131 CATCH_EINTR(waitpid(pid, NULL, 0)); 132 CATCH_EINTR(waitpid(pid, NULL, 0));
132} 133}
133 134
135#ifdef UML_CONFIG_MODE_TT
134void os_usr1_process(int pid) 136void os_usr1_process(int pid)
135{ 137{
136 kill(pid, SIGUSR1); 138 kill(pid, SIGUSR1);
137} 139}
140#endif
138 141
139/* Don't use the glibc version, which caches the result in TLS. It misses some 142/* Don't use the glibc version, which caches the result in TLS. It misses some
140 * syscalls, and also breaks with clone(), which does not unshare the TLS. 143 * syscalls, and also breaks with clone(), which does not unshare the TLS.
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index b897e8592d77..266768629fee 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -243,8 +243,3 @@ int set_signals(int enable)
243 243
244 return ret; 244 return ret;
245} 245}
246
247void os_usr1_signal(int on)
248{
249 change_sig(SIGUSR1, on);
250}
diff --git a/arch/um/sys-x86_64/syscalls.c b/arch/um/sys-x86_64/syscalls.c
index 01b91f9fa789..b3f6350cac44 100644
--- a/arch/um/sys-x86_64/syscalls.c
+++ b/arch/um/sys-x86_64/syscalls.c
@@ -103,6 +103,9 @@ long arch_prctl_skas(struct task_struct *task, int code,
103 103
104 switch(code){ 104 switch(code){
105 case ARCH_SET_FS: 105 case ARCH_SET_FS:
106 current->thread.arch.fs = (unsigned long) ptr;
107 save_registers(pid, &current->thread.regs.regs);
108 break;
106 case ARCH_SET_GS: 109 case ARCH_SET_GS:
107 save_registers(pid, &current->thread.regs.regs); 110 save_registers(pid, &current->thread.regs.regs);
108 break; 111 break;
@@ -140,9 +143,8 @@ long sys_clone(unsigned long clone_flags, unsigned long newsp,
140 143
141void arch_switch_to_skas(struct task_struct *from, struct task_struct *to) 144void arch_switch_to_skas(struct task_struct *from, struct task_struct *to)
142{ 145{
143 if(to->thread.arch.fs == 0) 146 if((to->thread.arch.fs == 0) || (to->mm == NULL))
144 return; 147 return;
145 148
146 arch_prctl_skas(to, ARCH_SET_FS, (void __user *) to->thread.arch.fs); 149 arch_prctl_skas(to, ARCH_SET_FS, (void __user *) to->thread.arch.fs);
147} 150}
148
diff --git a/arch/x86_64/ia32/ptrace32.c b/arch/x86_64/ia32/ptrace32.c
index 04566fe5de49..4de3a54318f4 100644
--- a/arch/x86_64/ia32/ptrace32.c
+++ b/arch/x86_64/ia32/ptrace32.c
@@ -243,6 +243,7 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
243 case PTRACE_SINGLESTEP: 243 case PTRACE_SINGLESTEP:
244 case PTRACE_DETACH: 244 case PTRACE_DETACH:
245 case PTRACE_SYSCALL: 245 case PTRACE_SYSCALL:
246 case PTRACE_OLDSETOPTIONS:
246 case PTRACE_SETOPTIONS: 247 case PTRACE_SETOPTIONS:
247 case PTRACE_SET_THREAD_AREA: 248 case PTRACE_SET_THREAD_AREA:
248 case PTRACE_GET_THREAD_AREA: 249 case PTRACE_GET_THREAD_AREA:
diff --git a/arch/x86_64/kernel/early-quirks.c b/arch/x86_64/kernel/early-quirks.c
index 8047ea8c2ab2..dec587b293bf 100644
--- a/arch/x86_64/kernel/early-quirks.c
+++ b/arch/x86_64/kernel/early-quirks.c
@@ -30,11 +30,8 @@ static void via_bugs(void)
30 30
31#ifdef CONFIG_ACPI 31#ifdef CONFIG_ACPI
32 32
33static int nvidia_hpet_detected __initdata;
34
35static int __init nvidia_hpet_check(struct acpi_table_header *header) 33static int __init nvidia_hpet_check(struct acpi_table_header *header)
36{ 34{
37 nvidia_hpet_detected = 1;
38 return 0; 35 return 0;
39} 36}
40#endif 37#endif
@@ -52,11 +49,7 @@ static void nvidia_bugs(void)
52 if (acpi_use_timer_override) 49 if (acpi_use_timer_override)
53 return; 50 return;
54 51
55 nvidia_hpet_detected = 0; 52 if (acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check)) {
56 if (acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check))
57 return;
58
59 if (nvidia_hpet_detected == 0) {
60 acpi_skip_timer_override = 1; 53 acpi_skip_timer_override = 1;
61 printk(KERN_INFO "Nvidia board " 54 printk(KERN_INFO "Nvidia board "
62 "detected. Ignoring ACPI " 55 "detected. Ignoring ACPI "
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index dc42ba1b46f7..b952c584338f 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -93,7 +93,7 @@
93#include <linux/libata.h> 93#include <linux/libata.h>
94 94
95#define DRV_NAME "ata_piix" 95#define DRV_NAME "ata_piix"
96#define DRV_VERSION "2.10" 96#define DRV_VERSION "2.10ac1"
97 97
98enum { 98enum {
99 PIIX_IOCFG = 0x54, /* IDE I/O configuration register */ 99 PIIX_IOCFG = 0x54, /* IDE I/O configuration register */
@@ -667,14 +667,9 @@ static int ich_pata_prereset(struct ata_port *ap)
667{ 667{
668 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 668 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
669 669
670 if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no])) { 670 if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no]))
671 ata_port_printk(ap, KERN_INFO, "port disabled. ignoring.\n"); 671 return -ENOENT;
672 ap->eh_context.i.action &= ~ATA_EH_RESET_MASK;
673 return 0;
674 }
675
676 ich_pata_cbl_detect(ap); 672 ich_pata_cbl_detect(ap);
677
678 return ata_std_prereset(ap); 673 return ata_std_prereset(ap);
679} 674}
680 675
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index d14a48e75f1b..019d8ffdde50 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -34,6 +34,13 @@ struct taskfile_array {
34 u8 tfa[REGS_PER_GTF]; /* regs. 0x1f1 - 0x1f7 */ 34 u8 tfa[REGS_PER_GTF]; /* regs. 0x1f1 - 0x1f7 */
35}; 35};
36 36
37/*
38 * Helper - belongs in the PCI layer somewhere eventually
39 */
40static int is_pci_dev(struct device *dev)
41{
42 return (dev->bus == &pci_bus_type);
43}
37 44
38/** 45/**
39 * sata_get_dev_handle - finds acpi_handle and PCI device.function 46 * sata_get_dev_handle - finds acpi_handle and PCI device.function
@@ -53,6 +60,9 @@ static int sata_get_dev_handle(struct device *dev, acpi_handle *handle,
53 struct pci_dev *pci_dev; 60 struct pci_dev *pci_dev;
54 acpi_integer addr; 61 acpi_integer addr;
55 62
63 if (!is_pci_dev(dev))
64 return -ENODEV;
65
56 pci_dev = to_pci_dev(dev); /* NOTE: PCI-specific */ 66 pci_dev = to_pci_dev(dev); /* NOTE: PCI-specific */
57 /* Please refer to the ACPI spec for the syntax of _ADR. */ 67 /* Please refer to the ACPI spec for the syntax of _ADR. */
58 addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn); 68 addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
@@ -84,7 +94,12 @@ static int pata_get_dev_handle(struct device *dev, acpi_handle *handle,
84 acpi_status status; 94 acpi_status status;
85 struct acpi_device_info *dinfo = NULL; 95 struct acpi_device_info *dinfo = NULL;
86 int ret = -ENODEV; 96 int ret = -ENODEV;
87 struct pci_dev *pdev = to_pci_dev(dev); 97 struct pci_dev *pdev;
98
99 if (!is_pci_dev(dev))
100 return -ENODEV;
101
102 pdev = to_pci_dev(dev);
88 103
89 bus = pdev->bus->number; 104 bus = pdev->bus->number;
90 devnum = PCI_SLOT(pdev->devfn); 105 devnum = PCI_SLOT(pdev->devfn);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index dc362fa01ca4..3c1f8830ac8b 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3455,7 +3455,8 @@ static void ata_dev_xfermask(struct ata_device *dev)
3455 "device is on DMA blacklist, disabling DMA\n"); 3455 "device is on DMA blacklist, disabling DMA\n");
3456 } 3456 }
3457 3457
3458 if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed != ap) { 3458 if ((host->flags & ATA_HOST_SIMPLEX) &&
3459 host->simplex_claimed && host->simplex_claimed != ap) {
3459 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 3460 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3460 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by " 3461 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3461 "other device, disabling DMA\n"); 3462 "other device, disabling DMA\n");
@@ -5684,18 +5685,22 @@ static void ata_host_release(struct device *gendev, void *res)
5684 for (i = 0; i < host->n_ports; i++) { 5685 for (i = 0; i < host->n_ports; i++) {
5685 struct ata_port *ap = host->ports[i]; 5686 struct ata_port *ap = host->ports[i];
5686 5687
5687 if (!ap) 5688 if (ap && ap->ops->port_stop)
5688 continue;
5689
5690 if (ap->ops->port_stop)
5691 ap->ops->port_stop(ap); 5689 ap->ops->port_stop(ap);
5692
5693 scsi_host_put(ap->scsi_host);
5694 } 5690 }
5695 5691
5696 if (host->ops->host_stop) 5692 if (host->ops->host_stop)
5697 host->ops->host_stop(host); 5693 host->ops->host_stop(host);
5698 5694
5695 for (i = 0; i < host->n_ports; i++) {
5696 struct ata_port *ap = host->ports[i];
5697
5698 if (ap)
5699 scsi_host_put(ap->scsi_host);
5700
5701 host->ports[i] = NULL;
5702 }
5703
5699 dev_set_drvdata(gendev, NULL); 5704 dev_set_drvdata(gendev, NULL);
5700} 5705}
5701 5706
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 388d07fab5f7..9d9670a9b117 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -874,8 +874,14 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
874 874
875 if (status & (NV_ADMA_STAT_DONE | 875 if (status & (NV_ADMA_STAT_DONE |
876 NV_ADMA_STAT_CPBERR)) { 876 NV_ADMA_STAT_CPBERR)) {
877 u32 check_commands = notifier | notifier_error; 877 u32 check_commands;
878 int pos, error = 0; 878 int pos, error = 0;
879
880 if(ata_tag_valid(ap->active_tag))
881 check_commands = 1 << ap->active_tag;
882 else
883 check_commands = ap->sactive;
884
879 /** Check CPBs for completed commands */ 885 /** Check CPBs for completed commands */
880 while ((pos = ffs(check_commands)) && !error) { 886 while ((pos = ffs(check_commands)) && !error) {
881 pos--; 887 pos--;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index cf2a398aaaa1..89ebe3682726 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -787,6 +787,13 @@ void device_del(struct device * dev)
787 device_remove_attrs(dev); 787 device_remove_attrs(dev);
788 bus_remove_device(dev); 788 bus_remove_device(dev);
789 789
790 /*
791 * Some platform devices are driven without driver attached
792 * and managed resources may have been acquired. Make sure
793 * all resources are released.
794 */
795 devres_release_all(dev);
796
790 /* Notify the platform of the removal, in case they 797 /* Notify the platform of the removal, in case they
791 * need to do anything... 798 * need to do anything...
792 */ 799 */
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index d441815a3e0c..fde92ce45153 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1821,7 +1821,7 @@ static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
1821 struct rdma_bind_list *bind_list; 1821 struct rdma_bind_list *bind_list;
1822 int port, ret; 1822 int port, ret;
1823 1823
1824 bind_list = kmalloc(sizeof *bind_list, GFP_KERNEL); 1824 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
1825 if (!bind_list) 1825 if (!bind_list)
1826 return -ENOMEM; 1826 return -ENOMEM;
1827 1827
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index b516b93b8550..c859134c1daa 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -266,7 +266,7 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
266 mutex_lock(&ctx->file->mut); 266 mutex_lock(&ctx->file->mut);
267 if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) { 267 if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
268 if (!ctx->backlog) { 268 if (!ctx->backlog) {
269 ret = -EDQUOT; 269 ret = -ENOMEM;
270 kfree(uevent); 270 kfree(uevent);
271 goto out; 271 goto out;
272 } 272 }
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index d737c738d876..818cf1aee8c7 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -36,6 +36,7 @@
36#include <linux/sched.h> 36#include <linux/sched.h>
37#include <linux/spinlock.h> 37#include <linux/spinlock.h>
38#include <linux/pci.h> 38#include <linux/pci.h>
39#include <linux/dma-mapping.h>
39 40
40#include "cxio_resource.h" 41#include "cxio_resource.h"
41#include "cxio_hal.h" 42#include "cxio_hal.h"
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index b21fde8b659d..d0ed1d35ca3e 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -305,8 +305,7 @@ static int status2errno(int status)
305 */ 305 */
306static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp) 306static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
307{ 307{
308 if (skb) { 308 if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
309 BUG_ON(skb_cloned(skb));
310 skb_trim(skb, 0); 309 skb_trim(skb, 0);
311 skb_get(skb); 310 skb_get(skb);
312 } else { 311 } else {
@@ -1415,6 +1414,7 @@ static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1415 wake_up(&ep->com.waitq); 1414 wake_up(&ep->com.waitq);
1416 break; 1415 break;
1417 case FPDU_MODE: 1416 case FPDU_MODE:
1417 start_ep_timer(ep);
1418 __state_set(&ep->com, CLOSING); 1418 __state_set(&ep->com, CLOSING);
1419 attrs.next_state = IWCH_QP_STATE_CLOSING; 1419 attrs.next_state = IWCH_QP_STATE_CLOSING;
1420 iwch_modify_qp(ep->com.qp->rhp, ep->com.qp, 1420 iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
@@ -1425,7 +1425,6 @@ static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1425 disconnect = 0; 1425 disconnect = 0;
1426 break; 1426 break;
1427 case CLOSING: 1427 case CLOSING:
1428 start_ep_timer(ep);
1429 __state_set(&ep->com, MORIBUND); 1428 __state_set(&ep->com, MORIBUND);
1430 disconnect = 0; 1429 disconnect = 0;
1431 break; 1430 break;
@@ -1487,8 +1486,10 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1487 case CONNECTING: 1486 case CONNECTING:
1488 break; 1487 break;
1489 case MPA_REQ_WAIT: 1488 case MPA_REQ_WAIT:
1489 stop_ep_timer(ep);
1490 break; 1490 break;
1491 case MPA_REQ_SENT: 1491 case MPA_REQ_SENT:
1492 stop_ep_timer(ep);
1492 connect_reply_upcall(ep, -ECONNRESET); 1493 connect_reply_upcall(ep, -ECONNRESET);
1493 break; 1494 break;
1494 case MPA_REP_SENT: 1495 case MPA_REP_SENT:
@@ -1507,9 +1508,10 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1507 get_ep(&ep->com); 1508 get_ep(&ep->com);
1508 break; 1509 break;
1509 case MORIBUND: 1510 case MORIBUND:
1511 case CLOSING:
1510 stop_ep_timer(ep); 1512 stop_ep_timer(ep);
1513 /*FALLTHROUGH*/
1511 case FPDU_MODE: 1514 case FPDU_MODE:
1512 case CLOSING:
1513 if (ep->com.cm_id && ep->com.qp) { 1515 if (ep->com.cm_id && ep->com.qp) {
1514 attrs.next_state = IWCH_QP_STATE_ERROR; 1516 attrs.next_state = IWCH_QP_STATE_ERROR;
1515 ret = iwch_modify_qp(ep->com.qp->rhp, 1517 ret = iwch_modify_qp(ep->com.qp->rhp,
@@ -1570,7 +1572,6 @@ static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1570 spin_lock_irqsave(&ep->com.lock, flags); 1572 spin_lock_irqsave(&ep->com.lock, flags);
1571 switch (ep->com.state) { 1573 switch (ep->com.state) {
1572 case CLOSING: 1574 case CLOSING:
1573 start_ep_timer(ep);
1574 __state_set(&ep->com, MORIBUND); 1575 __state_set(&ep->com, MORIBUND);
1575 break; 1576 break;
1576 case MORIBUND: 1577 case MORIBUND:
@@ -1586,6 +1587,8 @@ static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1586 __state_set(&ep->com, DEAD); 1587 __state_set(&ep->com, DEAD);
1587 release = 1; 1588 release = 1;
1588 break; 1589 break;
1590 case ABORTING:
1591 break;
1589 case DEAD: 1592 case DEAD:
1590 default: 1593 default:
1591 BUG_ON(1); 1594 BUG_ON(1);
@@ -1659,6 +1662,7 @@ static void ep_timeout(unsigned long arg)
1659 break; 1662 break;
1660 case MPA_REQ_WAIT: 1663 case MPA_REQ_WAIT:
1661 break; 1664 break;
1665 case CLOSING:
1662 case MORIBUND: 1666 case MORIBUND:
1663 if (ep->com.cm_id && ep->com.qp) { 1667 if (ep->com.cm_id && ep->com.qp) {
1664 attrs.next_state = IWCH_QP_STATE_ERROR; 1668 attrs.next_state = IWCH_QP_STATE_ERROR;
@@ -1687,12 +1691,11 @@ int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
1687 return -ECONNRESET; 1691 return -ECONNRESET;
1688 } 1692 }
1689 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); 1693 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
1690 state_set(&ep->com, CLOSING);
1691 if (mpa_rev == 0) 1694 if (mpa_rev == 0)
1692 abort_connection(ep, NULL, GFP_KERNEL); 1695 abort_connection(ep, NULL, GFP_KERNEL);
1693 else { 1696 else {
1694 err = send_mpa_reject(ep, pdata, pdata_len); 1697 err = send_mpa_reject(ep, pdata, pdata_len);
1695 err = send_halfclose(ep, GFP_KERNEL); 1698 err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
1696 } 1699 }
1697 return 0; 1700 return 0;
1698} 1701}
@@ -1957,11 +1960,11 @@ int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
1957 case MPA_REQ_RCVD: 1960 case MPA_REQ_RCVD:
1958 case MPA_REP_SENT: 1961 case MPA_REP_SENT:
1959 case FPDU_MODE: 1962 case FPDU_MODE:
1963 start_ep_timer(ep);
1960 ep->com.state = CLOSING; 1964 ep->com.state = CLOSING;
1961 close = 1; 1965 close = 1;
1962 break; 1966 break;
1963 case CLOSING: 1967 case CLOSING:
1964 start_ep_timer(ep);
1965 ep->com.state = MORIBUND; 1968 ep->com.state = MORIBUND;
1966 close = 1; 1969 close = 1;
1967 break; 1970 break;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_ev.c b/drivers/infiniband/hw/cxgb3/iwch_ev.c
index 54362afbf72f..b40676662a8a 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_ev.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_ev.c
@@ -47,12 +47,6 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
47 struct iwch_qp_attributes attrs; 47 struct iwch_qp_attributes attrs;
48 struct iwch_qp *qhp; 48 struct iwch_qp *qhp;
49 49
50 printk(KERN_ERR "%s - AE qpid 0x%x opcode %d status 0x%x "
51 "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __FUNCTION__,
52 CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
53 CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
54 CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
55
56 spin_lock(&rnicp->lock); 50 spin_lock(&rnicp->lock);
57 qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe)); 51 qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
58 52
@@ -73,6 +67,12 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
73 return; 67 return;
74 } 68 }
75 69
70 printk(KERN_ERR "%s - AE qpid 0x%x opcode %d status 0x%x "
71 "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __FUNCTION__,
72 CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
73 CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
74 CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
75
76 atomic_inc(&qhp->refcnt); 76 atomic_inc(&qhp->refcnt);
77 spin_unlock(&rnicp->lock); 77 spin_unlock(&rnicp->lock);
78 78
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 9947a144a929..f2774ae906bf 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -331,6 +331,7 @@ static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
331 int ret = 0; 331 int ret = 0;
332 struct iwch_mm_entry *mm; 332 struct iwch_mm_entry *mm;
333 struct iwch_ucontext *ucontext; 333 struct iwch_ucontext *ucontext;
334 u64 addr;
334 335
335 PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __FUNCTION__, vma->vm_pgoff, 336 PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __FUNCTION__, vma->vm_pgoff,
336 key, len); 337 key, len);
@@ -345,10 +346,11 @@ static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
345 mm = remove_mmap(ucontext, key, len); 346 mm = remove_mmap(ucontext, key, len);
346 if (!mm) 347 if (!mm)
347 return -EINVAL; 348 return -EINVAL;
349 addr = mm->addr;
348 kfree(mm); 350 kfree(mm);
349 351
350 if ((mm->addr >= rdev_p->rnic_info.udbell_physbase) && 352 if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
351 (mm->addr < (rdev_p->rnic_info.udbell_physbase + 353 (addr < (rdev_p->rnic_info.udbell_physbase +
352 rdev_p->rnic_info.udbell_len))) { 354 rdev_p->rnic_info.udbell_len))) {
353 355
354 /* 356 /*
@@ -362,7 +364,7 @@ static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
362 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; 364 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
363 vma->vm_flags &= ~VM_MAYREAD; 365 vma->vm_flags &= ~VM_MAYREAD;
364 ret = io_remap_pfn_range(vma, vma->vm_start, 366 ret = io_remap_pfn_range(vma, vma->vm_start,
365 mm->addr >> PAGE_SHIFT, 367 addr >> PAGE_SHIFT,
366 len, vma->vm_page_prot); 368 len, vma->vm_page_prot);
367 } else { 369 } else {
368 370
@@ -370,7 +372,7 @@ static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
370 * Map WQ or CQ contig dma memory... 372 * Map WQ or CQ contig dma memory...
371 */ 373 */
372 ret = remap_pfn_range(vma, vma->vm_start, 374 ret = remap_pfn_range(vma, vma->vm_start,
373 mm->addr >> PAGE_SHIFT, 375 addr >> PAGE_SHIFT,
374 len, vma->vm_page_prot); 376 len, vma->vm_page_prot);
375 } 377 }
376 378
@@ -463,9 +465,6 @@ static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
463 php = to_iwch_pd(pd); 465 php = to_iwch_pd(pd);
464 rhp = php->rhp; 466 rhp = php->rhp;
465 467
466 acc = iwch_convert_access(acc);
467
468
469 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); 468 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
470 if (!mhp) 469 if (!mhp)
471 return ERR_PTR(-ENOMEM); 470 return ERR_PTR(-ENOMEM);
@@ -491,12 +490,7 @@ static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
491 mhp->attr.pdid = php->pdid; 490 mhp->attr.pdid = php->pdid;
492 mhp->attr.zbva = 0; 491 mhp->attr.zbva = 0;
493 492
494 /* NOTE: TPT perms are backwards from BIND WR perms! */ 493 mhp->attr.perms = iwch_ib_to_tpt_access(acc);
495 mhp->attr.perms = (acc & 0x1) << 3;
496 mhp->attr.perms |= (acc & 0x2) << 1;
497 mhp->attr.perms |= (acc & 0x4) >> 1;
498 mhp->attr.perms |= (acc & 0x8) >> 3;
499
500 mhp->attr.va_fbo = *iova_start; 494 mhp->attr.va_fbo = *iova_start;
501 mhp->attr.page_size = shift - 12; 495 mhp->attr.page_size = shift - 12;
502 496
@@ -525,7 +519,6 @@ static int iwch_reregister_phys_mem(struct ib_mr *mr,
525 struct iwch_mr mh, *mhp; 519 struct iwch_mr mh, *mhp;
526 struct iwch_pd *php; 520 struct iwch_pd *php;
527 struct iwch_dev *rhp; 521 struct iwch_dev *rhp;
528 int new_acc;
529 __be64 *page_list = NULL; 522 __be64 *page_list = NULL;
530 int shift = 0; 523 int shift = 0;
531 u64 total_size; 524 u64 total_size;
@@ -546,14 +539,12 @@ static int iwch_reregister_phys_mem(struct ib_mr *mr,
546 if (rhp != php->rhp) 539 if (rhp != php->rhp)
547 return -EINVAL; 540 return -EINVAL;
548 541
549 new_acc = mhp->attr.perms;
550
551 memcpy(&mh, mhp, sizeof *mhp); 542 memcpy(&mh, mhp, sizeof *mhp);
552 543
553 if (mr_rereg_mask & IB_MR_REREG_PD) 544 if (mr_rereg_mask & IB_MR_REREG_PD)
554 php = to_iwch_pd(pd); 545 php = to_iwch_pd(pd);
555 if (mr_rereg_mask & IB_MR_REREG_ACCESS) 546 if (mr_rereg_mask & IB_MR_REREG_ACCESS)
556 mh.attr.perms = iwch_convert_access(acc); 547 mh.attr.perms = iwch_ib_to_tpt_access(acc);
557 if (mr_rereg_mask & IB_MR_REREG_TRANS) 548 if (mr_rereg_mask & IB_MR_REREG_TRANS)
558 ret = build_phys_page_list(buffer_list, num_phys_buf, 549 ret = build_phys_page_list(buffer_list, num_phys_buf,
559 iova_start, 550 iova_start,
@@ -568,7 +559,7 @@ static int iwch_reregister_phys_mem(struct ib_mr *mr,
568 if (mr_rereg_mask & IB_MR_REREG_PD) 559 if (mr_rereg_mask & IB_MR_REREG_PD)
569 mhp->attr.pdid = php->pdid; 560 mhp->attr.pdid = php->pdid;
570 if (mr_rereg_mask & IB_MR_REREG_ACCESS) 561 if (mr_rereg_mask & IB_MR_REREG_ACCESS)
571 mhp->attr.perms = acc; 562 mhp->attr.perms = iwch_ib_to_tpt_access(acc);
572 if (mr_rereg_mask & IB_MR_REREG_TRANS) { 563 if (mr_rereg_mask & IB_MR_REREG_TRANS) {
573 mhp->attr.zbva = 0; 564 mhp->attr.zbva = 0;
574 mhp->attr.va_fbo = *iova_start; 565 mhp->attr.va_fbo = *iova_start;
@@ -613,8 +604,6 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
613 goto err; 604 goto err;
614 } 605 }
615 606
616 acc = iwch_convert_access(acc);
617
618 i = n = 0; 607 i = n = 0;
619 608
620 list_for_each_entry(chunk, &region->chunk_list, list) 609 list_for_each_entry(chunk, &region->chunk_list, list)
@@ -630,10 +619,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
630 mhp->rhp = rhp; 619 mhp->rhp = rhp;
631 mhp->attr.pdid = php->pdid; 620 mhp->attr.pdid = php->pdid;
632 mhp->attr.zbva = 0; 621 mhp->attr.zbva = 0;
633 mhp->attr.perms = (acc & 0x1) << 3; 622 mhp->attr.perms = iwch_ib_to_tpt_access(acc);
634 mhp->attr.perms |= (acc & 0x2) << 1;
635 mhp->attr.perms |= (acc & 0x4) >> 1;
636 mhp->attr.perms |= (acc & 0x8) >> 3;
637 mhp->attr.va_fbo = region->virt_base; 623 mhp->attr.va_fbo = region->virt_base;
638 mhp->attr.page_size = shift - 12; 624 mhp->attr.page_size = shift - 12;
639 mhp->attr.len = (u32) region->length; 625 mhp->attr.len = (u32) region->length;
@@ -736,10 +722,8 @@ static int iwch_destroy_qp(struct ib_qp *ib_qp)
736 qhp = to_iwch_qp(ib_qp); 722 qhp = to_iwch_qp(ib_qp);
737 rhp = qhp->rhp; 723 rhp = qhp->rhp;
738 724
739 if (qhp->attr.state == IWCH_QP_STATE_RTS) { 725 attrs.next_state = IWCH_QP_STATE_ERROR;
740 attrs.next_state = IWCH_QP_STATE_ERROR; 726 iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
741 iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
742 }
743 wait_event(qhp->wait, !qhp->ep); 727 wait_event(qhp->wait, !qhp->ep);
744 728
745 remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid); 729 remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index de0fe1b93a0c..93bcc56756bd 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -286,27 +286,20 @@ static inline int iwch_convert_state(enum ib_qp_state ib_state)
286 } 286 }
287} 287}
288 288
289enum iwch_mem_perms { 289static inline u32 iwch_ib_to_tpt_access(int acc)
290 IWCH_MEM_ACCESS_LOCAL_READ = 1 << 0,
291 IWCH_MEM_ACCESS_LOCAL_WRITE = 1 << 1,
292 IWCH_MEM_ACCESS_REMOTE_READ = 1 << 2,
293 IWCH_MEM_ACCESS_REMOTE_WRITE = 1 << 3,
294 IWCH_MEM_ACCESS_ATOMICS = 1 << 4,
295 IWCH_MEM_ACCESS_BINDING = 1 << 5,
296 IWCH_MEM_ACCESS_LOCAL =
297 (IWCH_MEM_ACCESS_LOCAL_READ | IWCH_MEM_ACCESS_LOCAL_WRITE),
298 IWCH_MEM_ACCESS_REMOTE =
299 (IWCH_MEM_ACCESS_REMOTE_WRITE | IWCH_MEM_ACCESS_REMOTE_READ)
300 /* cannot go beyond 1 << 31 */
301} __attribute__ ((packed));
302
303static inline u32 iwch_convert_access(int acc)
304{ 290{
305 return (acc & IB_ACCESS_REMOTE_WRITE ? IWCH_MEM_ACCESS_REMOTE_WRITE : 0) 291 return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
306 | (acc & IB_ACCESS_REMOTE_READ ? IWCH_MEM_ACCESS_REMOTE_READ : 0) | 292 (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0) |
307 (acc & IB_ACCESS_LOCAL_WRITE ? IWCH_MEM_ACCESS_LOCAL_WRITE : 0) | 293 (acc & IB_ACCESS_LOCAL_WRITE ? TPT_LOCAL_WRITE : 0) |
308 (acc & IB_ACCESS_MW_BIND ? IWCH_MEM_ACCESS_BINDING : 0) | 294 TPT_LOCAL_READ;
309 IWCH_MEM_ACCESS_LOCAL_READ; 295}
296
297static inline u32 iwch_ib_to_mwbind_access(int acc)
298{
299 return (acc & IB_ACCESS_REMOTE_WRITE ? T3_MEM_ACCESS_REM_WRITE : 0) |
300 (acc & IB_ACCESS_REMOTE_READ ? T3_MEM_ACCESS_REM_READ : 0) |
301 (acc & IB_ACCESS_LOCAL_WRITE ? T3_MEM_ACCESS_LOCAL_WRITE : 0) |
302 T3_MEM_ACCESS_LOCAL_READ;
310} 303}
311 304
312enum iwch_mmid_state { 305enum iwch_mmid_state {
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 9ea00cc4a5f8..0a472c9b44db 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -439,7 +439,7 @@ int iwch_bind_mw(struct ib_qp *qp,
439 wqe->bind.type = T3_VA_BASED_TO; 439 wqe->bind.type = T3_VA_BASED_TO;
440 440
441 /* TBD: check perms */ 441 /* TBD: check perms */
442 wqe->bind.perms = iwch_convert_access(mw_bind->mw_access_flags); 442 wqe->bind.perms = iwch_ib_to_mwbind_access(mw_bind->mw_access_flags);
443 wqe->bind.mr_stag = cpu_to_be32(mw_bind->mr->lkey); 443 wqe->bind.mr_stag = cpu_to_be32(mw_bind->mr->lkey);
444 wqe->bind.mw_stag = cpu_to_be32(mw->rkey); 444 wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
445 wqe->bind.mw_len = cpu_to_be32(mw_bind->length); 445 wqe->bind.mw_len = cpu_to_be32(mw_bind->length);
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 40404c9e2817..82ded44c6cee 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -52,6 +52,8 @@ struct ehca_mw;
52struct ehca_pd; 52struct ehca_pd;
53struct ehca_av; 53struct ehca_av;
54 54
55#include <linux/wait.h>
56
55#include <rdma/ib_verbs.h> 57#include <rdma/ib_verbs.h>
56#include <rdma/ib_user_verbs.h> 58#include <rdma/ib_user_verbs.h>
57 59
@@ -153,7 +155,9 @@ struct ehca_cq {
153 spinlock_t cb_lock; 155 spinlock_t cb_lock;
154 struct hlist_head qp_hashtab[QP_HASHTAB_LEN]; 156 struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
155 struct list_head entry; 157 struct list_head entry;
156 u32 nr_callbacks; 158 u32 nr_callbacks; /* #events assigned to cpu by scaling code */
159 u32 nr_events; /* #events seen */
160 wait_queue_head_t wait_completion;
157 spinlock_t task_lock; 161 spinlock_t task_lock;
158 u32 ownpid; 162 u32 ownpid;
159 /* mmap counter for resources mapped into user space */ 163 /* mmap counter for resources mapped into user space */
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 6ebfa27e4e16..e2cdc1a16fe9 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -146,6 +146,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
146 spin_lock_init(&my_cq->spinlock); 146 spin_lock_init(&my_cq->spinlock);
147 spin_lock_init(&my_cq->cb_lock); 147 spin_lock_init(&my_cq->cb_lock);
148 spin_lock_init(&my_cq->task_lock); 148 spin_lock_init(&my_cq->task_lock);
149 init_waitqueue_head(&my_cq->wait_completion);
149 my_cq->ownpid = current->tgid; 150 my_cq->ownpid = current->tgid;
150 151
151 cq = &my_cq->ib_cq; 152 cq = &my_cq->ib_cq;
@@ -302,6 +303,16 @@ create_cq_exit1:
302 return cq; 303 return cq;
303} 304}
304 305
306static int get_cq_nr_events(struct ehca_cq *my_cq)
307{
308 int ret;
309 unsigned long flags;
310 spin_lock_irqsave(&ehca_cq_idr_lock, flags);
311 ret = my_cq->nr_events;
312 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
313 return ret;
314}
315
305int ehca_destroy_cq(struct ib_cq *cq) 316int ehca_destroy_cq(struct ib_cq *cq)
306{ 317{
307 u64 h_ret; 318 u64 h_ret;
@@ -329,10 +340,11 @@ int ehca_destroy_cq(struct ib_cq *cq)
329 } 340 }
330 341
331 spin_lock_irqsave(&ehca_cq_idr_lock, flags); 342 spin_lock_irqsave(&ehca_cq_idr_lock, flags);
332 while (my_cq->nr_callbacks) { 343 while (my_cq->nr_events) {
333 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags); 344 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
334 yield(); 345 wait_event(my_cq->wait_completion, !get_cq_nr_events(my_cq));
335 spin_lock_irqsave(&ehca_cq_idr_lock, flags); 346 spin_lock_irqsave(&ehca_cq_idr_lock, flags);
347 /* recheck nr_events to assure no cqe has just arrived */
336 } 348 }
337 349
338 idr_remove(&ehca_cq_idr, my_cq->token); 350 idr_remove(&ehca_cq_idr, my_cq->token);
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 3ec53c687d08..20f36bf8b2b6 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -404,10 +404,11 @@ static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
404 u32 token; 404 u32 token;
405 unsigned long flags; 405 unsigned long flags;
406 struct ehca_cq *cq; 406 struct ehca_cq *cq;
407
407 eqe_value = eqe->entry; 408 eqe_value = eqe->entry;
408 ehca_dbg(&shca->ib_device, "eqe_value=%lx", eqe_value); 409 ehca_dbg(&shca->ib_device, "eqe_value=%lx", eqe_value);
409 if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) { 410 if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
410 ehca_dbg(&shca->ib_device, "... completion event"); 411 ehca_dbg(&shca->ib_device, "Got completion event");
411 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value); 412 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
412 spin_lock_irqsave(&ehca_cq_idr_lock, flags); 413 spin_lock_irqsave(&ehca_cq_idr_lock, flags);
413 cq = idr_find(&ehca_cq_idr, token); 414 cq = idr_find(&ehca_cq_idr, token);
@@ -419,16 +420,20 @@ static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
419 return; 420 return;
420 } 421 }
421 reset_eq_pending(cq); 422 reset_eq_pending(cq);
422 if (ehca_scaling_code) { 423 cq->nr_events++;
424 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
425 if (ehca_scaling_code)
423 queue_comp_task(cq); 426 queue_comp_task(cq);
424 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags); 427 else {
425 } else {
426 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
427 comp_event_callback(cq); 428 comp_event_callback(cq);
429 spin_lock_irqsave(&ehca_cq_idr_lock, flags);
430 cq->nr_events--;
431 if (!cq->nr_events)
432 wake_up(&cq->wait_completion);
433 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
428 } 434 }
429 } else { 435 } else {
430 ehca_dbg(&shca->ib_device, 436 ehca_dbg(&shca->ib_device, "Got non completion event");
431 "Got non completion event");
432 parse_identifier(shca, eqe_value); 437 parse_identifier(shca, eqe_value);
433 } 438 }
434} 439}
@@ -478,6 +483,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
478 "token=%x", token); 483 "token=%x", token);
479 continue; 484 continue;
480 } 485 }
486 eqe_cache[eqe_cnt].cq->nr_events++;
481 spin_unlock(&ehca_cq_idr_lock); 487 spin_unlock(&ehca_cq_idr_lock);
482 } else 488 } else
483 eqe_cache[eqe_cnt].cq = NULL; 489 eqe_cache[eqe_cnt].cq = NULL;
@@ -504,12 +510,18 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
504 /* call completion handler for cached eqes */ 510 /* call completion handler for cached eqes */
505 for (i = 0; i < eqe_cnt; i++) 511 for (i = 0; i < eqe_cnt; i++)
506 if (eq->eqe_cache[i].cq) { 512 if (eq->eqe_cache[i].cq) {
507 if (ehca_scaling_code) { 513 if (ehca_scaling_code)
508 spin_lock(&ehca_cq_idr_lock);
509 queue_comp_task(eq->eqe_cache[i].cq); 514 queue_comp_task(eq->eqe_cache[i].cq);
510 spin_unlock(&ehca_cq_idr_lock); 515 else {
511 } else 516 struct ehca_cq *cq = eq->eqe_cache[i].cq;
512 comp_event_callback(eq->eqe_cache[i].cq); 517 comp_event_callback(cq);
518 spin_lock_irqsave(&ehca_cq_idr_lock, flags);
519 cq->nr_events--;
520 if (!cq->nr_events)
521 wake_up(&cq->wait_completion);
522 spin_unlock_irqrestore(&ehca_cq_idr_lock,
523 flags);
524 }
513 } else { 525 } else {
514 ehca_dbg(&shca->ib_device, "Got non completion event"); 526 ehca_dbg(&shca->ib_device, "Got non completion event");
515 parse_identifier(shca, eq->eqe_cache[i].eqe->entry); 527 parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
@@ -523,7 +535,6 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
523 if (!eqe) 535 if (!eqe)
524 break; 536 break;
525 process_eqe(shca, eqe); 537 process_eqe(shca, eqe);
526 eqe_cnt++;
527 } while (1); 538 } while (1);
528 539
529unlock_irq_spinlock: 540unlock_irq_spinlock:
@@ -567,8 +578,7 @@ static void __queue_comp_task(struct ehca_cq *__cq,
567 list_add_tail(&__cq->entry, &cct->cq_list); 578 list_add_tail(&__cq->entry, &cct->cq_list);
568 cct->cq_jobs++; 579 cct->cq_jobs++;
569 wake_up(&cct->wait_queue); 580 wake_up(&cct->wait_queue);
570 } 581 } else
571 else
572 __cq->nr_callbacks++; 582 __cq->nr_callbacks++;
573 583
574 spin_unlock(&__cq->task_lock); 584 spin_unlock(&__cq->task_lock);
@@ -577,18 +587,21 @@ static void __queue_comp_task(struct ehca_cq *__cq,
577 587
578static void queue_comp_task(struct ehca_cq *__cq) 588static void queue_comp_task(struct ehca_cq *__cq)
579{ 589{
580 int cpu;
581 int cpu_id; 590 int cpu_id;
582 struct ehca_cpu_comp_task *cct; 591 struct ehca_cpu_comp_task *cct;
592 int cq_jobs;
593 unsigned long flags;
583 594
584 cpu = get_cpu();
585 cpu_id = find_next_online_cpu(pool); 595 cpu_id = find_next_online_cpu(pool);
586 BUG_ON(!cpu_online(cpu_id)); 596 BUG_ON(!cpu_online(cpu_id));
587 597
588 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id); 598 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
589 BUG_ON(!cct); 599 BUG_ON(!cct);
590 600
591 if (cct->cq_jobs > 0) { 601 spin_lock_irqsave(&cct->task_lock, flags);
602 cq_jobs = cct->cq_jobs;
603 spin_unlock_irqrestore(&cct->task_lock, flags);
604 if (cq_jobs > 0) {
592 cpu_id = find_next_online_cpu(pool); 605 cpu_id = find_next_online_cpu(pool);
593 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id); 606 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
594 BUG_ON(!cct); 607 BUG_ON(!cct);
@@ -608,11 +621,17 @@ static void run_comp_task(struct ehca_cpu_comp_task* cct)
608 cq = list_entry(cct->cq_list.next, struct ehca_cq, entry); 621 cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
609 spin_unlock_irqrestore(&cct->task_lock, flags); 622 spin_unlock_irqrestore(&cct->task_lock, flags);
610 comp_event_callback(cq); 623 comp_event_callback(cq);
611 spin_lock_irqsave(&cct->task_lock, flags);
612 624
625 spin_lock_irqsave(&ehca_cq_idr_lock, flags);
626 cq->nr_events--;
627 if (!cq->nr_events)
628 wake_up(&cq->wait_completion);
629 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
630
631 spin_lock_irqsave(&cct->task_lock, flags);
613 spin_lock(&cq->task_lock); 632 spin_lock(&cq->task_lock);
614 cq->nr_callbacks--; 633 cq->nr_callbacks--;
615 if (cq->nr_callbacks == 0) { 634 if (!cq->nr_callbacks) {
616 list_del_init(cct->cq_list.next); 635 list_del_init(cct->cq_list.next);
617 cct->cq_jobs--; 636 cct->cq_jobs--;
618 } 637 }
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index c1835121a822..059da9628bb5 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
52MODULE_LICENSE("Dual BSD/GPL"); 52MODULE_LICENSE("Dual BSD/GPL");
53MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); 53MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
54MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver"); 54MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
55MODULE_VERSION("SVNEHCA_0021"); 55MODULE_VERSION("SVNEHCA_0022");
56 56
57int ehca_open_aqp1 = 0; 57int ehca_open_aqp1 = 0;
58int ehca_debug_level = 0; 58int ehca_debug_level = 0;
@@ -810,7 +810,7 @@ int __init ehca_module_init(void)
810 int ret; 810 int ret;
811 811
812 printk(KERN_INFO "eHCA Infiniband Device Driver " 812 printk(KERN_INFO "eHCA Infiniband Device Driver "
813 "(Rel.: SVNEHCA_0021)\n"); 813 "(Rel.: SVNEHCA_0022)\n");
814 idr_init(&ehca_qp_idr); 814 idr_init(&ehca_qp_idr);
815 idr_init(&ehca_cq_idr); 815 idr_init(&ehca_cq_idr);
816 spin_lock_init(&ehca_qp_idr_lock); 816 spin_lock_init(&ehca_qp_idr_lock);
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 71dc84bd4254..1c6b63aca268 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1088,21 +1088,21 @@ static void mthca_unmap_memfree(struct mthca_dev *dev,
1088static int mthca_alloc_memfree(struct mthca_dev *dev, 1088static int mthca_alloc_memfree(struct mthca_dev *dev,
1089 struct mthca_qp *qp) 1089 struct mthca_qp *qp)
1090{ 1090{
1091 int ret = 0;
1092
1093 if (mthca_is_memfree(dev)) { 1091 if (mthca_is_memfree(dev)) {
1094 qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ, 1092 qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
1095 qp->qpn, &qp->rq.db); 1093 qp->qpn, &qp->rq.db);
1096 if (qp->rq.db_index < 0) 1094 if (qp->rq.db_index < 0)
1097 return ret; 1095 return -ENOMEM;
1098 1096
1099 qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ, 1097 qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
1100 qp->qpn, &qp->sq.db); 1098 qp->qpn, &qp->sq.db);
1101 if (qp->sq.db_index < 0) 1099 if (qp->sq.db_index < 0) {
1102 mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index); 1100 mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
1101 return -ENOMEM;
1102 }
1103 } 1103 }
1104 1104
1105 return ret; 1105 return 0;
1106} 1106}
1107 1107
1108static void mthca_free_memfree(struct mthca_dev *dev, 1108static void mthca_free_memfree(struct mthca_dev *dev,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index bb2e3d5eee20..56c87a81bb67 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -407,6 +407,10 @@ static int ipoib_mcast_join_complete(int status,
407 queue_delayed_work(ipoib_workqueue, 407 queue_delayed_work(ipoib_workqueue,
408 &priv->mcast_task, 0); 408 &priv->mcast_task, 0);
409 mutex_unlock(&mcast_mutex); 409 mutex_unlock(&mcast_mutex);
410
411 if (mcast == priv->broadcast)
412 netif_carrier_on(dev);
413
410 return 0; 414 return 0;
411 } 415 }
412 416
@@ -594,7 +598,6 @@ void ipoib_mcast_join_task(struct work_struct *work)
594 ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n"); 598 ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n");
595 599
596 clear_bit(IPOIB_MCAST_RUN, &priv->flags); 600 clear_bit(IPOIB_MCAST_RUN, &priv->flags);
597 netif_carrier_on(dev);
598} 601}
599 602
600int ipoib_mcast_start_thread(struct net_device *dev) 603int ipoib_mcast_start_thread(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 3cb551b88756..7f3ec205e35f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -259,12 +259,13 @@ void ipoib_event(struct ib_event_handler *handler,
259 struct ipoib_dev_priv *priv = 259 struct ipoib_dev_priv *priv =
260 container_of(handler, struct ipoib_dev_priv, event_handler); 260 container_of(handler, struct ipoib_dev_priv, event_handler);
261 261
262 if (record->event == IB_EVENT_PORT_ERR || 262 if ((record->event == IB_EVENT_PORT_ERR ||
263 record->event == IB_EVENT_PKEY_CHANGE || 263 record->event == IB_EVENT_PKEY_CHANGE ||
264 record->event == IB_EVENT_PORT_ACTIVE || 264 record->event == IB_EVENT_PORT_ACTIVE ||
265 record->event == IB_EVENT_LID_CHANGE || 265 record->event == IB_EVENT_LID_CHANGE ||
266 record->event == IB_EVENT_SM_CHANGE || 266 record->event == IB_EVENT_SM_CHANGE ||
267 record->event == IB_EVENT_CLIENT_REREGISTER) { 267 record->event == IB_EVENT_CLIENT_REREGISTER) &&
268 record->element.port_num == priv->port) {
268 ipoib_dbg(priv, "Port state change event\n"); 269 ipoib_dbg(priv, "Port state change event\n");
269 queue_work(ipoib_workqueue, &priv->flush_task); 270 queue_work(ipoib_workqueue, &priv->flush_task);
270 } 271 }
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index ec195a36e8f6..db9cca3b65e0 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -553,7 +553,8 @@ static int __devinit i8042_check_aux(void)
553 */ 553 */
554 554
555 param = 0x5a; 555 param = 0x5a;
556 if (i8042_command(&param, I8042_CMD_AUX_LOOP) || param != 0x5a) { 556 retval = i8042_command(&param, I8042_CMD_AUX_LOOP);
557 if (retval || param != 0x5a) {
557 558
558/* 559/*
559 * External connection test - filters out AT-soldered PS/2 i8042's 560 * External connection test - filters out AT-soldered PS/2 i8042's
@@ -567,7 +568,12 @@ static int __devinit i8042_check_aux(void)
567 (param && param != 0xfa && param != 0xff)) 568 (param && param != 0xfa && param != 0xff))
568 return -1; 569 return -1;
569 570
570 aux_loop_broken = 1; 571/*
572 * If AUX_LOOP completed without error but returned unexpected data
573 * mark it as broken
574 */
575 if (!retval)
576 aux_loop_broken = 1;
571 } 577 }
572 578
573/* 579/*
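The i8042 hunk above records the return code of the AUX_LOOP command so that aux_loop_broken is only set when the command completed but echoed back unexpected data; a failed command is no longer misclassified as a broken loopback. A tiny self-contained sketch of that distinction, with send_cmd() as a hypothetical stand-in for i8042_command():

#include <stdio.h>

/* Hypothetical transport: non-zero return means the command itself failed,
 * otherwise the echoed byte is written back through *param. */
static int send_cmd(unsigned char *param, int fail, unsigned char echo)
{
        if (fail)
                return -1;
        *param = echo;
        return 0;
}

static void classify(int fail, unsigned char echo)
{
        unsigned char param = 0x5a;
        int retval = send_cmd(&param, fail, echo);

        if (retval || param != 0x5a) {
                if (!retval)
                        printf("command ok, bad echo 0x%02x: loopback is broken\n",
                               (unsigned)param);
                else
                        printf("command failed: says nothing about the loopback\n");
        } else {
                printf("loopback works\n");
        }
}

int main(void)
{
        classify(0, 0x5a);      /* works */
        classify(0, 0x00);      /* broken loopback */
        classify(1, 0x00);      /* transport failure */
        return 0;
}
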
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 72995777f809..b406ecfa7268 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -858,19 +858,7 @@ static struct eisa_device_id vortex_eisa_ids[] = {
858}; 858};
859MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids); 859MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids);
860 860
861static int vortex_eisa_probe(struct device *device); 861static int __init vortex_eisa_probe(struct device *device)
862static int vortex_eisa_remove(struct device *device);
863
864static struct eisa_driver vortex_eisa_driver = {
865 .id_table = vortex_eisa_ids,
866 .driver = {
867 .name = "3c59x",
868 .probe = vortex_eisa_probe,
869 .remove = vortex_eisa_remove
870 }
871};
872
873static int vortex_eisa_probe(struct device *device)
874{ 862{
875 void __iomem *ioaddr; 863 void __iomem *ioaddr;
876 struct eisa_device *edev; 864 struct eisa_device *edev;
@@ -893,7 +881,7 @@ static int vortex_eisa_probe(struct device *device)
893 return 0; 881 return 0;
894} 882}
895 883
896static int vortex_eisa_remove(struct device *device) 884static int __devexit vortex_eisa_remove(struct device *device)
897{ 885{
898 struct eisa_device *edev; 886 struct eisa_device *edev;
899 struct net_device *dev; 887 struct net_device *dev;
@@ -918,7 +906,17 @@ static int vortex_eisa_remove(struct device *device)
918 free_netdev(dev); 906 free_netdev(dev);
919 return 0; 907 return 0;
920} 908}
921#endif 909
910static struct eisa_driver vortex_eisa_driver = {
911 .id_table = vortex_eisa_ids,
912 .driver = {
913 .name = "3c59x",
914 .probe = vortex_eisa_probe,
915 .remove = __devexit_p(vortex_eisa_remove)
916 }
917};
918
919#endif /* CONFIG_EISA */
922 920
923/* returns count found (>= 0), or negative on error */ 921/* returns count found (>= 0), or negative on error */
924static int __init vortex_eisa_init(void) 922static int __init vortex_eisa_init(void)
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 9ba21e0f27c5..1ee27c360a4b 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -787,6 +787,12 @@ static int mv643xx_eth_open(struct net_device *dev)
787 unsigned int size; 787 unsigned int size;
788 int err; 788 int err;
789 789
790 /* Clear any pending ethernet port interrupts */
791 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
792 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
793 /* wait for previous write to complete */
794 mv_read (MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num));
795
790 err = request_irq(dev->irq, mv643xx_eth_int_handler, 796 err = request_irq(dev->irq, mv643xx_eth_int_handler,
791 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev); 797 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
792 if (err) { 798 if (err) {
@@ -875,10 +881,6 @@ static int mv643xx_eth_open(struct net_device *dev)
875 881
876 mv643xx_eth_rx_refill_descs(dev); /* Fill RX ring with skb's */ 882 mv643xx_eth_rx_refill_descs(dev); /* Fill RX ring with skb's */
877 883
878 /* Clear any pending ethernet port interrupts */
879 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
880 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
881
882 eth_port_start(dev); 884 eth_port_start(dev);
883 885
884 /* Interrupt Coalescing */ 886 /* Interrupt Coalescing */
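The mv643xx_eth hunk moves the clearing of the interrupt cause registers to before request_irq(), and reads one register back so the posted MMIO write is flushed before a handler can run against stale state. A generic kernel-style sketch of that ordering is below; the register offset is an assumption, it uses readl()/writel() rather than the driver's mv_read()/mv_write() accessors, and it only compiles in a kernel context.

/* Kernel-style sketch (assumed offset, not the mv643xx register map). */
#include <linux/interrupt.h>
#include <linux/io.h>

#define IRQ_CAUSE_REG   0x0460

static int example_open(void __iomem *base, int irq, irq_handler_t handler,
                        void *dev)
{
        /* Clear anything latched before we have a handler installed... */
        writel(0, base + IRQ_CAUSE_REG);
        /* ...and read back so the posted write has really reached the device. */
        (void)readl(base + IRQ_CAUSE_REG);

        return request_irq(irq, handler, IRQF_SHARED, "example", dev);
}
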
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index b05dc6ed7fb7..ac02b3b60f92 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -181,6 +181,7 @@ struct myri10ge_priv {
181 int intr_coal_delay; 181 int intr_coal_delay;
182 __be32 __iomem *intr_coal_delay_ptr; 182 __be32 __iomem *intr_coal_delay_ptr;
183 int mtrr; 183 int mtrr;
184 int wc_enabled;
184 int wake_queue; 185 int wake_queue;
185 int stop_queue; 186 int stop_queue;
186 int down_cnt; 187 int down_cnt;
@@ -717,6 +718,8 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
717 int status; 718 int status;
718 size_t bytes; 719 size_t bytes;
719 u32 len; 720 u32 len;
721 struct page *dmatest_page;
722 dma_addr_t dmatest_bus;
720 723
721 /* try to send a reset command to the card to see if it 724 /* try to send a reset command to the card to see if it
722 * is alive */ 725 * is alive */
@@ -726,6 +729,11 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
726 dev_err(&mgp->pdev->dev, "failed reset\n"); 729 dev_err(&mgp->pdev->dev, "failed reset\n");
727 return -ENXIO; 730 return -ENXIO;
728 } 731 }
732 dmatest_page = alloc_page(GFP_KERNEL);
733 if (!dmatest_page)
734 return -ENOMEM;
735 dmatest_bus = pci_map_page(mgp->pdev, dmatest_page, 0, PAGE_SIZE,
736 DMA_BIDIRECTIONAL);
729 737
730 /* Now exchange information about interrupts */ 738 /* Now exchange information about interrupts */
731 739
@@ -764,8 +772,8 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
764 772
765 len = mgp->tx.boundary; 773 len = mgp->tx.boundary;
766 774
767 cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->rx_done.bus); 775 cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
768 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->rx_done.bus); 776 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
769 cmd.data2 = len * 0x10000; 777 cmd.data2 = len * 0x10000;
770 status = myri10ge_send_cmd(mgp, MXGEFW_DMA_TEST, &cmd, 0); 778 status = myri10ge_send_cmd(mgp, MXGEFW_DMA_TEST, &cmd, 0);
771 if (status == 0) 779 if (status == 0)
@@ -774,8 +782,8 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
774 else 782 else
775 dev_warn(&mgp->pdev->dev, "DMA read benchmark failed: %d\n", 783 dev_warn(&mgp->pdev->dev, "DMA read benchmark failed: %d\n",
776 status); 784 status);
777 cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->rx_done.bus); 785 cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
778 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->rx_done.bus); 786 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
779 cmd.data2 = len * 0x1; 787 cmd.data2 = len * 0x1;
780 status = myri10ge_send_cmd(mgp, MXGEFW_DMA_TEST, &cmd, 0); 788 status = myri10ge_send_cmd(mgp, MXGEFW_DMA_TEST, &cmd, 0);
781 if (status == 0) 789 if (status == 0)
@@ -785,8 +793,8 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
785 dev_warn(&mgp->pdev->dev, "DMA write benchmark failed: %d\n", 793 dev_warn(&mgp->pdev->dev, "DMA write benchmark failed: %d\n",
786 status); 794 status);
787 795
788 cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->rx_done.bus); 796 cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
789 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->rx_done.bus); 797 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
790 cmd.data2 = len * 0x10001; 798 cmd.data2 = len * 0x10001;
791 status = myri10ge_send_cmd(mgp, MXGEFW_DMA_TEST, &cmd, 0); 799 status = myri10ge_send_cmd(mgp, MXGEFW_DMA_TEST, &cmd, 0);
792 if (status == 0) 800 if (status == 0)
@@ -796,6 +804,9 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
796 dev_warn(&mgp->pdev->dev, 804 dev_warn(&mgp->pdev->dev,
797 "DMA read/write benchmark failed: %d\n", status); 805 "DMA read/write benchmark failed: %d\n", status);
798 806
807 pci_unmap_page(mgp->pdev, dmatest_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);
808 put_page(dmatest_page);
809
799 memset(mgp->rx_done.entry, 0, bytes); 810 memset(mgp->rx_done.entry, 0, bytes);
800 811
801 /* reset mcp/driver shared state back to 0 */ 812 /* reset mcp/driver shared state back to 0 */
@@ -1375,7 +1386,7 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
1375 data[i] = ((unsigned long *)&mgp->stats)[i]; 1386 data[i] = ((unsigned long *)&mgp->stats)[i];
1376 1387
1377 data[i++] = (unsigned int)mgp->tx.boundary; 1388 data[i++] = (unsigned int)mgp->tx.boundary;
1378 data[i++] = (unsigned int)(mgp->mtrr >= 0); 1389 data[i++] = (unsigned int)mgp->wc_enabled;
1379 data[i++] = (unsigned int)mgp->pdev->irq; 1390 data[i++] = (unsigned int)mgp->pdev->irq;
1380 data[i++] = (unsigned int)mgp->msi_enabled; 1391 data[i++] = (unsigned int)mgp->msi_enabled;
1381 data[i++] = (unsigned int)mgp->read_dma; 1392 data[i++] = (unsigned int)mgp->read_dma;
@@ -1456,6 +1467,8 @@ static int myri10ge_allocate_rings(struct net_device *dev)
1456 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0); 1467 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0);
1457 tx_ring_size = cmd.data0; 1468 tx_ring_size = cmd.data0;
1458 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0); 1469 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
1470 if (status != 0)
1471 return status;
1459 rx_ring_size = cmd.data0; 1472 rx_ring_size = cmd.data0;
1460 1473
1461 tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send); 1474 tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send);
@@ -1463,6 +1476,8 @@ static int myri10ge_allocate_rings(struct net_device *dev)
1463 mgp->tx.mask = tx_ring_entries - 1; 1476 mgp->tx.mask = tx_ring_entries - 1;
1464 mgp->rx_small.mask = mgp->rx_big.mask = rx_ring_entries - 1; 1477 mgp->rx_small.mask = mgp->rx_big.mask = rx_ring_entries - 1;
1465 1478
1479 status = -ENOMEM;
1480
1466 /* allocate the host shadow rings */ 1481 /* allocate the host shadow rings */
1467 1482
1468 bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4) 1483 bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4)
@@ -1735,7 +1750,7 @@ static int myri10ge_open(struct net_device *dev)
1735 goto abort_with_irq; 1750 goto abort_with_irq;
1736 } 1751 }
1737 1752
1738 if (myri10ge_wcfifo && mgp->mtrr >= 0) { 1753 if (myri10ge_wcfifo && mgp->wc_enabled) {
1739 mgp->tx.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_SEND_4; 1754 mgp->tx.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_SEND_4;
1740 mgp->rx_small.wc_fifo = 1755 mgp->rx_small.wc_fifo =
1741 (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_SMALL; 1756 (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_SMALL;
@@ -2510,6 +2525,12 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
2510 bridge->vendor, bridge->device); 2525 bridge->vendor, bridge->device);
2511 mgp->tx.boundary = 4096; 2526 mgp->tx.boundary = 4096;
2512 mgp->fw_name = myri10ge_fw_aligned; 2527 mgp->fw_name = myri10ge_fw_aligned;
2528 } else if (bridge &&
2529 bridge->vendor == PCI_VENDOR_ID_SGI &&
2530 bridge->device == 0x4002 /* TIOCE pcie-port */ ) {
2531 /* this pcie bridge does not support 4K rdma request */
2532 mgp->tx.boundary = 2048;
2533 mgp->fw_name = myri10ge_fw_aligned;
2513 } 2534 }
2514 } else { 2535 } else {
2515 if (myri10ge_force_firmware == 1) { 2536 if (myri10ge_force_firmware == 1) {
@@ -2830,9 +2851,12 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2830 mgp->board_span = pci_resource_len(pdev, 0); 2851 mgp->board_span = pci_resource_len(pdev, 0);
2831 mgp->iomem_base = pci_resource_start(pdev, 0); 2852 mgp->iomem_base = pci_resource_start(pdev, 0);
2832 mgp->mtrr = -1; 2853 mgp->mtrr = -1;
2854 mgp->wc_enabled = 0;
2833#ifdef CONFIG_MTRR 2855#ifdef CONFIG_MTRR
2834 mgp->mtrr = mtrr_add(mgp->iomem_base, mgp->board_span, 2856 mgp->mtrr = mtrr_add(mgp->iomem_base, mgp->board_span,
2835 MTRR_TYPE_WRCOMB, 1); 2857 MTRR_TYPE_WRCOMB, 1);
2858 if (mgp->mtrr >= 0)
2859 mgp->wc_enabled = 1;
2836#endif 2860#endif
2837 /* Hack. need to get rid of these magic numbers */ 2861 /* Hack. need to get rid of these magic numbers */
2838 mgp->sram_size = 2862 mgp->sram_size =
@@ -2927,7 +2951,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2927 dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n", 2951 dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n",
2928 (mgp->msi_enabled ? "MSI" : "xPIC"), 2952 (mgp->msi_enabled ? "MSI" : "xPIC"),
2929 netdev->irq, mgp->tx.boundary, mgp->fw_name, 2953 netdev->irq, mgp->tx.boundary, mgp->fw_name,
2930 (mgp->mtrr >= 0 ? "Enabled" : "Disabled")); 2954 (mgp->wc_enabled ? "Enabled" : "Disabled"));
2931 2955
2932 return 0; 2956 return 0;
2933 2957
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index a2877f33fa85..1be55702557d 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -228,7 +228,7 @@ int netxen_nic_hw_resources(struct netxen_adapter *adapter)
228 &adapter->ctx_desc_pdev); 228 &adapter->ctx_desc_pdev);
229 229
230 printk("ctx_desc_phys_addr: 0x%llx\n", 230 printk("ctx_desc_phys_addr: 0x%llx\n",
231 (u64) adapter->ctx_desc_phys_addr); 231 (unsigned long long) adapter->ctx_desc_phys_addr);
232 if (addr == NULL) { 232 if (addr == NULL) {
233 DPRINTK(ERR, "bad return from pci_alloc_consistent\n"); 233 DPRINTK(ERR, "bad return from pci_alloc_consistent\n");
234 err = -ENOMEM; 234 err = -ENOMEM;
@@ -247,7 +247,8 @@ int netxen_nic_hw_resources(struct netxen_adapter *adapter)
247 adapter->max_tx_desc_count, 247 adapter->max_tx_desc_count,
248 (dma_addr_t *) & hw->cmd_desc_phys_addr, 248 (dma_addr_t *) & hw->cmd_desc_phys_addr,
249 &adapter->ahw.cmd_desc_pdev); 249 &adapter->ahw.cmd_desc_pdev);
250 printk("cmd_desc_phys_addr: 0x%llx\n", (u64) hw->cmd_desc_phys_addr); 250 printk("cmd_desc_phys_addr: 0x%llx\n",
251 (unsigned long long) hw->cmd_desc_phys_addr);
251 252
252 if (addr == NULL) { 253 if (addr == NULL) {
253 DPRINTK(ERR, "bad return from pci_alloc_consistent\n"); 254 DPRINTK(ERR, "bad return from pci_alloc_consistent\n");
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 15d954e50cae..521b5f0618a4 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -572,8 +572,8 @@ static void rtl8169_xmii_reset_enable(void __iomem *ioaddr)
572{ 572{
573 unsigned int val; 573 unsigned int val;
574 574
575 mdio_write(ioaddr, MII_BMCR, BMCR_RESET); 575 val = mdio_read(ioaddr, MII_BMCR) | BMCR_RESET;
576 val = mdio_read(ioaddr, MII_BMCR); 576 mdio_write(ioaddr, MII_BMCR, val & 0xffff);
577} 577}
578 578
579static void rtl8169_check_link_status(struct net_device *dev, 579static void rtl8169_check_link_status(struct net_device *dev,
@@ -1368,11 +1368,7 @@ static inline void rtl8169_request_timer(struct net_device *dev)
1368 (tp->phy_version >= RTL_GIGA_PHY_VER_H)) 1368 (tp->phy_version >= RTL_GIGA_PHY_VER_H))
1369 return; 1369 return;
1370 1370
1371 init_timer(timer); 1371 mod_timer(timer, jiffies + RTL8169_PHY_TIMEOUT);
1372 timer->expires = jiffies + RTL8169_PHY_TIMEOUT;
1373 timer->data = (unsigned long)(dev);
1374 timer->function = rtl8169_phy_timer;
1375 add_timer(timer);
1376} 1372}
1377 1373
1378#ifdef CONFIG_NET_POLL_CONTROLLER 1374#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1685,6 +1681,10 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1685 tp->mmio_addr = ioaddr; 1681 tp->mmio_addr = ioaddr;
1686 tp->align = rtl_cfg_info[ent->driver_data].align; 1682 tp->align = rtl_cfg_info[ent->driver_data].align;
1687 1683
1684 init_timer(&tp->timer);
1685 tp->timer.data = (unsigned long) dev;
1686 tp->timer.function = rtl8169_phy_timer;
1687
1688 spin_lock_init(&tp->lock); 1688 spin_lock_init(&tp->lock);
1689 1689
1690 rc = register_netdev(dev); 1690 rc = register_netdev(dev);
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 53839979cfb8..ab0ab92583fe 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -2165,9 +2165,27 @@ force_update:
2165 /* fall through */ 2165 /* fall through */
2166#endif 2166#endif
2167 case OP_RXCHKS: 2167 case OP_RXCHKS:
2168 skb = sky2->rx_ring[sky2->rx_next].skb; 2168 if (!sky2->rx_csum)
2169 skb->ip_summed = CHECKSUM_COMPLETE; 2169 break;
2170 skb->csum = status & 0xffff; 2170
2171 /* Both checksum counters are programmed to start at
2172 * the same offset, so unless there is a problem they
2173 * should match. This failure is an early indication that
2174 * hardware receive checksumming won't work.
2175 */
2176 if (likely(status >> 16 == (status & 0xffff))) {
2177 skb = sky2->rx_ring[sky2->rx_next].skb;
2178 skb->ip_summed = CHECKSUM_COMPLETE;
2179 skb->csum = status & 0xffff;
2180 } else {
2181 printk(KERN_NOTICE PFX "%s: hardware receive "
2182 "checksum problem (status = %#x)\n",
2183 dev->name, status);
2184 sky2->rx_csum = 0;
2185 sky2_write32(sky2->hw,
2186 Q_ADDR(rxqaddr[le->link], Q_CSR),
2187 BMU_DIS_RX_CHKSUM);
2188 }
2171 break; 2189 break;
2172 2190
2173 case OP_TXINDEXLE: 2191 case OP_TXINDEXLE:
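In the sky2 hunk above, the 32-bit checksum status word carries the two hardware checksum counters in its high and low 16 bits; since both are programmed to start at the same offset they should agree, and a mismatch is taken as an early sign that hardware receive checksumming is unreliable, so it is switched off. A small self-contained sketch of that consistency check:

#include <stdio.h>
#include <stdint.h>

static int rx_csum_enabled = 1;

/* Returns the checksum to use, or -1 once hardware checksumming is disabled. */
static int check_status(uint32_t status)
{
        if ((status >> 16) == (status & 0xffff))
                return status & 0xffff; /* counters agree: trust the checksum */

        /* Counters disagree: stop trusting the hardware from now on. */
        rx_csum_enabled = 0;
        return -1;
}

int main(void)
{
        printf("%d\n", check_status(0x12341234));      /* 0x1234: consistent */
        printf("%d\n", check_status(0x12340000));      /* -1: inconsistent */
        printf("rx_csum_enabled=%d\n", rx_csum_enabled);
        return 0;
}
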
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 0d97e10ccac5..36202e94ee91 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -186,7 +186,6 @@ static char __devinit *adapter_def(char type)
186#define TRC_INITV 0x02 /* verbose init trace points */ 186#define TRC_INITV 0x02 /* verbose init trace points */
187static unsigned char ibmtr_debug_trace = 0; 187static unsigned char ibmtr_debug_trace = 0;
188 188
189static int ibmtr_probe(struct net_device *dev);
190static int ibmtr_probe1(struct net_device *dev, int ioaddr); 189static int ibmtr_probe1(struct net_device *dev, int ioaddr);
191static unsigned char get_sram_size(struct tok_info *adapt_info); 190static unsigned char get_sram_size(struct tok_info *adapt_info);
192static int trdev_init(struct net_device *dev); 191static int trdev_init(struct net_device *dev);
@@ -335,17 +334,6 @@ static void ibmtr_cleanup_card(struct net_device *dev)
335#endif 334#endif
336} 335}
337 336
338int ibmtr_probe_card(struct net_device *dev)
339{
340 int err = ibmtr_probe(dev);
341 if (!err) {
342 err = register_netdev(dev);
343 if (err)
344 ibmtr_cleanup_card(dev);
345 }
346 return err;
347}
348
349/**************************************************************************** 337/****************************************************************************
350 * ibmtr_probe(): Routine specified in the network device structure 338 * ibmtr_probe(): Routine specified in the network device structure
351 * to probe for an IBM Token Ring Adapter. Routine outline: 339 * to probe for an IBM Token Ring Adapter. Routine outline:
@@ -358,7 +346,7 @@ int ibmtr_probe_card(struct net_device *dev)
358 * which references it. 346 * which references it.
359 ****************************************************************************/ 347 ****************************************************************************/
360 348
361static int ibmtr_probe(struct net_device *dev) 349static int __init ibmtr_probe(struct net_device *dev)
362{ 350{
363 int i; 351 int i;
364 int base_addr = dev->base_addr; 352 int base_addr = dev->base_addr;
@@ -378,6 +366,17 @@ static int ibmtr_probe(struct net_device *dev)
378 return -ENODEV; 366 return -ENODEV;
379} 367}
380 368
369int __init ibmtr_probe_card(struct net_device *dev)
370{
371 int err = ibmtr_probe(dev);
372 if (!err) {
373 err = register_netdev(dev);
374 if (err)
375 ibmtr_cleanup_card(dev);
376 }
377 return err;
378}
379
381/*****************************************************************************/ 380/*****************************************************************************/
382 381
383static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr) 382static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index ebbda1d8f542..f3a972e74e9a 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -30,8 +30,8 @@
30*/ 30*/
31 31
32#define DRV_NAME "via-rhine" 32#define DRV_NAME "via-rhine"
33#define DRV_VERSION "1.4.2" 33#define DRV_VERSION "1.4.3"
34#define DRV_RELDATE "Sept-11-2006" 34#define DRV_RELDATE "2007-03-06"
35 35
36 36
37/* A few user-configurable values. 37/* A few user-configurable values.
@@ -105,6 +105,7 @@ static const int multicast_filter_limit = 32;
105#include <asm/io.h> 105#include <asm/io.h>
106#include <asm/irq.h> 106#include <asm/irq.h>
107#include <asm/uaccess.h> 107#include <asm/uaccess.h>
108#include <linux/dmi.h>
108 109
109/* These identify the driver base version and may not be removed. */ 110/* These identify the driver base version and may not be removed. */
110static char version[] __devinitdata = 111static char version[] __devinitdata =
@@ -1995,6 +1996,23 @@ static struct pci_driver rhine_driver = {
1995 .shutdown = rhine_shutdown, 1996 .shutdown = rhine_shutdown,
1996}; 1997};
1997 1998
1999static struct dmi_system_id __initdata rhine_dmi_table[] = {
2000 {
2001 .ident = "EPIA-M",
2002 .matches = {
2003 DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2004 DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2005 },
2006 },
2007 {
2008 .ident = "KV7",
2009 .matches = {
2010 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2011 DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2012 },
2013 },
2014 { NULL }
2015};
1998 2016
1999static int __init rhine_init(void) 2017static int __init rhine_init(void)
2000{ 2018{
@@ -2002,6 +2020,16 @@ static int __init rhine_init(void)
2002#ifdef MODULE 2020#ifdef MODULE
2003 printk(version); 2021 printk(version);
2004#endif 2022#endif
2023 if (dmi_check_system(rhine_dmi_table)) {
2024 /* these BIOSes fail at PXE boot if chip is in D3 */
2025 avoid_D3 = 1;
2026 printk(KERN_WARNING "%s: Broken BIOS detected, avoid_D3 "
2027 "enabled.\n",
2028 DRV_NAME);
2029 }
2030 else if (avoid_D3)
2031 printk(KERN_INFO "%s: avoid_D3 set.\n", DRV_NAME);
2032
2005 return pci_register_driver(&rhine_driver); 2033 return pci_register_driver(&rhine_driver);
2006} 2034}
2007 2035
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index 8dbcf83bb5f3..8b4540bfc1b0 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -407,7 +407,7 @@ static void z8530_tx(struct z8530_channel *c)
407 while(c->txcount) { 407 while(c->txcount) {
408 /* FIFO full ? */ 408 /* FIFO full ? */
409 if(!(read_zsreg(c, R0)&4)) 409 if(!(read_zsreg(c, R0)&4))
410 break; 410 return;
411 c->txcount--; 411 c->txcount--;
412 /* 412 /*
413 * Shovel out the byte 413 * Shovel out the byte
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index e594af46ff05..80cb88eb98c6 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -1858,9 +1858,6 @@ static irqreturn_t bcm43xx_interrupt_handler(int irq, void *dev_id)
1858 1858
1859 spin_lock(&bcm->irq_lock); 1859 spin_lock(&bcm->irq_lock);
1860 1860
1861 assert(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED);
1862 assert(bcm->current_core->id == BCM43xx_COREID_80211);
1863
1864 reason = bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON); 1861 reason = bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON);
1865 if (reason == 0xffffffff) { 1862 if (reason == 0xffffffff) {
1866 /* irq not for us (shared irq) */ 1863 /* irq not for us (shared irq) */
@@ -1871,6 +1868,9 @@ static irqreturn_t bcm43xx_interrupt_handler(int irq, void *dev_id)
1871 if (!reason) 1868 if (!reason)
1872 goto out; 1869 goto out;
1873 1870
1871 assert(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED);
1872 assert(bcm->current_core->id == BCM43xx_COREID_80211);
1873
1874 bcm->dma_reason[0] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA0_REASON) 1874 bcm->dma_reason[0] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA0_REASON)
1875 & 0x0001DC00; 1875 & 0x0001DC00;
1876 bcm->dma_reason[1] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA1_REASON) 1876 bcm->dma_reason[1] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA1_REASON)
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
index 3a5c9c2b2150..cae89258a640 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
@@ -859,6 +859,11 @@ static void bcm43xx_phy_initb6(struct bcm43xx_private *bcm)
859 bcm43xx_radio_write16(bcm, 0x005D, 0x0088); 859 bcm43xx_radio_write16(bcm, 0x005D, 0x0088);
860 bcm43xx_radio_write16(bcm, 0x005E, 0x0088); 860 bcm43xx_radio_write16(bcm, 0x005E, 0x0088);
861 bcm43xx_radio_write16(bcm, 0x007D, 0x0088); 861 bcm43xx_radio_write16(bcm, 0x007D, 0x0088);
862 bcm43xx_shm_write32(bcm, BCM43xx_SHM_SHARED,
863 BCM43xx_UCODEFLAGS_OFFSET,
864 (bcm43xx_shm_read32(bcm, BCM43xx_SHM_SHARED,
865 BCM43xx_UCODEFLAGS_OFFSET)
866 | 0x00000200));
862 } 867 }
863 if (radio->revision == 8) { 868 if (radio->revision == 8) {
864 bcm43xx_radio_write16(bcm, 0x0051, 0x0000); 869 bcm43xx_radio_write16(bcm, 0x0051, 0x0000);
@@ -941,7 +946,8 @@ static void bcm43xx_phy_initb6(struct bcm43xx_private *bcm)
941 bcm43xx_phy_write(bcm, 0x0038, 0x0668); 946 bcm43xx_phy_write(bcm, 0x0038, 0x0668);
942 bcm43xx_radio_set_txpower_bg(bcm, 0xFFFF, 0xFFFF, 0xFFFF); 947 bcm43xx_radio_set_txpower_bg(bcm, 0xFFFF, 0xFFFF, 0xFFFF);
943 if (radio->revision <= 5) 948 if (radio->revision <= 5)
944 bcm43xx_phy_write(bcm, 0x005D, bcm43xx_phy_read(bcm, 0x005D) | 0x0003); 949 bcm43xx_phy_write(bcm, 0x005D, (bcm43xx_phy_read(bcm, 0x005D)
950 & 0xFF80) | 0x0003);
945 if (radio->revision <= 2) 951 if (radio->revision <= 2)
946 bcm43xx_radio_write16(bcm, 0x005D, 0x000D); 952 bcm43xx_radio_write16(bcm, 0x005D, 0x000D);
947 953
@@ -958,7 +964,7 @@ static void bcm43xx_phy_initb6(struct bcm43xx_private *bcm)
958 bcm43xx_phy_write(bcm, 0x0016, 0x0410); 964 bcm43xx_phy_write(bcm, 0x0016, 0x0410);
959 bcm43xx_phy_write(bcm, 0x0017, 0x0820); 965 bcm43xx_phy_write(bcm, 0x0017, 0x0820);
960 bcm43xx_phy_write(bcm, 0x0062, 0x0007); 966 bcm43xx_phy_write(bcm, 0x0062, 0x0007);
961 (void) bcm43xx_radio_calibrationvalue(bcm); 967 bcm43xx_radio_init2050(bcm);
962 bcm43xx_phy_lo_g_measure(bcm); 968 bcm43xx_phy_lo_g_measure(bcm);
963 if (bcm->sprom.boardflags & BCM43xx_BFL_RSSI) { 969 if (bcm->sprom.boardflags & BCM43xx_BFL_RSSI) {
964 bcm43xx_calc_nrssi_slope(bcm); 970 bcm43xx_calc_nrssi_slope(bcm);
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
index 7b665e2386a8..d6d9413d7f23 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
@@ -105,18 +105,24 @@ static int bcm43xx_wx_set_channelfreq(struct net_device *net_dev,
105 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 105 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
106 unsigned long flags; 106 unsigned long flags;
107 u8 channel; 107 u8 channel;
108 s8 expon;
108 int freq; 109 int freq;
109 int err = -EINVAL; 110 int err = -EINVAL;
110 111
111 mutex_lock(&bcm->mutex); 112 mutex_lock(&bcm->mutex);
112 spin_lock_irqsave(&bcm->irq_lock, flags); 113 spin_lock_irqsave(&bcm->irq_lock, flags);
113 114
114 if ((data->freq.m >= 0) && (data->freq.m <= 1000)) { 115 if ((data->freq.e == 0) &&
116 (data->freq.m >= 0) && (data->freq.m <= 1000)) {
115 channel = data->freq.m; 117 channel = data->freq.m;
116 freq = bcm43xx_channel_to_freq(bcm, channel); 118 freq = bcm43xx_channel_to_freq(bcm, channel);
117 } else { 119 } else {
118 channel = bcm43xx_freq_to_channel(bcm, data->freq.m);
119 freq = data->freq.m; 120 freq = data->freq.m;
121 expon = 6 - data->freq.e;
122 while (--expon >= 0) /* scale down the frequency to MHz */
123 freq /= 10;
124 assert(freq > 1000);
125 channel = bcm43xx_freq_to_channel(bcm, freq);
120 } 126 }
121 if (!ieee80211_is_valid_channel(bcm->ieee, channel)) 127 if (!ieee80211_is_valid_channel(bcm->ieee, channel))
122 goto out_unlock; 128 goto out_unlock;
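The bcm43xx_wx hunk interprets the wireless-extensions iw_freq pair (m, e) as m x 10^e Hz: with e == 0 and m <= 1000 the value is already a channel number, otherwise m is scaled down to MHz by dividing by ten (6 - e) times before being mapped to a channel. A self-contained sketch of that conversion with worked 2.4 GHz values; mhz_to_channel() is a simplified stand-in, not bcm43xx_freq_to_channel().

#include <stdio.h>

/* Simplified 2.4 GHz (802.11b/g) mapping, not the driver's helper. */
static int mhz_to_channel(int mhz)
{
        if (mhz == 2484)
                return 14;
        return (mhz - 2407) / 5;        /* channels 1..13: 2412, 2417, ... MHz */
}

/* iw_freq semantics: frequency = m * 10^e Hz; (e == 0, m <= 1000) means "channel". */
static int iwfreq_to_channel(int m, int e)
{
        int freq = m;
        int expon = 6 - e;

        if (e == 0 && m >= 0 && m <= 1000)
                return m;               /* already a channel number */

        while (--expon >= 0)            /* scale down to MHz */
                freq /= 10;
        return mhz_to_channel(freq);
}

int main(void)
{
        printf("%d\n", iwfreq_to_channel(11, 0));       /* plain channel 11 */
        printf("%d\n", iwfreq_to_channel(2412000, 3));  /* 2.412 GHz -> channel 1 */
        printf("%d\n", iwfreq_to_channel(2462, 6));     /* 2.462 GHz -> channel 11 */
        return 0;
}
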
diff --git a/drivers/serial/sn_console.c b/drivers/serial/sn_console.c
index 253ceb895ca7..a27e9e92cb5e 100644
--- a/drivers/serial/sn_console.c
+++ b/drivers/serial/sn_console.c
@@ -636,25 +636,6 @@ static irqreturn_t sn_sal_interrupt(int irq, void *dev_id)
636} 636}
637 637
638/** 638/**
639 * sn_sal_connect_interrupt - Request interrupt, handled by sn_sal_interrupt
640 * @port: Our sn_cons_port (which contains the uart port)
641 *
642 * returns the console irq if interrupt is successfully registered, else 0
643 *
644 */
645static int sn_sal_connect_interrupt(struct sn_cons_port *port)
646{
647 if (request_irq(SGI_UART_VECTOR, sn_sal_interrupt,
648 IRQF_DISABLED | IRQF_SHARED,
649 "SAL console driver", port) >= 0) {
650 return SGI_UART_VECTOR;
651 }
652
653 printk(KERN_INFO "sn_console: console proceeding in polled mode\n");
654 return 0;
655}
656
657/**
658 * sn_sal_timer_poll - this function handles polled console mode 639 * sn_sal_timer_poll - this function handles polled console mode
659 * @data: A pointer to our sn_cons_port (which contains the uart port) 640 * @data: A pointer to our sn_cons_port (which contains the uart port)
660 * 641 *
@@ -746,30 +727,31 @@ static void __init sn_sal_switch_to_asynch(struct sn_cons_port *port)
746 * mode. We were previously in asynch/polling mode (using init_timer). 727 * mode. We were previously in asynch/polling mode (using init_timer).
747 * 728 *
748 * We attempt to switch to interrupt mode here by calling 729 * We attempt to switch to interrupt mode here by calling
749 * sn_sal_connect_interrupt. If that works out, we enable receive interrupts. 730 * request_irq. If that works out, we enable receive interrupts.
750 */ 731 */
751static void __init sn_sal_switch_to_interrupts(struct sn_cons_port *port) 732static void __init sn_sal_switch_to_interrupts(struct sn_cons_port *port)
752{ 733{
753 int irq;
754 unsigned long flags; 734 unsigned long flags;
755 735
756 if (!port) 736 if (port) {
757 return; 737 DPRINTF("sn_console: switching to interrupt driven console\n");
758
759 DPRINTF("sn_console: switching to interrupt driven console\n");
760
761 spin_lock_irqsave(&port->sc_port.lock, flags);
762 738
763 irq = sn_sal_connect_interrupt(port); 739 if (request_irq(SGI_UART_VECTOR, sn_sal_interrupt,
740 IRQF_DISABLED | IRQF_SHARED,
741 "SAL console driver", port) >= 0) {
742 spin_lock_irqsave(&port->sc_port.lock, flags);
743 port->sc_port.irq = SGI_UART_VECTOR;
744 port->sc_ops = &intr_ops;
764 745
765 if (irq) { 746 /* turn on receive interrupts */
766 port->sc_port.irq = irq; 747 ia64_sn_console_intr_enable(SAL_CONSOLE_INTR_RECV);
767 port->sc_ops = &intr_ops; 748 spin_unlock_irqrestore(&port->sc_port.lock, flags);
768 749 }
769 /* turn on receive interrupts */ 750 else {
770 ia64_sn_console_intr_enable(SAL_CONSOLE_INTR_RECV); 751 printk(KERN_INFO
752 "sn_console: console proceeding in polled mode\n");
753 }
771 } 754 }
772 spin_unlock_irqrestore(&port->sc_port.lock, flags);
773} 755}
774 756
775/* 757/*
diff --git a/fs/compat.c b/fs/compat.c
index 0ec70e3cee0a..040a8be38a48 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -48,6 +48,7 @@
48#include <linux/highmem.h> 48#include <linux/highmem.h>
49#include <linux/poll.h> 49#include <linux/poll.h>
50#include <linux/mm.h> 50#include <linux/mm.h>
51#include <linux/eventpoll.h>
51 52
52#include <net/sock.h> /* siocdevprivate_ioctl */ 53#include <net/sock.h> /* siocdevprivate_ioctl */
53 54
@@ -2235,3 +2236,102 @@ long asmlinkage compat_sys_nfsservctl(int cmd, void *notused, void *notused2)
2235 return sys_ni_syscall(); 2236 return sys_ni_syscall();
2236} 2237}
2237#endif 2238#endif
2239
2240#ifdef CONFIG_EPOLL
2241
2242#ifdef CONFIG_HAS_COMPAT_EPOLL_EVENT
2243asmlinkage long compat_sys_epoll_ctl(int epfd, int op, int fd,
2244 struct compat_epoll_event __user *event)
2245{
2246 long err = 0;
2247 struct compat_epoll_event user;
2248 struct epoll_event __user *kernel = NULL;
2249
2250 if (event) {
2251 if (copy_from_user(&user, event, sizeof(user)))
2252 return -EFAULT;
2253 kernel = compat_alloc_user_space(sizeof(struct epoll_event));
2254 err |= __put_user(user.events, &kernel->events);
2255 err |= __put_user(user.data, &kernel->data);
2256 }
2257
2258 return err ? err : sys_epoll_ctl(epfd, op, fd, kernel);
2259}
2260
2261
2262asmlinkage long compat_sys_epoll_wait(int epfd,
2263 struct compat_epoll_event __user *events,
2264 int maxevents, int timeout)
2265{
2266 long i, ret, err = 0;
2267 struct epoll_event __user *kbuf;
2268 struct epoll_event ev;
2269
2270 if ((maxevents <= 0) ||
2271 (maxevents > (INT_MAX / sizeof(struct epoll_event))))
2272 return -EINVAL;
2273 kbuf = compat_alloc_user_space(sizeof(struct epoll_event) * maxevents);
2274 ret = sys_epoll_wait(epfd, kbuf, maxevents, timeout);
2275 for (i = 0; i < ret; i++) {
2276 err |= __get_user(ev.events, &kbuf[i].events);
2277 err |= __get_user(ev.data, &kbuf[i].data);
2278 err |= __put_user(ev.events, &events->events);
2279 err |= __put_user_unaligned(ev.data, &events->data);
2280 events++;
2281 }
2282
2283 return err ? -EFAULT: ret;
2284}
2285#endif /* CONFIG_HAS_COMPAT_EPOLL_EVENT */
2286
2287#ifdef TIF_RESTORE_SIGMASK
2288asmlinkage long compat_sys_epoll_pwait(int epfd,
2289 struct compat_epoll_event __user *events,
2290 int maxevents, int timeout,
2291 const compat_sigset_t __user *sigmask,
2292 compat_size_t sigsetsize)
2293{
2294 long err;
2295 compat_sigset_t csigmask;
2296 sigset_t ksigmask, sigsaved;
2297
2298 /*
2299 * If the caller wants a certain signal mask to be set during the wait,
2300 * we apply it here.
2301 */
2302 if (sigmask) {
2303 if (sigsetsize != sizeof(compat_sigset_t))
2304 return -EINVAL;
2305 if (copy_from_user(&csigmask, sigmask, sizeof(csigmask)))
2306 return -EFAULT;
2307 sigset_from_compat(&ksigmask, &csigmask);
2308 sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2309 sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
2310 }
2311
2312#ifdef CONFIG_HAS_COMPAT_EPOLL_EVENT
2313 err = compat_sys_epoll_wait(epfd, events, maxevents, timeout);
2314#else
2315 err = sys_epoll_wait(epfd, events, maxevents, timeout);
2316#endif
2317
2318 /*
2319 * If we changed the signal mask, we need to restore the original one.
2320 * In case we've got a signal while waiting, we do not restore the
2321 * signal mask yet, and we allow do_signal() to deliver the signal on
2322 * the way back to userspace, before the signal mask is restored.
2323 */
2324 if (sigmask) {
2325 if (err == -EINTR) {
2326 memcpy(&current->saved_sigmask, &sigsaved,
2327 sizeof(sigsaved));
2328 set_thread_flag(TIF_RESTORE_SIGMASK);
2329 } else
2330 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2331 }
2332
2333 return err;
2334}
2335#endif /* TIF_RESTORE_SIGMASK */
2336
2337#endif /* CONFIG_EPOLL */
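The new compat_sys_epoll_pwait() above mirrors the native epoll_pwait() semantics: the caller's signal mask is installed for the duration of the wait and restored afterwards, with restoration deferred on -EINTR so the pending signal is delivered under the original mask. A short userspace sketch of the call this wrapper services, which keeps SIGINT blocked except while atomically sleeping in epoll_pwait() (assumes Linux with <sys/epoll.h>):

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/epoll.h>
#include <unistd.h>

static void on_sigint(int sig) { (void)sig; }   /* handler so EINTR is observable */

int main(void)
{
        sigset_t blocked, during_wait;
        struct sigaction sa;
        struct epoll_event ev = { .events = EPOLLIN, .data.fd = STDIN_FILENO };
        struct epoll_event out[1];
        int epfd, n;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = on_sigint;
        sigaction(SIGINT, &sa, NULL);

        /* Normally keep SIGINT blocked... */
        sigemptyset(&blocked);
        sigaddset(&blocked, SIGINT);
        sigprocmask(SIG_BLOCK, &blocked, NULL);

        /* ...but allow it atomically while we sleep in epoll_pwait(). */
        sigemptyset(&during_wait);

        epfd = epoll_create(1);
        epoll_ctl(epfd, EPOLL_CTL_ADD, STDIN_FILENO, &ev);

        n = epoll_pwait(epfd, out, 1, 5000, &during_wait);
        if (n < 0)
                perror("epoll_pwait");  /* EINTR if SIGINT arrived during the wait */
        else
                printf("%d event(s) ready\n", n);

        close(epfd);
        return 0;
}
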
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index e62f3fc7241e..1548be26b5e6 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -38,7 +38,7 @@ static struct dentry *lock_parent(struct dentry *dentry)
38 struct dentry *dir; 38 struct dentry *dir;
39 39
40 dir = dget(dentry->d_parent); 40 dir = dget(dentry->d_parent);
41 mutex_lock(&(dir->d_inode->i_mutex)); 41 mutex_lock_nested(&(dir->d_inode->i_mutex), I_MUTEX_PARENT);
42 return dir; 42 return dir;
43} 43}
44 44
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index e965eb11d76f..9baf69773ed1 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -47,7 +47,7 @@ struct dentry_operations hostfs_dentry_ops = {
47}; 47};
48 48
49/* Changed in hostfs_args before the kernel starts running */ 49/* Changed in hostfs_args before the kernel starts running */
50static char *root_ino = "/"; 50static char *root_ino = "";
51static int append = 0; 51static int append = 0;
52 52
53#define HOSTFS_SUPER_MAGIC 0x00c0ffee 53#define HOSTFS_SUPER_MAGIC 0x00c0ffee
@@ -947,15 +947,17 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
947 sb->s_magic = HOSTFS_SUPER_MAGIC; 947 sb->s_magic = HOSTFS_SUPER_MAGIC;
948 sb->s_op = &hostfs_sbops; 948 sb->s_op = &hostfs_sbops;
949 949
950 if((data == NULL) || (*data == '\0')) 950 /* NULL is printed as <NULL> by sprintf: avoid that. */
951 data = root_ino; 951 if (data == NULL)
952 data = "";
952 953
953 err = -ENOMEM; 954 err = -ENOMEM;
954 name = kmalloc(strlen(data) + 1, GFP_KERNEL); 955 name = kmalloc(strlen(root_ino) + 1
956 + strlen(data) + 1, GFP_KERNEL);
955 if(name == NULL) 957 if(name == NULL)
956 goto out; 958 goto out;
957 959
958 strcpy(name, data); 960 sprintf(name, "%s/%s", root_ino, data);
959 961
960 root_inode = iget(sb, 0); 962 root_inode = iget(sb, 0);
961 if(root_inode == NULL) 963 if(root_inode == NULL)
@@ -966,6 +968,9 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
966 goto out_put; 968 goto out_put;
967 969
968 HOSTFS_I(root_inode)->host_filename = name; 970 HOSTFS_I(root_inode)->host_filename = name;
971 /* Avoid that in the error path, iput(root_inode) frees again name through
972 * hostfs_destroy_inode! */
973 name = NULL;
969 974
970 err = -ENOMEM; 975 err = -ENOMEM;
971 sb->s_root = d_alloc_root(root_inode); 976 sb->s_root = d_alloc_root(root_inode);
@@ -977,7 +982,7 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
977 /* No iput in this case because the dput does that for us */ 982 /* No iput in this case because the dput does that for us */
978 dput(sb->s_root); 983 dput(sb->s_root);
979 sb->s_root = NULL; 984 sb->s_root = NULL;
980 goto out_free; 985 goto out;
981 } 986 }
982 987
983 return(0); 988 return(0);
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 22d38ffc9ef0..e46d237b10f9 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -180,7 +180,7 @@ check_partition(struct gendisk *hd, struct block_device *bdev)
180 } 180 }
181 if (res > 0) 181 if (res > 0)
182 return state; 182 return state;
183 if (!err) 183 if (err)
184 /* The partition is unrecognized. So report I/O errors if there were any */ 184 /* The partition is unrecognized. So report I/O errors if there were any */
185 res = err; 185 res = err;
186 if (!res) 186 if (!res)
diff --git a/include/asm-sparc64/parport.h b/include/asm-sparc64/parport.h
index be9509c8f8c1..284dfd01a33d 100644
--- a/include/asm-sparc64/parport.h
+++ b/include/asm-sparc64/parport.h
@@ -19,6 +19,17 @@
19 */ 19 */
20#define HAS_DMA 20#define HAS_DMA
21 21
22static DEFINE_SPINLOCK(dma_spin_lock);
23
24#define claim_dma_lock() \
25({ unsigned long flags; \
26 spin_lock_irqsave(&dma_spin_lock, flags); \
27 flags; \
28})
29
30#define release_dma_lock(__flags) \
31 spin_unlock_irqrestore(&dma_spin_lock, __flags);
32
22static struct sparc_ebus_info { 33static struct sparc_ebus_info {
23 struct ebus_dma_info info; 34 struct ebus_dma_info info;
24 unsigned int addr; 35 unsigned int addr;
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 80b17f440ec1..ccd863dd77fa 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -234,5 +234,24 @@ asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
234 compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes, 234 compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes,
235 const compat_ulong_t __user *new_nodes); 235 const compat_ulong_t __user *new_nodes);
236 236
237/*
238 * epoll (fs/eventpoll.c) compat bits follow ...
239 */
240#ifndef CONFIG_HAS_COMPAT_EPOLL_EVENT
241struct epoll_event;
242#define compat_epoll_event epoll_event
243#else
244asmlinkage long compat_sys_epoll_ctl(int epfd, int op, int fd,
245 struct compat_epoll_event __user *event);
246asmlinkage long compat_sys_epoll_wait(int epfd,
247 struct compat_epoll_event __user *events,
248 int maxevents, int timeout);
249#endif
250asmlinkage long compat_sys_epoll_pwait(int epfd,
251 struct compat_epoll_event __user *events,
252 int maxevents, int timeout,
253 const compat_sigset_t __user *sigmask,
254 compat_size_t sigsetsize);
255
237#endif /* CONFIG_COMPAT */ 256#endif /* CONFIG_COMPAT */
238#endif /* _LINUX_COMPAT_H */ 257#endif /* _LINUX_COMPAT_H */
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index f928d2b2a17d..71f5cfbbebb8 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -656,7 +656,7 @@ static int hci_sock_dev_event(struct notifier_block *this, unsigned long event,
656 /* Detach sockets from device */ 656 /* Detach sockets from device */
657 read_lock(&hci_sk_list.lock); 657 read_lock(&hci_sk_list.lock);
658 sk_for_each(sk, node, &hci_sk_list.head) { 658 sk_for_each(sk, node, &hci_sk_list.head) {
659 bh_lock_sock(sk); 659 lock_sock(sk);
660 if (hci_pi(sk)->hdev == hdev) { 660 if (hci_pi(sk)->hdev == hdev) {
661 hci_pi(sk)->hdev = NULL; 661 hci_pi(sk)->hdev = NULL;
662 sk->sk_err = EPIPE; 662 sk->sk_err = EPIPE;
@@ -665,7 +665,7 @@ static int hci_sock_dev_event(struct notifier_block *this, unsigned long event,
665 665
666 hci_dev_put(hdev); 666 hci_dev_put(hdev);
667 } 667 }
668 bh_unlock_sock(sk); 668 release_sock(sk);
669 } 669 }
670 read_unlock(&hci_sk_list.lock); 670 read_unlock(&hci_sk_list.lock);
671 } 671 }
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 63fe1093b616..0b9c49b3a100 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -360,7 +360,7 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
360 return; 360 return;
361 } 361 }
362 362
363 read_lock(&in6_dev->lock); 363 read_lock_bh(&in6_dev->lock);
364 for (ifp = in6_dev->addr_list; ifp; ifp = ifp->if_next) { 364 for (ifp = in6_dev->addr_list; ifp; ifp = ifp->if_next) {
365 /* Add the address to the local list. */ 365 /* Add the address to the local list. */
366 addr = t_new(struct sctp_sockaddr_entry, GFP_ATOMIC); 366 addr = t_new(struct sctp_sockaddr_entry, GFP_ATOMIC);
@@ -374,7 +374,7 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
374 } 374 }
375 } 375 }
376 376
377 read_unlock(&in6_dev->lock); 377 read_unlock_bh(&in6_dev->lock);
378 rcu_read_unlock(); 378 rcu_read_unlock();
379} 379}
380 380