author     Jeff Garzik <jeff@garzik.org>    2006-07-06 13:07:32 -0400
committer  Jeff Garzik <jeff@garzik.org>    2006-07-06 13:07:32 -0400
commit     70f05366b71c51c35c25c0b76b4318fbc26c975a (patch)
tree       d31230b93ba7df50d87eb11b1dba091641a9b89e
parent     c0bc8721b8d0380ec69fa97578c91201201b05a9 (diff)
parent     120bda20c6f64b32e8bfbdd7b34feafaa5f5332e (diff)
Merge branch 'master' into upstream
-rw-r--r--  Makefile                                |    6
-rw-r--r--  arch/arm/mach-at91rm9200/at91rm9200.c   |   45
-rw-r--r--  arch/arm/mach-at91rm9200/generic.h      |    8
-rw-r--r--  arch/arm/mach-at91rm9200/irq.c          |   70
-rw-r--r--  arch/arm/mach-pnx4008/core.c            |    2
-rw-r--r--  arch/arm/mach-pnx4008/dma.c             |    1
-rw-r--r--  arch/arm/mach-pnx4008/irq.c             |   22
-rw-r--r--  arch/arm/mach-pnx4008/time.c            |    8
-rw-r--r--  arch/powerpc/sysdev/mpic.c              |   39
-rw-r--r--  arch/sparc64/kernel/prom.c              |    2
-rw-r--r--  arch/sparc64/kernel/sparc64_ksyms.c     |   10
-rw-r--r--  arch/sparc64/kernel/time.c              |    5
-rw-r--r--  drivers/scsi/ahci.c                     |   17
-rw-r--r--  drivers/scsi/libata-core.c              |  289
-rw-r--r--  drivers/scsi/libata-eh.c                |  405
-rw-r--r--  drivers/scsi/libata-scsi.c              |  124
-rw-r--r--  drivers/scsi/sata_sil.c                 |  105
-rw-r--r--  drivers/scsi/sata_sil24.c               |  134
-rw-r--r--  drivers/scsi/sata_vsc.c                 |    2
-rw-r--r--  drivers/serial/at91_serial.c            |    5
-rw-r--r--  fs/lockd/clntproc.c                     |   26
-rw-r--r--  fs/locks.c                              |   23
-rw-r--r--  fs/nfs/dir.c                            |    4
-rw-r--r--  fs/nfs/direct.c                         |  435
-rw-r--r--  fs/nfs/nfs4proc.c                       |   74
-rw-r--r--  fs/nfs/write.c                          |   20
-rw-r--r--  include/asm-arm/arch-at91rm9200/irqs.h  |    8
-rw-r--r--  include/asm-powerpc/cputime.h           |   18
-rw-r--r--  include/asm-sparc64/dma-mapping.h       |   14
-rw-r--r--  include/linux/fs.h                      |    1
-rw-r--r--  include/linux/libata.h                  |   85
-rw-r--r--  include/linux/nfs_xdr.h                 |    2
-rw-r--r--  include/linux/pci_ids.h                 |    7
-rw-r--r--  net/sched/act_api.c                     |   18
-rw-r--r--  net/sunrpc/xdr.c                        |    3
35 files changed, 1368 insertions, 669 deletions
diff --git a/Makefile b/Makefile
index 11a850cffd3..7c010f3325a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
-SUBLEVEL = 17
-EXTRAVERSION =
+SUBLEVEL = 18
+EXTRAVERSION = -rc1
 NAME=Crazed Snow-Weasel
 
 # *DOCUMENTATION*
@@ -528,7 +528,7 @@ export MODLIB
 
 ifdef INSTALL_MOD_STRIP
 ifeq ($(INSTALL_MOD_STRIP),1)
-mod_strip_cmd = $STRIP) --strip-debug
+mod_strip_cmd = $(STRIP) --strip-debug
 else
 mod_strip_cmd = $(STRIP) $(INSTALL_MOD_STRIP)
 endif # INSTALL_MOD_STRIP=1
diff --git a/arch/arm/mach-at91rm9200/at91rm9200.c b/arch/arm/mach-at91rm9200/at91rm9200.c
index 7e1d072bdd8..0985b1c42c7 100644
--- a/arch/arm/mach-at91rm9200/at91rm9200.c
+++ b/arch/arm/mach-at91rm9200/at91rm9200.c
@@ -107,3 +107,48 @@ void __init at91rm9200_map_io(void)
 	iotable_init(at91rm9200_io_desc, ARRAY_SIZE(at91rm9200_io_desc));
 }
 
+/*
+ * The default interrupt priority levels (0 = lowest, 7 = highest).
+ */
+static unsigned int at91rm9200_default_irq_priority[NR_AIC_IRQS] __initdata = {
+	7,	/* Advanced Interrupt Controller (FIQ) */
+	7,	/* System Peripherals */
+	0,	/* Parallel IO Controller A */
+	0,	/* Parallel IO Controller B */
+	0,	/* Parallel IO Controller C */
+	0,	/* Parallel IO Controller D */
+	6,	/* USART 0 */
+	6,	/* USART 1 */
+	6,	/* USART 2 */
+	6,	/* USART 3 */
+	0,	/* Multimedia Card Interface */
+	4,	/* USB Device Port */
+	0,	/* Two-Wire Interface */
+	6,	/* Serial Peripheral Interface */
+	5,	/* Serial Synchronous Controller 0 */
+	5,	/* Serial Synchronous Controller 1 */
+	5,	/* Serial Synchronous Controller 2 */
+	0,	/* Timer Counter 0 */
+	0,	/* Timer Counter 1 */
+	0,	/* Timer Counter 2 */
+	0,	/* Timer Counter 3 */
+	0,	/* Timer Counter 4 */
+	0,	/* Timer Counter 5 */
+	3,	/* USB Host port */
+	3,	/* Ethernet MAC */
+	0,	/* Advanced Interrupt Controller (IRQ0) */
+	0,	/* Advanced Interrupt Controller (IRQ1) */
+	0,	/* Advanced Interrupt Controller (IRQ2) */
+	0,	/* Advanced Interrupt Controller (IRQ3) */
+	0,	/* Advanced Interrupt Controller (IRQ4) */
+	0,	/* Advanced Interrupt Controller (IRQ5) */
+	0	/* Advanced Interrupt Controller (IRQ6) */
+};
+
+void __init at91rm9200_init_irq(unsigned int priority[NR_AIC_IRQS])
+{
+	if (!priority)
+		priority = at91rm9200_default_irq_priority;
+
+	at91_aic_init(priority);
+}
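For orientation, a minimal sketch of how a board file would consume this hook; the board name and machine record below are hypothetical, and passing NULL (as most boards would) selects the default priority table above:

/* Hypothetical board support file (sketch, 2.6.17-era ARM API). */
#include <asm/mach/arch.h>
#include <asm/hardware.h>
#include "generic.h"

static void __init myboard_init_irq(void)
{
	/* NULL -> at91rm9200_default_irq_priority is used */
	at91rm9200_init_irq(NULL);
}

MACHINE_START(MYBOARD, "Hypothetical AT91RM9200 board")
	.map_io		= at91rm9200_map_io,
	.init_irq	= myboard_init_irq,
	.timer		= &at91rm9200_timer,
MACHINE_END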
diff --git a/arch/arm/mach-at91rm9200/generic.h b/arch/arm/mach-at91rm9200/generic.h
index f0d969d7d87..7979d8ab7e0 100644
--- a/arch/arm/mach-at91rm9200/generic.h
+++ b/arch/arm/mach-at91rm9200/generic.h
@@ -8,13 +8,19 @@
  * published by the Free Software Foundation.
  */
 
-void at91_gpio_irq_setup(unsigned banks);
+ /* Interrupts */
+extern void __init at91rm9200_init_irq(unsigned int priority[]);
+extern void __init at91_aic_init(unsigned int priority[]);
+extern void __init at91_gpio_irq_setup(unsigned banks);
 
+ /* Timer */
 struct sys_timer;
 extern struct sys_timer at91rm9200_timer;
 
+ /* Memory Map */
 extern void __init at91rm9200_map_io(void);
 
+ /* Clocks */
 extern int __init at91_clock_init(unsigned long main_clock);
 struct device;
 extern void __init at91_clock_associate(const char *id, struct device *dev, const char *func);
diff --git a/arch/arm/mach-at91rm9200/irq.c b/arch/arm/mach-at91rm9200/irq.c
index dcd560dbcfb..9b091132041 100644
--- a/arch/arm/mach-at91rm9200/irq.c
+++ b/arch/arm/mach-at91rm9200/irq.c
@@ -36,58 +36,20 @@
 
 #include "generic.h"
 
-/*
- * The default interrupt priority levels (0 = lowest, 7 = highest).
- */
-static unsigned int at91rm9200_default_irq_priority[NR_AIC_IRQS] __initdata = {
-	7,	/* Advanced Interrupt Controller */
-	7,	/* System Peripheral */
-	0,	/* Parallel IO Controller A */
-	0,	/* Parallel IO Controller B */
-	0,	/* Parallel IO Controller C */
-	0,	/* Parallel IO Controller D */
-	6,	/* USART 0 */
-	6,	/* USART 1 */
-	6,	/* USART 2 */
-	6,	/* USART 3 */
-	0,	/* Multimedia Card Interface */
-	4,	/* USB Device Port */
-	0,	/* Two-Wire Interface */
-	6,	/* Serial Peripheral Interface */
-	5,	/* Serial Synchronous Controller */
-	5,	/* Serial Synchronous Controller */
-	5,	/* Serial Synchronous Controller */
-	0,	/* Timer Counter 0 */
-	0,	/* Timer Counter 1 */
-	0,	/* Timer Counter 2 */
-	0,	/* Timer Counter 3 */
-	0,	/* Timer Counter 4 */
-	0,	/* Timer Counter 5 */
-	3,	/* USB Host port */
-	3,	/* Ethernet MAC */
-	0,	/* Advanced Interrupt Controller */
-	0,	/* Advanced Interrupt Controller */
-	0,	/* Advanced Interrupt Controller */
-	0,	/* Advanced Interrupt Controller */
-	0,	/* Advanced Interrupt Controller */
-	0,	/* Advanced Interrupt Controller */
-	0	/* Advanced Interrupt Controller */
-};
-
-
-static void at91rm9200_mask_irq(unsigned int irq)
+static void at91_aic_mask_irq(unsigned int irq)
 {
 	/* Disable interrupt on AIC */
 	at91_sys_write(AT91_AIC_IDCR, 1 << irq);
 }
 
-static void at91rm9200_unmask_irq(unsigned int irq)
+static void at91_aic_unmask_irq(unsigned int irq)
 {
 	/* Enable interrupt on AIC */
 	at91_sys_write(AT91_AIC_IECR, 1 << irq);
 }
 
-static int at91rm9200_irq_type(unsigned irq, unsigned type)
+static int at91_aic_set_type(unsigned irq, unsigned type)
 {
 	unsigned int smr, srctype;
 
@@ -122,7 +84,7 @@ static int at91rm9200_irq_type(unsigned irq, unsigned type)
 static u32 wakeups;
 static u32 backups;
 
-static int at91rm9200_irq_set_wake(unsigned irq, unsigned value)
+static int at91_aic_set_wake(unsigned irq, unsigned value)
 {
 	if (unlikely(irq >= 32))
 		return -EINVAL;
@@ -149,28 +111,24 @@ void at91_irq_resume(void)
 }
 
 #else
-#define at91rm9200_irq_set_wake	NULL
+#define at91_aic_set_wake	NULL
 #endif
 
-static struct irqchip at91rm9200_irq_chip = {
-	.ack		= at91rm9200_mask_irq,
-	.mask		= at91rm9200_mask_irq,
-	.unmask		= at91rm9200_unmask_irq,
-	.set_type	= at91rm9200_irq_type,
-	.set_wake	= at91rm9200_irq_set_wake,
+static struct irqchip at91_aic_chip = {
+	.ack		= at91_aic_mask_irq,
+	.mask		= at91_aic_mask_irq,
+	.unmask		= at91_aic_unmask_irq,
+	.set_type	= at91_aic_set_type,
+	.set_wake	= at91_aic_set_wake,
 };
 
 /*
  * Initialize the AIC interrupt controller.
  */
-void __init at91rm9200_init_irq(unsigned int priority[NR_AIC_IRQS])
+void __init at91_aic_init(unsigned int priority[NR_AIC_IRQS])
 {
 	unsigned int i;
 
-	/* No priority list specified for this board -> use defaults */
-	if (priority == NULL)
-		priority = at91rm9200_default_irq_priority;
-
 	/*
 	 * The IVR is used by macro get_irqnr_and_base to read and verify.
 	 * The irq number is NR_AIC_IRQS when a spurious interrupt has occurred.
@@ -178,10 +136,10 @@ void __init at91rm9200_init_irq(unsigned int priority[NR_AIC_IRQS])
 	for (i = 0; i < NR_AIC_IRQS; i++) {
 		/* Put irq number in Source Vector Register: */
 		at91_sys_write(AT91_AIC_SVR(i), i);
-		/* Store the Source Mode Register as defined in table above */
+		/* Active Low interrupt, with the specified priority */
 		at91_sys_write(AT91_AIC_SMR(i), AT91_AIC_SRCTYPE_LOW | priority[i]);
 
-		set_irq_chip(i, &at91rm9200_irq_chip);
+		set_irq_chip(i, &at91_aic_chip);
 		set_irq_handler(i, do_level_IRQ);
 		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
 
diff --git a/arch/arm/mach-pnx4008/core.c b/arch/arm/mach-pnx4008/core.c
index ba91daad64f..3d73c1e9375 100644
--- a/arch/arm/mach-pnx4008/core.c
+++ b/arch/arm/mach-pnx4008/core.c
@@ -27,7 +27,6 @@
 #include <linux/spi/spi.h>
 
 #include <asm/hardware.h>
-#include <asm/irq.h>
 #include <asm/io.h>
 #include <asm/setup.h>
 #include <asm/mach-types.h>
@@ -36,7 +35,6 @@
 #include <asm/system.h>
 
 #include <asm/mach/arch.h>
-#include <asm/mach/irq.h>
 #include <asm/mach/map.h>
 #include <asm/mach/time.h>
 
diff --git a/arch/arm/mach-pnx4008/dma.c b/arch/arm/mach-pnx4008/dma.c
index 981aa9dcded..ec01574f88a 100644
--- a/arch/arm/mach-pnx4008/dma.c
+++ b/arch/arm/mach-pnx4008/dma.c
@@ -23,7 +23,6 @@
 #include <linux/clk.h>
 
 #include <asm/system.h>
-#include <asm/irq.h>
 #include <asm/hardware.h>
 #include <asm/dma.h>
 #include <asm/dma-mapping.h>
diff --git a/arch/arm/mach-pnx4008/irq.c b/arch/arm/mach-pnx4008/irq.c
index 9b0a8e084e9..3a4bcf3d91f 100644
--- a/arch/arm/mach-pnx4008/irq.c
+++ b/arch/arm/mach-pnx4008/irq.c
@@ -22,8 +22,8 @@
 #include <linux/init.h>
 #include <linux/ioport.h>
 #include <linux/device.h>
+#include <linux/irq.h>
 #include <asm/hardware.h>
-#include <asm/irq.h>
 #include <asm/io.h>
 #include <asm/setup.h>
 #include <asm/mach-types.h>
@@ -96,26 +96,24 @@ void __init pnx4008_init_irq(void)
 {
 	unsigned int i;
 
-	/* configure and enable IRQ 0,1,30,31 (cascade interrupts) mask all others */
+	/* configure IRQ's */
+	for (i = 0; i < NR_IRQS; i++) {
+		set_irq_flags(i, IRQF_VALID);
+		set_irq_chip(i, &pnx4008_irq_chip);
+		pnx4008_set_irq_type(i, pnx4008_irq_type[i]);
+	}
+
+	/* configure and enable IRQ 0,1,30,31 (cascade interrupts) */
 	pnx4008_set_irq_type(SUB1_IRQ_N, pnx4008_irq_type[SUB1_IRQ_N]);
 	pnx4008_set_irq_type(SUB2_IRQ_N, pnx4008_irq_type[SUB2_IRQ_N]);
 	pnx4008_set_irq_type(SUB1_FIQ_N, pnx4008_irq_type[SUB1_FIQ_N]);
 	pnx4008_set_irq_type(SUB2_FIQ_N, pnx4008_irq_type[SUB2_FIQ_N]);
 
+	/* mask all others */
 	__raw_writel((1 << SUB2_FIQ_N) | (1 << SUB1_FIQ_N) |
 			(1 << SUB2_IRQ_N) | (1 << SUB1_IRQ_N),
 		INTC_ER(MAIN_BASE_INT));
 	__raw_writel(0, INTC_ER(SIC1_BASE_INT));
 	__raw_writel(0, INTC_ER(SIC2_BASE_INT));
-
-	/* configure all other IRQ's */
-	for (i = 0; i < NR_IRQS; i++) {
-		if (i == SUB2_FIQ_N || i == SUB1_FIQ_N ||
-				i == SUB2_IRQ_N || i == SUB1_IRQ_N)
-			continue;
-		set_irq_flags(i, IRQF_VALID);
-		set_irq_chip(i, &pnx4008_irq_chip);
-		pnx4008_set_irq_type(i, pnx4008_irq_type[i]);
-	}
 }
 
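The reordering is the point of this hunk: every interrupt line now gets its chip, validity flags, and trigger type while the enable registers are still untouched, and only afterwards are the four cascade inputs unmasked, so nothing can fire through a half-configured descriptor. A condensed sketch of that describe-then-enable order, reusing names from the hunk (an illustration, not a drop-in replacement):

/* Sketch: configure all descriptors first, then unmask only the cascades. */
static void __init example_init_irq(void)
{
	unsigned int i;

	for (i = 0; i < NR_IRQS; i++) {		/* step 1: describe everything */
		set_irq_flags(i, IRQF_VALID);
		set_irq_chip(i, &pnx4008_irq_chip);
		pnx4008_set_irq_type(i, pnx4008_irq_type[i]);
	}

	/* step 2: enable only IRQ 0, 1, 30, 31 (the cascade inputs) */
	__raw_writel((1 << SUB2_FIQ_N) | (1 << SUB1_FIQ_N) |
		     (1 << SUB2_IRQ_N) | (1 << SUB1_IRQ_N),
		     INTC_ER(MAIN_BASE_INT));
}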
diff --git a/arch/arm/mach-pnx4008/time.c b/arch/arm/mach-pnx4008/time.c
index 888bf6cfba8..756228ddd03 100644
--- a/arch/arm/mach-pnx4008/time.c
+++ b/arch/arm/mach-pnx4008/time.c
@@ -20,17 +20,15 @@
 #include <linux/spinlock.h>
 #include <linux/module.h>
 #include <linux/kallsyms.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/irq.h>
 
 #include <asm/system.h>
 #include <asm/hardware.h>
 #include <asm/io.h>
 #include <asm/leds.h>
-#include <asm/irq.h>
-#include <asm/mach/irq.h>
 #include <asm/mach/time.h>
-
-#include <linux/time.h>
-#include <linux/timex.h>
 #include <asm/errno.h>
 
 /*! Note: all timers are UPCOUNTING */
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 7d31d7cc392..9cecebaa036 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -405,20 +405,22 @@ static void mpic_unmask_irq(unsigned int irq)
 	unsigned int loops = 100000;
 	struct mpic *mpic = mpic_from_irq(irq);
 	unsigned int src = mpic_irq_to_hw(irq);
+	unsigned long flags;
 
 	DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src);
 
+	spin_lock_irqsave(&mpic_lock, flags);
 	mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
 		       mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) &
 		       ~MPIC_VECPRI_MASK);
-
 	/* make sure mask gets to controller before we return to user */
 	do {
 		if (!loops--) {
 			printk(KERN_ERR "mpic_enable_irq timeout\n");
 			break;
 		}
 	} while(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK);
+	spin_unlock_irqrestore(&mpic_lock, flags);
 }
 
 static void mpic_mask_irq(unsigned int irq)
@@ -426,9 +428,11 @@ static void mpic_mask_irq(unsigned int irq)
 	unsigned int loops = 100000;
 	struct mpic *mpic = mpic_from_irq(irq);
 	unsigned int src = mpic_irq_to_hw(irq);
+	unsigned long flags;
 
 	DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src);
 
+	spin_lock_irqsave(&mpic_lock, flags);
 	mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
 		       mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) |
 		       MPIC_VECPRI_MASK);
@@ -440,6 +444,7 @@ static void mpic_mask_irq(unsigned int irq)
 			break;
 		}
 	} while(!(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK));
+	spin_unlock_irqrestore(&mpic_lock, flags);
 }
 
 static void mpic_end_irq(unsigned int irq)
@@ -624,9 +629,10 @@ static int mpic_host_map(struct irq_host *h, unsigned int virq,
 	struct irq_desc *desc = get_irq_desc(virq);
 	struct irq_chip *chip;
 	struct mpic *mpic = h->host_data;
-	unsigned int vecpri = MPIC_VECPRI_SENSE_LEVEL |
+	u32 v, vecpri = MPIC_VECPRI_SENSE_LEVEL |
 		MPIC_VECPRI_POLARITY_NEGATIVE;
 	int level;
+	unsigned long iflags;
 
 	pr_debug("mpic: map virq %d, hwirq 0x%lx, flags: 0x%x\n",
 		 virq, hw, flags);
@@ -668,11 +674,21 @@ static int mpic_host_map(struct irq_host *h, unsigned int virq,
 	}
 #endif
 
-	/* Reconfigure irq */
-	vecpri |= MPIC_VECPRI_MASK | hw | (8 << MPIC_VECPRI_PRIORITY_SHIFT);
-	mpic_irq_write(hw, MPIC_IRQ_VECTOR_PRI, vecpri);
-
-	pr_debug("mpic: mapping as IRQ\n");
+	/* Reconfigure irq. We must preserve the mask bit as we can be called
+	 * while the interrupt is still active (This may change in the future
+	 * but for now, it is the case).
+	 */
+	spin_lock_irqsave(&mpic_lock, iflags);
+	v = mpic_irq_read(hw, MPIC_IRQ_VECTOR_PRI);
+	vecpri = (v &
+		~(MPIC_VECPRI_POLARITY_MASK | MPIC_VECPRI_SENSE_MASK)) |
+		vecpri;
+	if (vecpri != v)
+		mpic_irq_write(hw, MPIC_IRQ_VECTOR_PRI, vecpri);
+	spin_unlock_irqrestore(&mpic_lock, iflags);
+
+	pr_debug("mpic: mapping as IRQ, vecpri = 0x%08x (was 0x%08x)\n",
+		 vecpri, v);
 
 	set_irq_chip_data(virq, mpic);
 	set_irq_chip_and_handler(virq, chip, handle_fasteoi_irq);
@@ -904,8 +920,8 @@ void __init mpic_init(struct mpic *mpic)
 
 		/* do senses munging */
 		if (mpic->senses && i < mpic->senses_count)
-			vecpri = mpic_flags_to_vecpri(mpic->senses[i],
-						      &level);
+			vecpri |= mpic_flags_to_vecpri(mpic->senses[i],
+						       &level);
 		else
 			vecpri |= MPIC_VECPRI_SENSE_LEVEL;
 
@@ -955,14 +971,17 @@ void __init mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio)
 
 void __init mpic_set_serial_int(struct mpic *mpic, int enable)
 {
+	unsigned long flags;
 	u32 v;
 
+	spin_lock_irqsave(&mpic_lock, flags);
 	v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1);
 	if (enable)
 		v |= MPIC_GREG_GLOBAL_CONF_1_SIE;
 	else
 		v &= ~MPIC_GREG_GLOBAL_CONF_1_SIE;
 	mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v);
+	spin_unlock_irqrestore(&mpic_lock, flags);
 }
 
 void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
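Two things stand out in this file. First, the senses hunk switches `vecpri =` to `vecpri |=` so the mask and priority bits already accumulated in vecpri survive the sense munging instead of being overwritten. Second, every register update is now wrapped in the same guarded read-modify-write idiom. A minimal sketch of that idiom; the helper is hypothetical, and mpic_lock stands in for the file-scope lock the patch takes:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(mpic_lock);	/* stands in for mpic.c's own lock */

/* Hypothetical helper: update VECPRI bits without racing other CPUs/IRQs. */
static void mpic_rmw_vecpri(unsigned int src, u32 set, u32 clear)
{
	unsigned long flags;
	u32 v;

	spin_lock_irqsave(&mpic_lock, flags);	/* safe even from IRQ context */
	v = mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI);
	v = (v & ~clear) | set;
	mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI, v);
	spin_unlock_irqrestore(&mpic_lock, flags);
}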
diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c
index fa484d4f241..99daeee4209 100644
--- a/arch/sparc64/kernel/prom.c
+++ b/arch/sparc64/kernel/prom.c
@@ -1032,7 +1032,9 @@ static void sun4v_vdev_irq_trans_init(struct device_node *dp)
 static void irq_trans_init(struct device_node *dp)
 {
 	const char *model;
+#ifdef CONFIG_PCI
 	int i;
+#endif
 
 	model = of_get_property(dp, "model", NULL);
 	if (!model)
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 4173de425f0..237524d87ca 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -124,11 +124,6 @@ EXPORT_SYMBOL(__write_lock);
 EXPORT_SYMBOL(__write_unlock);
 EXPORT_SYMBOL(__write_trylock);
 
-#if defined(CONFIG_MCOUNT)
-extern void _mcount(void);
-EXPORT_SYMBOL(_mcount);
-#endif
-
 /* CPU online map and active count. */
 EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL(phys_cpu_present_map);
@@ -136,6 +131,11 @@ EXPORT_SYMBOL(phys_cpu_present_map);
 EXPORT_SYMBOL(smp_call_function);
 #endif /* CONFIG_SMP */
 
+#if defined(CONFIG_MCOUNT)
+extern void _mcount(void);
+EXPORT_SYMBOL(_mcount);
+#endif
+
 EXPORT_SYMBOL(sparc64_get_clock_tick);
 
 /* semaphores */
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index 8dcbfbffacc..b43de647ba7 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -788,12 +788,15 @@ static int __devinit clock_probe(struct of_device *op, const struct of_device_id
 	if (!regs)
 		return -ENOMEM;
 
+#ifdef CONFIG_PCI
 	if (!strcmp(model, "ds1287") ||
 	    !strcmp(model, "m5819") ||
 	    !strcmp(model, "m5819p") ||
 	    !strcmp(model, "m5823")) {
 		ds1287_regs = (unsigned long) regs;
-	} else if (model[5] == '0' && model[6] == '2') {
+	} else
+#endif
+	if (model[5] == '0' && model[6] == '2') {
 		mstk48t02_regs = regs;
 	} else if(model[5] == '0' && model[6] == '8') {
 		mstk48t08_regs = regs;
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index 15f6cd4279b..77e7202a0eb 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -1052,7 +1052,7 @@ static void ahci_thaw(struct ata_port *ap)
 
 static void ahci_error_handler(struct ata_port *ap)
 {
-	if (!(ap->flags & ATA_FLAG_FROZEN)) {
+	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
 		/* restart engine */
 		ahci_stop_engine(ap);
 		ahci_start_engine(ap);
@@ -1323,6 +1323,17 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
 
+	/* JMicron-specific fixup: make sure we're in AHCI mode */
+	/* This is protected from races with ata_jmicron by the pci probe
+	   locking */
+	if (pdev->vendor == PCI_VENDOR_ID_JMICRON) {
+		/* AHCI enable, AHCI on function 0 */
+		pci_write_config_byte(pdev, 0x41, 0xa1);
+		/* Function 1 is the PATA controller */
+		if (PCI_FUNC(pdev->devfn))
+			return -ENODEV;
+	}
+
 	rc = pci_enable_device(pdev);
 	if (rc)
 		return rc;
@@ -1378,10 +1389,6 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (have_msi)
 		hpriv->flags |= AHCI_FLAG_MSI;
 
-	/* JMicron-specific fixup: make sure we're in AHCI mode */
-	if (pdev->vendor == 0x197b)
-		pci_write_config_byte(pdev, 0x41, 0xa1);
-
 	/* initialize adapter */
 	rc = ahci_host_init(probe_ent);
 	if (rc)
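Moving the JMicron fixup to the top of probe matters for ordering: the config-space write that forces AHCI mode now happens before pci_enable_device(), so all later BAR and interrupt setup sees the AHCI personality, and the PATA function is rejected before any resources are claimed. The shape of that early-probe fixup, as a sketch (the function name is illustrative):

/* Sketch: vendor fixup placed before any device enable/resource setup. */
static int example_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int rc;

	if (pdev->vendor == PCI_VENDOR_ID_JMICRON) {
		pci_write_config_byte(pdev, 0x41, 0xa1);	/* AHCI on function 0 */
		if (PCI_FUNC(pdev->devfn))
			return -ENODEV;		/* function 1 is PATA, not ours */
	}

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	/* ... normal AHCI resource and host setup continues ... */
	return 0;
}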
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 1c960ac1617..386e5f21e19 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -61,9 +61,9 @@
 #include "libata.h"
 
 /* debounce timing parameters in msecs { interval, duration, timeout } */
-const unsigned long sata_deb_timing_boot[] = { 5, 100, 2000 };
-const unsigned long sata_deb_timing_eh[] = { 25, 500, 2000 };
-const unsigned long sata_deb_timing_before_fsrst[] = { 100, 2000, 5000 };
+const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
+const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
+const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
 
 static unsigned int ata_dev_init_params(struct ata_device *dev,
 					u16 heads, u16 sectors);
@@ -907,7 +907,7 @@ void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
 {
 	int rc;
 
-	if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
+	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
 		return;
 
 	PREPARE_WORK(&ap->port_task, fn, data);
@@ -938,7 +938,7 @@ void ata_port_flush_task(struct ata_port *ap)
 	DPRINTK("ENTER\n");
 
 	spin_lock_irqsave(ap->lock, flags);
-	ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
+	ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
 	spin_unlock_irqrestore(ap->lock, flags);
 
 	DPRINTK("flush #1\n");
@@ -957,7 +957,7 @@ void ata_port_flush_task(struct ata_port *ap)
 	}
 
 	spin_lock_irqsave(ap->lock, flags);
-	ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
+	ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
 	spin_unlock_irqrestore(ap->lock, flags);
 
 	if (ata_msg_ctl(ap))
@@ -1009,7 +1009,7 @@ unsigned ata_exec_internal(struct ata_device *dev,
 	spin_lock_irqsave(ap->lock, flags);
 
 	/* no internal command while frozen */
-	if (ap->flags & ATA_FLAG_FROZEN) {
+	if (ap->pflags & ATA_PFLAG_FROZEN) {
 		spin_unlock_irqrestore(ap->lock, flags);
 		return AC_ERR_SYSTEM;
 	}
@@ -1325,6 +1325,19 @@ static void ata_dev_config_ncq(struct ata_device *dev,
 	snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
 }
 
+static void ata_set_port_max_cmd_len(struct ata_port *ap)
+{
+	int i;
+
+	if (ap->host) {
+		ap->host->max_cmd_len = 0;
+		for (i = 0; i < ATA_MAX_DEVICES; i++)
+			ap->host->max_cmd_len = max_t(unsigned int,
+						      ap->host->max_cmd_len,
+						      ap->device[i].cdb_len);
+	}
+}
+
 /**
  *	ata_dev_configure - Configure the specified ATA/ATAPI device
  *	@dev: Target device to configure
@@ -1344,7 +1357,7 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
 	struct ata_port *ap = dev->ap;
 	const u16 *id = dev->id;
 	unsigned int xfer_mask;
-	int i, rc;
+	int rc;
 
 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
 		ata_dev_printk(dev, KERN_INFO,
@@ -1404,7 +1417,7 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
 			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
 
 			/* print device info to dmesg */
-			if (ata_msg_info(ap))
+			if (ata_msg_drv(ap) && print_info)
 				ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
 					"max %s, %Lu sectors: %s %s\n",
 					ata_id_major_version(id),
@@ -1427,7 +1440,7 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
 			}
 
 			/* print device info to dmesg */
-			if (ata_msg_info(ap))
+			if (ata_msg_drv(ap) && print_info)
 				ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
 					"max %s, %Lu sectors: CHS %u/%u/%u\n",
 					ata_id_major_version(id),
@@ -1439,7 +1452,7 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
 
 		if (dev->id[59] & 0x100) {
 			dev->multi_count = dev->id[59] & 0xff;
-			if (ata_msg_info(ap))
+			if (ata_msg_drv(ap) && print_info)
 				ata_dev_printk(dev, KERN_INFO,
 					"ata%u: dev %u multi count %u\n",
 					ap->id, dev->devno, dev->multi_count);
@@ -1468,21 +1481,17 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
 		}
 
 		/* print device info to dmesg */
-		if (ata_msg_info(ap))
+		if (ata_msg_drv(ap) && print_info)
 			ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
 				       ata_mode_string(xfer_mask),
 				       cdb_intr_string);
 	}
 
-	ap->host->max_cmd_len = 0;
-	for (i = 0; i < ATA_MAX_DEVICES; i++)
-		ap->host->max_cmd_len = max_t(unsigned int,
-					      ap->host->max_cmd_len,
-					      ap->device[i].cdb_len);
+	ata_set_port_max_cmd_len(ap);
 
 	/* limit bridge transfers to udma5, 200 sectors */
 	if (ata_dev_knobble(dev)) {
-		if (ata_msg_info(ap))
+		if (ata_msg_drv(ap) && print_info)
 			ata_dev_printk(dev, KERN_INFO,
 				       "applying bridge limits\n");
 		dev->udma_mask &= ATA_UDMA5;
@@ -2137,7 +2146,7 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
 	 * return error code and failing device on failure.
 	 */
 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		if (ata_dev_enabled(&ap->device[i])) {
+		if (ata_dev_ready(&ap->device[i])) {
 			ap->ops->set_mode(ap);
 			break;
 		}
@@ -2203,7 +2212,8 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
 		dev = &ap->device[i];
 
-		if (!ata_dev_enabled(dev))
+		/* don't update suspended devices' xfer mode */
+		if (!ata_dev_ready(dev))
 			continue;
 
 		rc = ata_dev_set_mode(dev);
@@ -2579,7 +2589,7 @@ static void ata_wait_spinup(struct ata_port *ap)
 
 	/* first, debounce phy if SATA */
 	if (ap->cbl == ATA_CBL_SATA) {
-		rc = sata_phy_debounce(ap, sata_deb_timing_eh);
+		rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
 
 		/* if debounced successfully and offline, no need to wait */
 		if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
@@ -2615,16 +2625,17 @@ static void ata_wait_spinup(struct ata_port *ap)
 int ata_std_prereset(struct ata_port *ap)
 {
 	struct ata_eh_context *ehc = &ap->eh_context;
-	const unsigned long *timing;
+	const unsigned long *timing = sata_ehc_deb_timing(ehc);
 	int rc;
 
-	/* hotplug? */
-	if (ehc->i.flags & ATA_EHI_HOTPLUGGED) {
-		if (ap->flags & ATA_FLAG_HRST_TO_RESUME)
-			ehc->i.action |= ATA_EH_HARDRESET;
-		if (ap->flags & ATA_FLAG_SKIP_D2H_BSY)
-			ata_wait_spinup(ap);
-	}
+	/* handle link resume & hotplug spinup */
+	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
+	    (ap->flags & ATA_FLAG_HRST_TO_RESUME))
+		ehc->i.action |= ATA_EH_HARDRESET;
+
+	if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
+	    (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
+		ata_wait_spinup(ap);
 
 	/* if we're about to do hardreset, nothing more to do */
 	if (ehc->i.action & ATA_EH_HARDRESET)
@@ -2632,11 +2643,6 @@ int ata_std_prereset(struct ata_port *ap)
 
 	/* if SATA, resume phy */
 	if (ap->cbl == ATA_CBL_SATA) {
-		if (ap->flags & ATA_FLAG_LOADING)
-			timing = sata_deb_timing_boot;
-		else
-			timing = sata_deb_timing_eh;
-
 		rc = sata_phy_resume(ap, timing);
 		if (rc && rc != -EOPNOTSUPP) {
 			/* phy resume failed */
@@ -2724,6 +2730,8 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
  */
 int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
 {
+	struct ata_eh_context *ehc = &ap->eh_context;
+	const unsigned long *timing = sata_ehc_deb_timing(ehc);
 	u32 scontrol;
 	int rc;
 
@@ -2761,7 +2769,7 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
 	msleep(1);
 
 	/* bring phy back */
-	sata_phy_resume(ap, sata_deb_timing_eh);
+	sata_phy_resume(ap, timing);
 
 	/* TODO: phy layer with polling, timeouts, etc. */
 	if (ata_port_offline(ap)) {
@@ -4285,7 +4293,7 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
 	unsigned int i;
 
 	/* no command while frozen */
-	if (unlikely(ap->flags & ATA_FLAG_FROZEN))
+	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
 		return NULL;
 
 	/* the last tag is reserved for internal command. */
@@ -4407,7 +4415,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
 	 * taken care of.
 	 */
 	if (ap->ops->error_handler) {
-		WARN_ON(ap->flags & ATA_FLAG_FROZEN);
+		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
 
 		if (unlikely(qc->err_mask))
 			qc->flags |= ATA_QCFLAG_FAILED;
@@ -5001,86 +5009,120 @@ int ata_flush_cache(struct ata_device *dev)
 	return 0;
 }
 
-static int ata_standby_drive(struct ata_device *dev)
+static int ata_host_set_request_pm(struct ata_host_set *host_set,
+				   pm_message_t mesg, unsigned int action,
+				   unsigned int ehi_flags, int wait)
 {
-	unsigned int err_mask;
+	unsigned long flags;
+	int i, rc;
 
-	err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
-	if (err_mask) {
-		ata_dev_printk(dev, KERN_ERR, "failed to standby drive "
-			       "(err_mask=0x%x)\n", err_mask);
-		return -EIO;
-	}
+	for (i = 0; i < host_set->n_ports; i++) {
+		struct ata_port *ap = host_set->ports[i];
 
-	return 0;
-}
+		/* Previous resume operation might still be in
+		 * progress.  Wait for PM_PENDING to clear.
+		 */
+		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
+			ata_port_wait_eh(ap);
+			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
+		}
 
-static int ata_start_drive(struct ata_device *dev)
-{
-	unsigned int err_mask;
+		/* request PM ops to EH */
+		spin_lock_irqsave(ap->lock, flags);
 
-	err_mask = ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE);
-	if (err_mask) {
-		ata_dev_printk(dev, KERN_ERR, "failed to start drive "
-			       "(err_mask=0x%x)\n", err_mask);
-		return -EIO;
+		ap->pm_mesg = mesg;
+		if (wait) {
+			rc = 0;
+			ap->pm_result = &rc;
+		}
+
+		ap->pflags |= ATA_PFLAG_PM_PENDING;
+		ap->eh_info.action |= action;
+		ap->eh_info.flags |= ehi_flags;
+
+		ata_port_schedule_eh(ap);
+
+		spin_unlock_irqrestore(ap->lock, flags);
+
+		/* wait and check result */
+		if (wait) {
+			ata_port_wait_eh(ap);
+			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
+			if (rc)
+				return rc;
+		}
 	}
 
 	return 0;
 }
 
 /**
- *	ata_device_resume - wakeup a previously suspended devices
- *	@dev: the device to resume
+ *	ata_host_set_suspend - suspend host_set
+ *	@host_set: host_set to suspend
+ *	@mesg: PM message
  *
- *	Kick the drive back into action, by sending it an idle immediate
- *	command and making sure its transfer mode matches between drive
- *	and host.
+ *	Suspend @host_set.  Actual operation is performed by EH.  This
+ *	function requests EH to perform PM operations and waits for EH
+ *	to finish.
  *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno on failure.
  */
-int ata_device_resume(struct ata_device *dev)
+int ata_host_set_suspend(struct ata_host_set *host_set, pm_message_t mesg)
 {
-	struct ata_port *ap = dev->ap;
+	int i, j, rc;
 
-	if (ap->flags & ATA_FLAG_SUSPENDED) {
-		struct ata_device *failed_dev;
+	rc = ata_host_set_request_pm(host_set, mesg, 0, ATA_EHI_QUIET, 1);
+	if (rc)
+		goto fail;
 
-		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
-		ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 200000);
+	/* EH is quiescent now.  Fail if we have any ready device.
+	 * This happens if hotplug occurs between completion of device
+	 * suspension and here.
+	 */
+	for (i = 0; i < host_set->n_ports; i++) {
+		struct ata_port *ap = host_set->ports[i];
 
-		ap->flags &= ~ATA_FLAG_SUSPENDED;
-		while (ata_set_mode(ap, &failed_dev))
-			ata_dev_disable(failed_dev);
+		for (j = 0; j < ATA_MAX_DEVICES; j++) {
+			struct ata_device *dev = &ap->device[j];
+
+			if (ata_dev_ready(dev)) {
+				ata_port_printk(ap, KERN_WARNING,
+						"suspend failed, device %d "
+						"still active\n", dev->devno);
+				rc = -EBUSY;
+				goto fail;
+			}
+		}
 	}
-	if (!ata_dev_enabled(dev))
-		return 0;
-	if (dev->class == ATA_DEV_ATA)
-		ata_start_drive(dev);
 
+	host_set->dev->power.power_state = mesg;
 	return 0;
+
+ fail:
+	ata_host_set_resume(host_set);
+	return rc;
 }
 
 /**
- *	ata_device_suspend - prepare a device for suspend
- *	@dev: the device to suspend
- *	@state: target power management state
+ *	ata_host_set_resume - resume host_set
+ *	@host_set: host_set to resume
+ *
+ *	Resume @host_set.  Actual operation is performed by EH.  This
+ *	function requests EH to perform PM operations and returns.
+ *	Note that all resume operations are performed in parallel.
  *
- *	Flush the cache on the drive, if appropriate, then issue a
- *	standbynow command.
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
  */
-int ata_device_suspend(struct ata_device *dev, pm_message_t state)
+void ata_host_set_resume(struct ata_host_set *host_set)
 {
-	struct ata_port *ap = dev->ap;
-
-	if (!ata_dev_enabled(dev))
-		return 0;
-	if (dev->class == ATA_DEV_ATA)
-		ata_flush_cache(dev);
-
-	if (state.event != PM_EVENT_FREEZE)
-		ata_standby_drive(dev);
-	ap->flags |= ATA_FLAG_SUSPENDED;
-	return 0;
+	ata_host_set_request_pm(host_set, PMSG_ON, ATA_EH_SOFTRESET,
+				ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
+	host_set->dev->power.power_state = PMSG_ON;
 }
 
 /**
@@ -5440,6 +5482,7 @@ int ata_device_add(const struct ata_probe_ent *ent)
 	}
 
 	if (ap->ops->error_handler) {
+		struct ata_eh_info *ehi = &ap->eh_info;
 		unsigned long flags;
 
 		ata_port_probe(ap);
@@ -5447,10 +5490,11 @@ int ata_device_add(const struct ata_probe_ent *ent)
 		/* kick EH for boot probing */
 		spin_lock_irqsave(ap->lock, flags);
 
-		ap->eh_info.probe_mask = (1 << ATA_MAX_DEVICES) - 1;
-		ap->eh_info.action |= ATA_EH_SOFTRESET;
+		ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
+		ehi->action |= ATA_EH_SOFTRESET;
+		ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
 
-		ap->flags |= ATA_FLAG_LOADING;
+		ap->pflags |= ATA_PFLAG_LOADING;
 		ata_port_schedule_eh(ap);
 
 		spin_unlock_irqrestore(ap->lock, flags);
@@ -5518,7 +5562,7 @@ void ata_port_detach(struct ata_port *ap)
 
 	/* tell EH we're leaving & flush EH */
 	spin_lock_irqsave(ap->lock, flags);
-	ap->flags |= ATA_FLAG_UNLOADING;
+	ap->pflags |= ATA_PFLAG_UNLOADING;
 	spin_unlock_irqrestore(ap->lock, flags);
 
 	ata_port_wait_eh(ap);
@@ -5723,20 +5767,55 @@ int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
 	return (tmp == bits->val) ? 1 : 0;
 }
 
-int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
+void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	pci_save_state(pdev);
-	pci_disable_device(pdev);
-	pci_set_power_state(pdev, PCI_D3hot);
-	return 0;
+
+	if (state.event == PM_EVENT_SUSPEND) {
+		pci_disable_device(pdev);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
 }
 
-int ata_pci_device_resume(struct pci_dev *pdev)
+void ata_pci_device_do_resume(struct pci_dev *pdev)
 {
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
 	pci_enable_device(pdev);
 	pci_set_master(pdev);
+}
+
+int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
+	int rc = 0;
+
+	rc = ata_host_set_suspend(host_set, state);
+	if (rc)
+		return rc;
+
+	if (host_set->next) {
+		rc = ata_host_set_suspend(host_set->next, state);
+		if (rc) {
+			ata_host_set_resume(host_set);
+			return rc;
+		}
+	}
+
+	ata_pci_device_do_suspend(pdev, state);
+
+	return 0;
+}
+
+int ata_pci_device_resume(struct pci_dev *pdev)
+{
+	struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
+
+	ata_pci_device_do_resume(pdev);
+	ata_host_set_resume(host_set);
+	if (host_set->next)
+		ata_host_set_resume(host_set->next);
+
 	return 0;
 }
 #endif /* CONFIG_PCI */
@@ -5842,9 +5921,9 @@ u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
  * Do not depend on ABI/API stability.
  */
 
-EXPORT_SYMBOL_GPL(sata_deb_timing_boot);
-EXPORT_SYMBOL_GPL(sata_deb_timing_eh);
-EXPORT_SYMBOL_GPL(sata_deb_timing_before_fsrst);
+EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
+EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
+EXPORT_SYMBOL_GPL(sata_deb_timing_long);
 EXPORT_SYMBOL_GPL(ata_std_bios_param);
 EXPORT_SYMBOL_GPL(ata_std_ports);
 EXPORT_SYMBOL_GPL(ata_device_add);
@@ -5916,6 +5995,8 @@ EXPORT_SYMBOL_GPL(sata_scr_write);
 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
 EXPORT_SYMBOL_GPL(ata_port_online);
 EXPORT_SYMBOL_GPL(ata_port_offline);
+EXPORT_SYMBOL_GPL(ata_host_set_suspend);
+EXPORT_SYMBOL_GPL(ata_host_set_resume);
 EXPORT_SYMBOL_GPL(ata_id_string);
 EXPORT_SYMBOL_GPL(ata_id_c_string);
 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
@@ -5930,14 +6011,14 @@ EXPORT_SYMBOL_GPL(ata_pci_host_stop);
 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
 EXPORT_SYMBOL_GPL(ata_pci_init_one);
 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
+EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
+EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
 #endif /* CONFIG_PCI */
 
-EXPORT_SYMBOL_GPL(ata_device_suspend);
-EXPORT_SYMBOL_GPL(ata_device_resume);
 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
 
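For a low-level driver the net effect is that power management is now funneled through EH: a PCI LLDD can wire the generic entry points straight into its pci_driver and let ata_host_set_suspend()/ata_host_set_resume() do the EH handshake. A minimal sketch of that wiring (the driver name and its probe/id-table symbols are assumed to exist elsewhere):

/* Sketch: 2.6.18-era libata PCI driver using the generic PM entry points. */
static struct pci_driver example_sata_driver = {
	.name		= "example_sata",
	.id_table	= example_pci_tbl,		/* assumed */
	.probe		= example_init_one,		/* assumed */
	.remove		= ata_pci_remove_one,
	.suspend	= ata_pci_device_suspend,	/* requests suspend via EH */
	.resume		= ata_pci_device_resume,	/* resumes, then kicks EH */
};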
diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c
index bf5a72aca8a..4b6aa30f4d6 100644
--- a/drivers/scsi/libata-eh.c
+++ b/drivers/scsi/libata-eh.c
@@ -47,6 +47,8 @@
 
 static void __ata_port_freeze(struct ata_port *ap);
 static void ata_eh_finish(struct ata_port *ap);
+static void ata_eh_handle_port_suspend(struct ata_port *ap);
+static void ata_eh_handle_port_resume(struct ata_port *ap);
 
 static void ata_ering_record(struct ata_ering *ering, int is_io,
 			     unsigned int err_mask)
@@ -190,7 +192,6 @@ enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
 void ata_scsi_error(struct Scsi_Host *host)
 {
 	struct ata_port *ap = ata_shost_to_port(host);
-	spinlock_t *ap_lock = ap->lock;
 	int i, repeat_cnt = ATA_EH_MAX_REPEAT;
 	unsigned long flags;
 
@@ -217,7 +218,7 @@ void ata_scsi_error(struct Scsi_Host *host)
 		struct scsi_cmnd *scmd, *tmp;
 		int nr_timedout = 0;
 
-		spin_lock_irqsave(ap_lock, flags);
+		spin_lock_irqsave(ap->lock, flags);
 
 		list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
 			struct ata_queued_cmd *qc;
@@ -256,43 +257,49 @@ void ata_scsi_error(struct Scsi_Host *host)
 		if (nr_timedout)
 			__ata_port_freeze(ap);
 
-		spin_unlock_irqrestore(ap_lock, flags);
+		spin_unlock_irqrestore(ap->lock, flags);
 	} else
-		spin_unlock_wait(ap_lock);
+		spin_unlock_wait(ap->lock);
 
  repeat:
 	/* invoke error handler */
 	if (ap->ops->error_handler) {
+		/* process port resume request */
+		ata_eh_handle_port_resume(ap);
+
 		/* fetch & clear EH info */
-		spin_lock_irqsave(ap_lock, flags);
+		spin_lock_irqsave(ap->lock, flags);
 
 		memset(&ap->eh_context, 0, sizeof(ap->eh_context));
 		ap->eh_context.i = ap->eh_info;
 		memset(&ap->eh_info, 0, sizeof(ap->eh_info));
 
-		ap->flags |= ATA_FLAG_EH_IN_PROGRESS;
-		ap->flags &= ~ATA_FLAG_EH_PENDING;
+		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
+		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
 
-		spin_unlock_irqrestore(ap_lock, flags);
+		spin_unlock_irqrestore(ap->lock, flags);
 
-		/* invoke EH. if unloading, just finish failed qcs */
-		if (!(ap->flags & ATA_FLAG_UNLOADING))
+		/* invoke EH, skip if unloading or suspended */
+		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
 			ap->ops->error_handler(ap);
 		else
 			ata_eh_finish(ap);
 
+		/* process port suspend request */
+		ata_eh_handle_port_suspend(ap);
+
 		/* Exception might have happend after ->error_handler
 		 * recovered the port but before this point. Repeat
 		 * EH in such case.
 		 */
-		spin_lock_irqsave(ap_lock, flags);
+		spin_lock_irqsave(ap->lock, flags);
 
-		if (ap->flags & ATA_FLAG_EH_PENDING) {
+		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
 			if (--repeat_cnt) {
 				ata_port_printk(ap, KERN_INFO,
 					"EH pending after completion, "
 					"repeating EH (cnt=%d)\n", repeat_cnt);
-				spin_unlock_irqrestore(ap_lock, flags);
+				spin_unlock_irqrestore(ap->lock, flags);
 				goto repeat;
 			}
 			ata_port_printk(ap, KERN_ERR, "EH pending after %d "
@@ -302,14 +309,14 @@ void ata_scsi_error(struct Scsi_Host *host)
 		/* this run is complete, make sure EH info is clear */
 		memset(&ap->eh_info, 0, sizeof(ap->eh_info));
 
-		/* Clear host_eh_scheduled while holding ap_lock such
+		/* Clear host_eh_scheduled while holding ap->lock such
 		 * that if exception occurs after this point but
 		 * before EH completion, SCSI midlayer will
 		 * re-initiate EH.
 		 */
 		host->host_eh_scheduled = 0;
 
-		spin_unlock_irqrestore(ap_lock, flags);
+		spin_unlock_irqrestore(ap->lock, flags);
 	} else {
 		WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
 		ap->ops->eng_timeout(ap);
@@ -321,24 +328,23 @@ void ata_scsi_error(struct Scsi_Host *host)
 	scsi_eh_flush_done_q(&ap->eh_done_q);
 
 	/* clean up */
-	spin_lock_irqsave(ap_lock, flags);
+	spin_lock_irqsave(ap->lock, flags);
 
-	if (ap->flags & ATA_FLAG_LOADING) {
-		ap->flags &= ~ATA_FLAG_LOADING;
-	} else {
-		if (ap->flags & ATA_FLAG_SCSI_HOTPLUG)
-			queue_work(ata_aux_wq, &ap->hotplug_task);
-		if (ap->flags & ATA_FLAG_RECOVERED)
+	if (ap->pflags & ATA_PFLAG_LOADING)
+		ap->pflags &= ~ATA_PFLAG_LOADING;
+	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
+		queue_work(ata_aux_wq, &ap->hotplug_task);
+
+	if (ap->pflags & ATA_PFLAG_RECOVERED)
 		ata_port_printk(ap, KERN_INFO, "EH complete\n");
-	}
 
-	ap->flags &= ~(ATA_FLAG_SCSI_HOTPLUG | ATA_FLAG_RECOVERED);
+	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);
 
 	/* tell wait_eh that we're done */
-	ap->flags &= ~ATA_FLAG_EH_IN_PROGRESS;
+	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
 	wake_up_all(&ap->eh_wait_q);
 
-	spin_unlock_irqrestore(ap_lock, flags);
+	spin_unlock_irqrestore(ap->lock, flags);
 
 	DPRINTK("EXIT\n");
 }
@@ -360,7 +366,7 @@ void ata_port_wait_eh(struct ata_port *ap)
  retry:
 	spin_lock_irqsave(ap->lock, flags);
 
-	while (ap->flags & (ATA_FLAG_EH_PENDING | ATA_FLAG_EH_IN_PROGRESS)) {
+	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
 		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
 		spin_unlock_irqrestore(ap->lock, flags);
 		schedule();
@@ -489,7 +495,7 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
 	WARN_ON(!ap->ops->error_handler);
 
 	qc->flags |= ATA_QCFLAG_FAILED;
-	qc->ap->flags |= ATA_FLAG_EH_PENDING;
+	qc->ap->pflags |= ATA_PFLAG_EH_PENDING;
 
 	/* The following will fail if timeout has already expired.
 	 * ata_scsi_error() takes care of such scmds on EH entry.
@@ -513,7 +519,7 @@ void ata_port_schedule_eh(struct ata_port *ap)
513{ 519{
514 WARN_ON(!ap->ops->error_handler); 520 WARN_ON(!ap->ops->error_handler);
515 521
516 ap->flags |= ATA_FLAG_EH_PENDING; 522 ap->pflags |= ATA_PFLAG_EH_PENDING;
517 scsi_schedule_eh(ap->host); 523 scsi_schedule_eh(ap->host);
518 524
519 DPRINTK("port EH scheduled\n"); 525 DPRINTK("port EH scheduled\n");
@@ -578,7 +584,7 @@ static void __ata_port_freeze(struct ata_port *ap)
578 if (ap->ops->freeze) 584 if (ap->ops->freeze)
579 ap->ops->freeze(ap); 585 ap->ops->freeze(ap);
580 586
581 ap->flags |= ATA_FLAG_FROZEN; 587 ap->pflags |= ATA_PFLAG_FROZEN;
582 588
583 DPRINTK("ata%u port frozen\n", ap->id); 589 DPRINTK("ata%u port frozen\n", ap->id);
584} 590}
@@ -646,7 +652,7 @@ void ata_eh_thaw_port(struct ata_port *ap)
646 652
647 spin_lock_irqsave(ap->lock, flags); 653 spin_lock_irqsave(ap->lock, flags);
648 654
649 ap->flags &= ~ATA_FLAG_FROZEN; 655 ap->pflags &= ~ATA_PFLAG_FROZEN;
650 656
651 if (ap->ops->thaw) 657 if (ap->ops->thaw)
652 ap->ops->thaw(ap); 658 ap->ops->thaw(ap);
@@ -731,7 +737,7 @@ static void ata_eh_detach_dev(struct ata_device *dev)
731 737
732 if (ata_scsi_offline_dev(dev)) { 738 if (ata_scsi_offline_dev(dev)) {
733 dev->flags |= ATA_DFLAG_DETACHED; 739 dev->flags |= ATA_DFLAG_DETACHED;
734 ap->flags |= ATA_FLAG_SCSI_HOTPLUG; 740 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
735 } 741 }
736 742
737 /* clear per-dev EH actions */ 743 /* clear per-dev EH actions */
@@ -760,8 +766,12 @@ static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
760 unsigned long flags; 766 unsigned long flags;
761 767
762 spin_lock_irqsave(ap->lock, flags); 768 spin_lock_irqsave(ap->lock, flags);
769
763 ata_eh_clear_action(dev, &ap->eh_info, action); 770 ata_eh_clear_action(dev, &ap->eh_info, action);
764 ap->flags |= ATA_FLAG_RECOVERED; 771
772 if (!(ap->eh_context.i.flags & ATA_EHI_QUIET))
773 ap->pflags |= ATA_PFLAG_RECOVERED;
774
765 spin_unlock_irqrestore(ap->lock, flags); 775 spin_unlock_irqrestore(ap->lock, flags);
766} 776}
767 777
@@ -1027,7 +1037,7 @@ static void ata_eh_analyze_ncq_error(struct ata_port *ap)
1027 int tag, rc; 1037 int tag, rc;
1028 1038
1029 /* if frozen, we can't do much */ 1039 /* if frozen, we can't do much */
1030 if (ap->flags & ATA_FLAG_FROZEN) 1040 if (ap->pflags & ATA_PFLAG_FROZEN)
1031 return; 1041 return;
1032 1042
1033 /* is it NCQ device error? */ 1043 /* is it NCQ device error? */
@@ -1275,6 +1285,9 @@ static void ata_eh_autopsy(struct ata_port *ap)
1275 1285
1276 DPRINTK("ENTER\n"); 1286 DPRINTK("ENTER\n");
1277 1287
1288 if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
1289 return;
1290
1278 /* obtain and analyze SError */ 1291 /* obtain and analyze SError */
1279 rc = sata_scr_read(ap, SCR_ERROR, &serror); 1292 rc = sata_scr_read(ap, SCR_ERROR, &serror);
1280 if (rc == 0) { 1293 if (rc == 0) {
@@ -1327,7 +1340,7 @@ static void ata_eh_autopsy(struct ata_port *ap)
1327 } 1340 }
1328 1341
1329 /* enforce default EH actions */ 1342 /* enforce default EH actions */
1330 if (ap->flags & ATA_FLAG_FROZEN || 1343 if (ap->pflags & ATA_PFLAG_FROZEN ||
1331 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) 1344 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
1332 action |= ATA_EH_SOFTRESET; 1345 action |= ATA_EH_SOFTRESET;
1333 else if (all_err_mask) 1346 else if (all_err_mask)
@@ -1346,7 +1359,7 @@ static void ata_eh_autopsy(struct ata_port *ap)
1346 1359
1347 /* record autopsy result */ 1360 /* record autopsy result */
1348 ehc->i.dev = failed_dev; 1361 ehc->i.dev = failed_dev;
1349 ehc->i.action = action; 1362 ehc->i.action |= action;
1350 1363
1351 DPRINTK("EXIT\n"); 1364 DPRINTK("EXIT\n");
1352} 1365}
@@ -1385,7 +1398,7 @@ static void ata_eh_report(struct ata_port *ap)
1385 return; 1398 return;
1386 1399
1387 frozen = ""; 1400 frozen = "";
1388 if (ap->flags & ATA_FLAG_FROZEN) 1401 if (ap->pflags & ATA_PFLAG_FROZEN)
1389 frozen = " frozen"; 1402 frozen = " frozen";
1390 1403
1391 if (ehc->i.dev) { 1404 if (ehc->i.dev) {
@@ -1465,7 +1478,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
1465 struct ata_eh_context *ehc = &ap->eh_context; 1478 struct ata_eh_context *ehc = &ap->eh_context;
1466 unsigned int *classes = ehc->classes; 1479 unsigned int *classes = ehc->classes;
1467 int tries = ATA_EH_RESET_TRIES; 1480 int tries = ATA_EH_RESET_TRIES;
1468 int verbose = !(ap->flags & ATA_FLAG_LOADING); 1481 int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
1469 unsigned int action; 1482 unsigned int action;
1470 ata_reset_fn_t reset; 1483 ata_reset_fn_t reset;
1471 int i, did_followup_srst, rc; 1484 int i, did_followup_srst, rc;
@@ -1605,7 +1618,7 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap,
1605 dev = &ap->device[i]; 1618 dev = &ap->device[i];
1606 action = ata_eh_dev_action(dev); 1619 action = ata_eh_dev_action(dev);
1607 1620
1608 if (action & ATA_EH_REVALIDATE && ata_dev_enabled(dev)) { 1621 if (action & ATA_EH_REVALIDATE && ata_dev_ready(dev)) {
1609 if (ata_port_offline(ap)) { 1622 if (ata_port_offline(ap)) {
1610 rc = -EIO; 1623 rc = -EIO;
1611 break; 1624 break;
@@ -1636,7 +1649,7 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap,
1636 } 1649 }
1637 1650
1638 spin_lock_irqsave(ap->lock, flags); 1651 spin_lock_irqsave(ap->lock, flags);
1639 ap->flags |= ATA_FLAG_SCSI_HOTPLUG; 1652 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
1640 spin_unlock_irqrestore(ap->lock, flags); 1653 spin_unlock_irqrestore(ap->lock, flags);
1641 } 1654 }
1642 } 1655 }
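The recurring change in the hunks above, ATA_FLAG_* bits on ap->flags becoming ATA_PFLAG_* bits on a new ap->pflags field, separates immutable host configuration from dynamic EH and PM state that must only be touched under ap->lock. A minimal userspace sketch of the same split; the names here are illustrative, not libata's:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical port structure mirroring the libata split: "flags"
 * is fixed at probe time, "pflags" is dynamic and lock-protected. */
enum { CFG_SATA = 1 << 0 };              /* static config bit */
enum { PFLAG_EH_PENDING = 1 << 0,
       PFLAG_FROZEN     = 1 << 1 };      /* dynamic state bits */

struct port {
	unsigned long flags;             /* set once, read without the lock */
	unsigned long pflags;            /* mutable, guarded by lock */
	pthread_mutex_t lock;
};

static void schedule_eh(struct port *ap)
{
	pthread_mutex_lock(&ap->lock);
	ap->pflags |= PFLAG_EH_PENDING;  /* dynamic bit: lock held */
	pthread_mutex_unlock(&ap->lock);
}

int main(void)
{
	struct port ap = { .flags = CFG_SATA,
			   .lock = PTHREAD_MUTEX_INITIALIZER };
	schedule_eh(&ap);
	printf("pflags=%#lx\n", ap.pflags);
	return 0;
}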
@@ -1648,6 +1661,164 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap,
 	return rc;
 }
 
+/**
+ *	ata_eh_suspend - handle suspend EH action
+ *	@ap: target host port
+ *	@r_failed_dev: result parameter to indicate failing device
+ *
+ *	Handle suspend EH action.  Disk devices are spun down and
+ *	other types of devices are just marked suspended.  Once
+ *	suspended, no EH action to the device is allowed until it is
+ *	resumed.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise
+ */
+static int ata_eh_suspend(struct ata_port *ap, struct ata_device **r_failed_dev)
+{
+	struct ata_device *dev;
+	int i, rc = 0;
+
+	DPRINTK("ENTER\n");
+
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		unsigned long flags;
+		unsigned int action, err_mask;
+
+		dev = &ap->device[i];
+		action = ata_eh_dev_action(dev);
+
+		if (!ata_dev_enabled(dev) || !(action & ATA_EH_SUSPEND))
+			continue;
+
+		WARN_ON(dev->flags & ATA_DFLAG_SUSPENDED);
+
+		ata_eh_about_to_do(ap, dev, ATA_EH_SUSPEND);
+
+		if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) {
+			/* flush cache */
+			rc = ata_flush_cache(dev);
+			if (rc)
+				break;
+
+			/* spin down */
+			err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
+			if (err_mask) {
+				ata_dev_printk(dev, KERN_ERR, "failed to "
+					       "spin down (err_mask=0x%x)\n",
+					       err_mask);
+				rc = -EIO;
+				break;
+			}
+		}
+
+		spin_lock_irqsave(ap->lock, flags);
+		dev->flags |= ATA_DFLAG_SUSPENDED;
+		spin_unlock_irqrestore(ap->lock, flags);
+
+		ata_eh_done(ap, dev, ATA_EH_SUSPEND);
+	}
+
+	if (rc)
+		*r_failed_dev = dev;
+
+	DPRINTK("EXIT\n");
+	return rc;
+}
+
+/**
+ *	ata_eh_prep_resume - prep for resume EH action
+ *	@ap: target host port
+ *
+ *	Clear SUSPENDED in preparation for scheduled resume actions.
+ *	This allows other parts of EH to access the devices being
+ *	resumed.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ */
+static void ata_eh_prep_resume(struct ata_port *ap)
+{
+	struct ata_device *dev;
+	unsigned long flags;
+	int i;
+
+	DPRINTK("ENTER\n");
+
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		unsigned int action;
+
+		dev = &ap->device[i];
+		action = ata_eh_dev_action(dev);
+
+		if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME))
+			continue;
+
+		spin_lock_irqsave(ap->lock, flags);
+		dev->flags &= ~ATA_DFLAG_SUSPENDED;
+		spin_unlock_irqrestore(ap->lock, flags);
+	}
+
+	DPRINTK("EXIT\n");
+}
+
+/**
+ *	ata_eh_resume - handle resume EH action
+ *	@ap: target host port
+ *	@r_failed_dev: result parameter to indicate failing device
+ *
+ *	Handle resume EH action.  Target devices are already reset and
+ *	revalidated.  Spinning up is the only operation left.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise
+ */
+static int ata_eh_resume(struct ata_port *ap, struct ata_device **r_failed_dev)
+{
+	struct ata_device *dev;
+	int i, rc = 0;
+
+	DPRINTK("ENTER\n");
+
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		unsigned int action, err_mask;
+
+		dev = &ap->device[i];
+		action = ata_eh_dev_action(dev);
+
+		if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME))
+			continue;
+
+		ata_eh_about_to_do(ap, dev, ATA_EH_RESUME);
+
+		if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) {
+			err_mask = ata_do_simple_cmd(dev,
+						     ATA_CMD_IDLEIMMEDIATE);
+			if (err_mask) {
+				ata_dev_printk(dev, KERN_ERR, "failed to "
					       "spin up (err_mask=0x%x)\n",
+					       err_mask);
+				rc = -EIO;
+				break;
+			}
+		}
+
+		ata_eh_done(ap, dev, ATA_EH_RESUME);
+	}
+
+	if (rc)
+		*r_failed_dev = dev;
+
+	DPRINTK("EXIT\n");
+	return rc;
+}
+
 static int ata_port_nr_enabled(struct ata_port *ap)
 {
 	int i, cnt = 0;
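The per-device suspend sequence ata_eh_suspend() implements is: flush the write cache, issue STANDBY IMMEDIATE, then mark the device suspended so no further EH actions touch it. A hedged standalone sketch of that ordering, with stubs standing in for the real ATA commands:

#include <stdio.h>

/* Stubs standing in for real commands; in libata these are
 * ata_flush_cache() and ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1). */
static int flush_cache(int dev) { printf("dev%d: flush cache\n", dev); return 0; }
static int standby_now(int dev) { printf("dev%d: spin down\n", dev); return 0; }

#define DFLAG_SUSPENDED (1 << 0)

static int suspend_one(int dev, unsigned int *dflags, int pm_freeze)
{
	if (!pm_freeze) {           /* only spin down on a real suspend */
		if (flush_cache(dev))
			return -1;
		if (standby_now(dev))
			return -1;
	}
	*dflags |= DFLAG_SUSPENDED; /* no further EH actions allowed */
	return 0;
}

int main(void)
{
	unsigned int dflags = 0;
	return suspend_one(0, &dflags, 0);
}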
@@ -1673,7 +1844,19 @@ static int ata_eh_skip_recovery(struct ata_port *ap)
 	struct ata_eh_context *ehc = &ap->eh_context;
 	int i;
 
-	if (ap->flags & ATA_FLAG_FROZEN || ata_port_nr_enabled(ap))
+	/* skip if all possible devices are suspended */
+	for (i = 0; i < ata_port_max_devices(ap); i++) {
+		struct ata_device *dev = &ap->device[i];
+
+		if (ata_dev_absent(dev) || ata_dev_ready(dev))
+			break;
+	}
+
+	if (i == ata_port_max_devices(ap))
+		return 1;
+
+	/* always thaw frozen port and recover failed devices */
+	if (ap->pflags & ATA_PFLAG_FROZEN || ata_port_nr_enabled(ap))
 		return 0;
 
 	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
@@ -1744,9 +1927,12 @@ static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
 	rc = 0;
 
 	/* if UNLOADING, finish immediately */
-	if (ap->flags & ATA_FLAG_UNLOADING)
+	if (ap->pflags & ATA_PFLAG_UNLOADING)
 		goto out;
 
+	/* prep for resume */
+	ata_eh_prep_resume(ap);
+
 	/* skip EH if possible. */
 	if (ata_eh_skip_recovery(ap))
 		ehc->i.action = 0;
@@ -1774,6 +1960,11 @@ static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
 		if (rc)
 			goto dev_fail;
 
+		/* resume devices */
+		rc = ata_eh_resume(ap, &dev);
+		if (rc)
+			goto dev_fail;
+
 		/* configure transfer mode if the port has been reset */
 		if (ehc->i.flags & ATA_EHI_DID_RESET) {
 			rc = ata_set_mode(ap, &dev);
@@ -1783,6 +1974,11 @@ static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
 		}
 	}
 
+	/* suspend devices */
+	rc = ata_eh_suspend(ap, &dev);
+	if (rc)
+		goto dev_fail;
+
 	goto out;
 
  dev_fail:
@@ -1908,11 +2104,124 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
 	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
 	       ata_postreset_fn_t postreset)
 {
-	if (!(ap->flags & ATA_FLAG_LOADING)) {
-		ata_eh_autopsy(ap);
-		ata_eh_report(ap);
-	}
-
+	ata_eh_autopsy(ap);
+	ata_eh_report(ap);
 	ata_eh_recover(ap, prereset, softreset, hardreset, postreset);
 	ata_eh_finish(ap);
 }
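ata_eh_skip_recovery() above relies on the classic "did the loop run to completion" idiom: after the for loop, i equals the limit exactly when no break fired. A tiny self-contained example of the same test:

#include <stdio.h>

/* The "i == limit" idiom used by ata_eh_skip_recovery(): if the loop
 * ran to completion without break-ing, no device was absent or ready,
 * i.e. everything left is suspended. */
int main(void)
{
	int ready[4] = { 0, 0, 0, 0 };  /* sample per-device state */
	int n = 4, i;

	for (i = 0; i < n; i++)
		if (ready[i])
			break;          /* found one, stop early */

	if (i == n)
		printf("all devices suspended; skip recovery\n");
	else
		printf("device %d needs recovery\n", i);
	return 0;
}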
+
+/**
+ *	ata_eh_handle_port_suspend - perform port suspend operation
+ *	@ap: port to suspend
+ *
+ *	Suspend @ap.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ */
+static void ata_eh_handle_port_suspend(struct ata_port *ap)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	/* are we suspending? */
+	spin_lock_irqsave(ap->lock, flags);
+	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
+	    ap->pm_mesg.event == PM_EVENT_ON) {
+		spin_unlock_irqrestore(ap->lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
+
+	/* suspend */
+	ata_eh_freeze_port(ap);
+
+	if (ap->ops->port_suspend)
+		rc = ap->ops->port_suspend(ap, ap->pm_mesg);
+
+	/* report result */
+	spin_lock_irqsave(ap->lock, flags);
+
+	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
+	if (rc == 0)
+		ap->pflags |= ATA_PFLAG_SUSPENDED;
+	else
+		ata_port_schedule_eh(ap);
+
+	if (ap->pm_result) {
+		*ap->pm_result = rc;
+		ap->pm_result = NULL;
+	}
+
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	return;
+}
+
+/**
+ *	ata_eh_handle_port_resume - perform port resume operation
+ *	@ap: port to resume
+ *
+ *	Resume @ap.
+ *
+ *	This function also waits up to one second until all devices
+ *	hanging off this port request resume EH action.  This is to
+ *	prevent invoking EH and thus reset multiple times on resume.
+ *
+ *	On DPM resume, where some devices might not be resumed
+ *	together, this may delay port resume by up to one second, but
+ *	such DPM resumes are rare and a one second delay isn't too bad.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ */
+static void ata_eh_handle_port_resume(struct ata_port *ap)
+{
+	unsigned long timeout;
+	unsigned long flags;
+	int i, rc = 0;
+
+	/* are we resuming? */
+	spin_lock_irqsave(ap->lock, flags);
+	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
+	    ap->pm_mesg.event != PM_EVENT_ON) {
+		spin_unlock_irqrestore(ap->lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	/* spurious? */
+	if (!(ap->pflags & ATA_PFLAG_SUSPENDED))
+		goto done;
+
+	if (ap->ops->port_resume)
+		rc = ap->ops->port_resume(ap);
+
+	/* give devices time to request EH */
+	timeout = jiffies + HZ; /* 1s max */
+	while (1) {
+		for (i = 0; i < ATA_MAX_DEVICES; i++) {
+			struct ata_device *dev = &ap->device[i];
+			unsigned int action = ata_eh_dev_action(dev);
+
+			if ((dev->flags & ATA_DFLAG_SUSPENDED) &&
+			    !(action & ATA_EH_RESUME))
+				break;
+		}
+
+		if (i == ATA_MAX_DEVICES || time_after(jiffies, timeout))
+			break;
+		msleep(10);
+	}
+
+ done:
+	spin_lock_irqsave(ap->lock, flags);
+	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
+	if (ap->pm_result) {
+		*ap->pm_result = rc;
+		ap->pm_result = NULL;
+	}
+	spin_unlock_irqrestore(ap->lock, flags);
+}
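The "timeout = jiffies + HZ; ... msleep(10)" loop above is a bounded poll: check a condition every 10 ms, give up after one second, and proceed either way. A userspace analog of that pattern, a sketch using wall-clock time rather than jiffies:

#include <stdio.h>
#include <time.h>

/* Poll a condition every 10 ms for at most one second, mirroring the
 * resume wait loop above; condition_met() is a stand-in predicate. */
static int condition_met(void) { return 0; }

int main(void)
{
	struct timespec tick = { 0, 10 * 1000 * 1000 };  /* 10 ms */
	time_t deadline = time(NULL) + 1;                /* 1 s max */

	while (!condition_met() && time(NULL) <= deadline)
		nanosleep(&tick, NULL);

	printf(condition_met() ? "resumed\n"
			       : "timed out, continue anyway\n");
	return 0;
}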
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 2915bca691e..7ced41ecde8 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -397,20 +397,129 @@ void ata_dump_status(unsigned id, struct ata_taskfile *tf)
 	}
 }
 
-int ata_scsi_device_resume(struct scsi_device *sdev)
+/**
+ *	ata_scsi_device_suspend - suspend ATA device associated with sdev
+ *	@sdev: the SCSI device to suspend
+ *	@state: target power management state
+ *
+ *	Request suspend EH action on the ATA device associated with
+ *	@sdev and wait for the operation to complete.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
 {
 	struct ata_port *ap = ata_shost_to_port(sdev->host);
-	struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
+	struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
+	unsigned long flags;
+	unsigned int action;
+	int rc = 0;
+
+	if (!dev)
+		goto out;
+
+	spin_lock_irqsave(ap->lock, flags);
+
+	/* wait for the previous resume to complete */
+	while (dev->flags & ATA_DFLAG_SUSPENDED) {
+		spin_unlock_irqrestore(ap->lock, flags);
+		ata_port_wait_eh(ap);
+		spin_lock_irqsave(ap->lock, flags);
+	}
+
+	/* if @sdev is already detached, nothing to do */
+	if (sdev->sdev_state == SDEV_OFFLINE ||
+	    sdev->sdev_state == SDEV_CANCEL || sdev->sdev_state == SDEV_DEL)
+		goto out_unlock;
+
+	/* request suspend */
+	action = ATA_EH_SUSPEND;
+	if (state.event != PM_EVENT_SUSPEND)
+		action |= ATA_EH_PM_FREEZE;
+	ap->eh_info.dev_action[dev->devno] |= action;
+	ap->eh_info.flags |= ATA_EHI_QUIET;
+	ata_port_schedule_eh(ap);
+
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	/* wait for EH to do the job */
+	ata_port_wait_eh(ap);
+
+	spin_lock_irqsave(ap->lock, flags);
+
+	/* If @sdev is still attached but the associated ATA device
+	 * isn't suspended, the operation failed.
+	 */
+	if (sdev->sdev_state != SDEV_OFFLINE &&
+	    sdev->sdev_state != SDEV_CANCEL && sdev->sdev_state != SDEV_DEL &&
+	    !(dev->flags & ATA_DFLAG_SUSPENDED))
+		rc = -EIO;
 
-	return ata_device_resume(dev);
+ out_unlock:
+	spin_unlock_irqrestore(ap->lock, flags);
+ out:
+	if (rc == 0)
+		sdev->sdev_gendev.power.power_state = state;
+	return rc;
 }
 
-int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
+/**
+ *	ata_scsi_device_resume - resume ATA device associated with sdev
+ *	@sdev: the SCSI device to resume
+ *
+ *	Request resume EH action on the ATA device associated with
+ *	@sdev and return immediately.  This enables parallel
+ *	wakeup/spinup of devices.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0.
+ */
+int ata_scsi_device_resume(struct scsi_device *sdev)
 {
 	struct ata_port *ap = ata_shost_to_port(sdev->host);
-	struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
+	struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
+	struct ata_eh_info *ehi = &ap->eh_info;
+	unsigned long flags;
+	unsigned int action;
+
+	if (!dev)
+		goto out;
+
+	spin_lock_irqsave(ap->lock, flags);
+
+	/* if @sdev is already detached, nothing to do */
+	if (sdev->sdev_state == SDEV_OFFLINE ||
+	    sdev->sdev_state == SDEV_CANCEL || sdev->sdev_state == SDEV_DEL)
+		goto out_unlock;
 
-	return ata_device_suspend(dev, state);
+	/* request resume */
+	action = ATA_EH_RESUME;
+	if (sdev->sdev_gendev.power.power_state.event == PM_EVENT_SUSPEND)
+		__ata_ehi_hotplugged(ehi);
+	else
+		action |= ATA_EH_PM_FREEZE | ATA_EH_SOFTRESET;
+	ehi->dev_action[dev->devno] |= action;
+
+	/* We don't want autopsy and verbose EH messages.  Disable
+	 * those if we're the only device on this link.
+	 */
+	if (ata_port_max_devices(ap) == 1)
+		ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
+
+	ata_port_schedule_eh(ap);
+
+ out_unlock:
+	spin_unlock_irqrestore(ap->lock, flags);
+ out:
+	sdev->sdev_gendev.power.power_state = PMSG_ON;
+	return 0;
 }
 
 /**
@@ -2930,7 +3039,7 @@ void ata_scsi_hotplug(void *data)
 	struct ata_port *ap = data;
 	int i;
 
-	if (ap->flags & ATA_FLAG_UNLOADING) {
+	if (ap->pflags & ATA_PFLAG_UNLOADING) {
 		DPRINTK("ENTER/EXIT - unloading\n");
 		return;
 	}
@@ -3011,6 +3120,7 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
 		if (dev) {
 			ap->eh_info.probe_mask |= 1 << dev->devno;
 			ap->eh_info.action |= ATA_EH_SOFTRESET;
+			ap->eh_info.flags |= ATA_EHI_RESUME_LINK;
 		} else
 			rc = -EINVAL;
 	}
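Note the asymmetry the hunk above encodes: ata_scsi_device_suspend() schedules EH and then blocks in ata_port_wait_eh(), while ata_scsi_device_resume() only queues the resume action so several disks can spin up in parallel. A hedged pthread sketch of that request/wait split, with a toy "EH" thread standing in for the SCSI error handler:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int requested, completed;

static void *eh_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!requested)
		pthread_cond_wait(&cv, &lock);
	completed = 1;                    /* "EH" did the job */
	pthread_cond_broadcast(&cv);
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void schedule_eh(void)
{
	pthread_mutex_lock(&lock);
	requested = 1;
	pthread_cond_broadcast(&cv);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, eh_thread, NULL);

	schedule_eh();                    /* resume-style: fire and forget */

	pthread_mutex_lock(&lock);        /* suspend-style: wait for EH */
	while (!completed)
		pthread_cond_wait(&cv, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	puts("EH complete");
	return 0;
}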
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index 7aabb45c35e..d0a85073ebf 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -109,6 +109,7 @@ enum {
 };
 
 static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
+static int sil_pci_device_resume(struct pci_dev *pdev);
 static void sil_dev_config(struct ata_port *ap, struct ata_device *dev);
 static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg);
 static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
@@ -160,6 +161,8 @@ static struct pci_driver sil_pci_driver = {
 	.id_table		= sil_pci_tbl,
 	.probe			= sil_init_one,
 	.remove			= ata_pci_remove_one,
+	.suspend		= ata_pci_device_suspend,
+	.resume			= sil_pci_device_resume,
 };
 
 static struct scsi_host_template sil_sht = {
@@ -178,6 +181,8 @@ static struct scsi_host_template sil_sht = {
 	.slave_configure	= ata_scsi_slave_config,
 	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+	.suspend		= ata_scsi_device_suspend,
+	.resume			= ata_scsi_device_resume,
 };
 
 static const struct ata_port_operations sil_ops = {
@@ -370,7 +375,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
 		 * during hardreset makes controllers with broken SIEN
 		 * repeat probing needlessly.
 		 */
-		if (!(ap->flags & ATA_FLAG_FROZEN)) {
+		if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
 			ata_ehi_hotplugged(&ap->eh_info);
 			ap->eh_info.serror |= serror;
 		}
@@ -561,6 +566,52 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
 	}
 }
 
+static void sil_init_controller(struct pci_dev *pdev,
+				int n_ports, unsigned long host_flags,
+				void __iomem *mmio_base)
+{
+	u8 cls;
+	u32 tmp;
+	int i;
+
+	/* Initialize FIFO PCI bus arbitration */
+	cls = sil_get_device_cache_line(pdev);
+	if (cls) {
+		cls >>= 3;
+		cls++;  /* cls = (line_size/8)+1 */
+		for (i = 0; i < n_ports; i++)
+			writew(cls << 8 | cls,
+			       mmio_base + sil_port[i].fifo_cfg);
+	} else
+		dev_printk(KERN_WARNING, &pdev->dev,
+			   "cache line size not set.  Driver may not function\n");
+
+	/* Apply R_ERR on DMA activate FIS errata workaround */
+	if (host_flags & SIL_FLAG_RERR_ON_DMA_ACT) {
+		int cnt;
+
+		for (i = 0, cnt = 0; i < n_ports; i++) {
+			tmp = readl(mmio_base + sil_port[i].sfis_cfg);
+			if ((tmp & 0x3) != 0x01)
+				continue;
+			if (!cnt)
+				dev_printk(KERN_INFO, &pdev->dev,
+					   "Applying R_ERR on DMA activate "
+					   "FIS errata fix\n");
+			writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
+			cnt++;
+		}
+	}
+
+	if (n_ports == 4) {
+		/* flip the magic "make 4 ports work" bit */
+		tmp = readl(mmio_base + sil_port[2].bmdma);
+		if ((tmp & SIL_INTR_STEERING) == 0)
+			writel(tmp | SIL_INTR_STEERING,
+			       mmio_base + sil_port[2].bmdma);
+	}
+}
+
 static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version;
@@ -570,8 +621,6 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	int rc;
 	unsigned int i;
 	int pci_dev_busy = 0;
-	u32 tmp;
-	u8 cls;
 
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
@@ -630,42 +679,8 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 		ata_std_ports(&probe_ent->port[i]);
 	}
 
-	/* Initialize FIFO PCI bus arbitration */
-	cls = sil_get_device_cache_line(pdev);
-	if (cls) {
-		cls >>= 3;
-		cls++;  /* cls = (line_size/8)+1 */
-		for (i = 0; i < probe_ent->n_ports; i++)
-			writew(cls << 8 | cls,
-			       mmio_base + sil_port[i].fifo_cfg);
-	} else
-		dev_printk(KERN_WARNING, &pdev->dev,
-			   "cache line size not set.  Driver may not function\n");
-
-	/* Apply R_ERR on DMA activate FIS errata workaround */
-	if (probe_ent->host_flags & SIL_FLAG_RERR_ON_DMA_ACT) {
-		int cnt;
-
-		for (i = 0, cnt = 0; i < probe_ent->n_ports; i++) {
-			tmp = readl(mmio_base + sil_port[i].sfis_cfg);
-			if ((tmp & 0x3) != 0x01)
-				continue;
-			if (!cnt)
-				dev_printk(KERN_INFO, &pdev->dev,
-					   "Applying R_ERR on DMA activate "
-					   "FIS errata fix\n");
-			writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
-			cnt++;
-		}
-	}
-
-	if (ent->driver_data == sil_3114) {
-		/* flip the magic "make 4 ports work" bit */
-		tmp = readl(mmio_base + sil_port[2].bmdma);
-		if ((tmp & SIL_INTR_STEERING) == 0)
-			writel(tmp | SIL_INTR_STEERING,
-			       mmio_base + sil_port[2].bmdma);
-	}
+	sil_init_controller(pdev, probe_ent->n_ports, probe_ent->host_flags,
+			    mmio_base);
 
 	pci_set_master(pdev);
 
@@ -685,6 +700,18 @@ err_out:
 	return rc;
 }
 
+static int sil_pci_device_resume(struct pci_dev *pdev)
+{
+	struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
+
+	ata_pci_device_do_resume(pdev);
+	sil_init_controller(pdev, host_set->n_ports, host_set->ports[0]->flags,
+			    host_set->mmio_base);
+	ata_host_set_resume(host_set);
+
+	return 0;
+}
+
 static int __init sil_init(void)
 {
 	return pci_module_init(&sil_pci_driver);
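The sata_sil change is mostly mechanical: the one-time controller programming is hoisted out of sil_init_one() into sil_init_controller() so the identical setup can be replayed on PCI resume, when the chip has lost its register state. A minimal sketch of that probe/resume sharing pattern, with generic names rather than the driver's:

#include <stdio.h>

/* Hardware programming shared by first probe and wake-from-suspend. */
static void init_controller(int n_ports)
{
	for (int i = 0; i < n_ports; i++)
		printf("program port %d registers\n", i);
}

static int probe(int n_ports)
{
	init_controller(n_ports);   /* cold boot path */
	return 0;
}

static int resume(int n_ports)
{
	/* registers were lost while powered down; redo the
	 * probe-time setup exactly */
	init_controller(n_ports);
	return 0;
}

int main(void)
{
	probe(4);
	resume(4);
	return 0;
}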
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index 07a1c6a8a41..2e0f4a4076a 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -92,6 +92,7 @@ enum {
 	HOST_CTRL_STOP		= (1 << 18), /* latched PCI STOP */
 	HOST_CTRL_DEVSEL	= (1 << 19), /* latched PCI DEVSEL */
 	HOST_CTRL_REQ64		= (1 << 20), /* latched PCI REQ64 */
+	HOST_CTRL_GLOBAL_RST	= (1 << 31), /* global reset */
 
 	/*
 	 * Port registers
@@ -338,6 +339,7 @@ static int sil24_port_start(struct ata_port *ap);
 static void sil24_port_stop(struct ata_port *ap);
 static void sil24_host_stop(struct ata_host_set *host_set);
 static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static int sil24_pci_device_resume(struct pci_dev *pdev);
 
 static const struct pci_device_id sil24_pci_tbl[] = {
 	{ 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
@@ -353,6 +355,8 @@ static struct pci_driver sil24_pci_driver = {
 	.id_table		= sil24_pci_tbl,
 	.probe			= sil24_init_one,
 	.remove			= ata_pci_remove_one, /* safe? */
+	.suspend		= ata_pci_device_suspend,
+	.resume			= sil24_pci_device_resume,
 };
 
 static struct scsi_host_template sil24_sht = {
@@ -372,6 +376,8 @@ static struct scsi_host_template sil24_sht = {
 	.slave_configure	= ata_scsi_slave_config,
 	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+	.suspend		= ata_scsi_device_suspend,
+	.resume			= ata_scsi_device_resume,
 };
 
 static const struct ata_port_operations sil24_ops = {
@@ -607,7 +613,7 @@ static int sil24_hardreset(struct ata_port *ap, unsigned int *class)
 	/* SStatus oscillates between zero and valid status after
 	 * DEV_RST, debounce it.
 	 */
-	rc = sata_phy_debounce(ap, sata_deb_timing_before_fsrst);
+	rc = sata_phy_debounce(ap, sata_deb_timing_long);
 	if (rc) {
 		reason = "PHY debouncing failed";
 		goto err;
@@ -988,6 +994,64 @@ static void sil24_host_stop(struct ata_host_set *host_set)
 	kfree(hpriv);
 }
 
+static void sil24_init_controller(struct pci_dev *pdev, int n_ports,
+				  unsigned long host_flags,
+				  void __iomem *host_base,
+				  void __iomem *port_base)
+{
+	u32 tmp;
+	int i;
+
+	/* GPIO off */
+	writel(0, host_base + HOST_FLASH_CMD);
+
+	/* clear global reset & mask interrupts during initialization */
+	writel(0, host_base + HOST_CTRL);
+
+	/* init ports */
+	for (i = 0; i < n_ports; i++) {
+		void __iomem *port = port_base + i * PORT_REGS_SIZE;
+
+		/* Initial PHY setting */
+		writel(0x20c, port + PORT_PHY_CFG);
+
+		/* Clear port RST */
+		tmp = readl(port + PORT_CTRL_STAT);
+		if (tmp & PORT_CS_PORT_RST) {
+			writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
+			tmp = ata_wait_register(port + PORT_CTRL_STAT,
+						PORT_CS_PORT_RST,
+						PORT_CS_PORT_RST, 10, 100);
+			if (tmp & PORT_CS_PORT_RST)
+				dev_printk(KERN_ERR, &pdev->dev,
+					   "failed to clear port RST\n");
+		}
+
+		/* Configure IRQ WoC */
+		if (host_flags & SIL24_FLAG_PCIX_IRQ_WOC)
+			writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
+		else
+			writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
+
+		/* Zero error counters. */
+		writel(0x8000, port + PORT_DECODE_ERR_THRESH);
+		writel(0x8000, port + PORT_CRC_ERR_THRESH);
+		writel(0x8000, port + PORT_HSHK_ERR_THRESH);
+		writel(0x0000, port + PORT_DECODE_ERR_CNT);
+		writel(0x0000, port + PORT_CRC_ERR_CNT);
+		writel(0x0000, port + PORT_HSHK_ERR_CNT);
+
+		/* Always use 64bit activation */
+		writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
+
+		/* Clear port multiplier enable and resume bits */
+		writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR);
+	}
+
+	/* Turn on interrupts */
+	writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL);
+}
+
 static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version = 0;
@@ -1076,9 +1140,6 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		}
 	}
 
-	/* GPIO off */
-	writel(0, host_base + HOST_FLASH_CMD);
-
 	/* Apply workaround for completion IRQ loss on PCI-X errata */
 	if (probe_ent->host_flags & SIL24_FLAG_PCIX_IRQ_WOC) {
 		tmp = readl(host_base + HOST_CTRL);
@@ -1090,56 +1151,18 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 			probe_ent->host_flags &= ~SIL24_FLAG_PCIX_IRQ_WOC;
 	}
 
-	/* clear global reset & mask interrupts during initialization */
-	writel(0, host_base + HOST_CTRL);
-
 	for (i = 0; i < probe_ent->n_ports; i++) {
-		void __iomem *port = port_base + i * PORT_REGS_SIZE;
-		unsigned long portu = (unsigned long)port;
+		unsigned long portu =
+			(unsigned long)port_base + i * PORT_REGS_SIZE;
 
 		probe_ent->port[i].cmd_addr = portu;
 		probe_ent->port[i].scr_addr = portu + PORT_SCONTROL;
 
 		ata_std_ports(&probe_ent->port[i]);
-
-		/* Initial PHY setting */
-		writel(0x20c, port + PORT_PHY_CFG);
-
-		/* Clear port RST */
-		tmp = readl(port + PORT_CTRL_STAT);
-		if (tmp & PORT_CS_PORT_RST) {
-			writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
-			tmp = ata_wait_register(port + PORT_CTRL_STAT,
-						PORT_CS_PORT_RST,
-						PORT_CS_PORT_RST, 10, 100);
-			if (tmp & PORT_CS_PORT_RST)
-				dev_printk(KERN_ERR, &pdev->dev,
-					   "failed to clear port RST\n");
-		}
-
-		/* Configure IRQ WoC */
-		if (probe_ent->host_flags & SIL24_FLAG_PCIX_IRQ_WOC)
-			writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
-		else
-			writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
-
-		/* Zero error counters. */
-		writel(0x8000, port + PORT_DECODE_ERR_THRESH);
-		writel(0x8000, port + PORT_CRC_ERR_THRESH);
-		writel(0x8000, port + PORT_HSHK_ERR_THRESH);
-		writel(0x0000, port + PORT_DECODE_ERR_CNT);
-		writel(0x0000, port + PORT_CRC_ERR_CNT);
-		writel(0x0000, port + PORT_HSHK_ERR_CNT);
-
-		/* Always use 64bit activation */
-		writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
-
-		/* Clear port multiplier enable and resume bits */
-		writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR);
 	}
 
-	/* Turn on interrupts */
-	writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL);
+	sil24_init_controller(pdev, probe_ent->n_ports, probe_ent->host_flags,
+			      host_base, port_base);
 
 	pci_set_master(pdev);
 
@@ -1162,6 +1185,25 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	return rc;
 }
 
+static int sil24_pci_device_resume(struct pci_dev *pdev)
+{
+	struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
+	struct sil24_host_priv *hpriv = host_set->private_data;
+
+	ata_pci_device_do_resume(pdev);
+
+	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND)
+		writel(HOST_CTRL_GLOBAL_RST, hpriv->host_base + HOST_CTRL);
+
+	sil24_init_controller(pdev, host_set->n_ports,
+			      host_set->ports[0]->flags,
+			      hpriv->host_base, hpriv->port_base);
+
+	ata_host_set_resume(host_set);
+
+	return 0;
+}
+
 static int __init sil24_init(void)
 {
 	return pci_module_init(&sil24_pci_driver);
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index 916fe6fba75..ad37871594f 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -297,7 +297,7 @@ static const struct ata_port_operations vsc_sata_ops = {
 	.bmdma_status		= ata_bmdma_status,
 	.qc_prep		= ata_qc_prep,
 	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_pio_data_xfer,
+	.data_xfer		= ata_mmio_data_xfer,
 	.freeze			= ata_bmdma_freeze,
 	.thaw			= ata_bmdma_thaw,
 	.error_handler		= ata_bmdma_error_handler,
diff --git a/drivers/serial/at91_serial.c b/drivers/serial/at91_serial.c
index a7d664383da..54c6b2adf7b 100644
--- a/drivers/serial/at91_serial.c
+++ b/drivers/serial/at91_serial.c
@@ -41,6 +41,7 @@
 #include <asm/mach/serial_at91.h>
 #include <asm/arch/board.h>
 #include <asm/arch/system.h>
+#include <asm/arch/gpio.h>
 
 #if defined(CONFIG_SERIAL_AT91_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
 #define SUPPORT_SYSRQ
@@ -140,9 +141,9 @@ static void at91_set_mctrl(struct uart_port *port, u_int mctrl)
 	 */
 	if (port->mapbase == AT91_BASE_US0) {
 		if (mctrl & TIOCM_RTS)
-			at91_sys_write(AT91_PIOA + PIO_CODR, AT91_PA21_RTS0);
+			at91_set_gpio_value(AT91_PIN_PA21, 0);
 		else
-			at91_sys_write(AT91_PIOA + PIO_SODR, AT91_PA21_RTS0);
+			at91_set_gpio_value(AT91_PIN_PA21, 1);
 	}
 }
 
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 5980c45998c..89ba0df14c2 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -454,7 +454,7 @@ static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *ho
 	fl->fl_ops = &nlmclnt_lock_ops;
 }
 
-static void do_vfs_lock(struct file_lock *fl)
+static int do_vfs_lock(struct file_lock *fl)
 {
 	int res = 0;
 	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
@@ -467,9 +467,7 @@ static int do_vfs_lock(struct file_lock *fl)
 		default:
 			BUG();
 	}
-	if (res < 0)
-		printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n",
-				__FUNCTION__);
+	return res;
 }
 
 /*
@@ -498,6 +496,7 @@ nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
 	struct nlm_host *host = req->a_host;
 	struct nlm_res *resp = &req->a_res;
 	struct nlm_wait *block = NULL;
+	unsigned char fl_flags = fl->fl_flags;
 	int status = -ENOLCK;
 
 	if (!host->h_monitored && nsm_monitor(host) < 0) {
@@ -505,6 +504,10 @@ nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
 			host->h_name);
 		goto out;
 	}
+	fl->fl_flags |= FL_ACCESS;
+	status = do_vfs_lock(fl);
+	if (status < 0)
+		goto out;
 
 	block = nlmclnt_prepare_block(host, fl);
 again:
@@ -539,9 +542,10 @@ again:
 			up_read(&host->h_rwsem);
 			goto again;
 		}
-		fl->fl_flags |= FL_SLEEP;
 		/* Ensure the resulting lock will get added to granted list */
-		do_vfs_lock(fl);
+		fl->fl_flags = fl_flags | FL_SLEEP;
+		if (do_vfs_lock(fl) < 0)
+			printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __FUNCTION__);
 		up_read(&host->h_rwsem);
 	}
 	status = nlm_stat_to_errno(resp->status);
@@ -552,6 +556,7 @@ out_unblock:
 		nlmclnt_cancel(host, req->a_args.block, fl);
 out:
 	nlm_release_call(req);
+	fl->fl_flags = fl_flags;
 	return status;
 }
 
@@ -606,15 +611,19 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
 {
 	struct nlm_host *host = req->a_host;
 	struct nlm_res *resp = &req->a_res;
-	int status;
+	int status = 0;
 
 	/*
 	 * Note: the server is supposed to either grant us the unlock
 	 * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either
 	 * case, we want to unlock.
 	 */
+	fl->fl_flags |= FL_EXISTS;
 	down_read(&host->h_rwsem);
-	do_vfs_lock(fl);
+	if (do_vfs_lock(fl) == -ENOENT) {
+		up_read(&host->h_rwsem);
+		goto out;
+	}
 	up_read(&host->h_rwsem);
 
 	if (req->a_flags & RPC_TASK_ASYNC)
@@ -624,7 +633,6 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
 	if (status < 0)
 		goto out;
 
-	status = 0;
 	if (resp->status == NLM_LCK_GRANTED)
 		goto out;
 
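The lockd changes hinge on do_vfs_lock() now reporting the VFS result instead of merely logging it, and on FL_ACCESS turning a lock request into a pure conflict probe so nlmclnt_lock() can fail fast on a local conflict before talking to the server. A hedged standalone sketch of that probe-first pattern, using a toy lock table rather than the VFS:

#include <stdio.h>

#define FL_ACCESS 1  /* probe only, don't record the lock */

static int held;     /* toy single-lock table */

/* With FL_ACCESS, check for conflicts without inserting, in the
 * spirit of flock_lock_file() in the fs/locks.c hunks below. */
static int try_lock(int flags)
{
	if (held)
		return -1;          /* conflict */
	if (flags & FL_ACCESS)
		return 0;           /* probe succeeded, insert nothing */
	held = 1;
	return 0;
}

int main(void)
{
	if (try_lock(FL_ACCESS) < 0) {
		puts("local conflict, skip the server round-trip");
		return 1;
	}
	try_lock(0);                /* actually take it later */
	puts("lock acquired");
	return 0;
}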
diff --git a/fs/locks.c b/fs/locks.c
index 1ad29c9b625..b0b41a64e10 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -725,6 +725,10 @@ next_task:
 /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
  * at the head of the list, but that's secret knowledge known only to
  * flock_lock_file and posix_lock_file.
+ *
+ * Note that if called with an FL_EXISTS argument, the caller may determine
+ * whether or not a lock was successfully freed by testing the return
+ * value for -ENOENT.
  */
 static int flock_lock_file(struct file *filp, struct file_lock *request)
 {
@@ -735,6 +739,8 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 	int found = 0;
 
 	lock_kernel();
+	if (request->fl_flags & FL_ACCESS)
+		goto find_conflict;
 	for_each_lock(inode, before) {
 		struct file_lock *fl = *before;
 		if (IS_POSIX(fl))
@@ -750,8 +756,11 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 		break;
 	}
 
-	if (request->fl_type == F_UNLCK)
+	if (request->fl_type == F_UNLCK) {
+		if ((request->fl_flags & FL_EXISTS) && !found)
+			error = -ENOENT;
 		goto out;
+	}
 
 	error = -ENOMEM;
 	new_fl = locks_alloc_lock();
@@ -764,6 +773,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 	if (found)
 		cond_resched();
 
+find_conflict:
 	for_each_lock(inode, before) {
 		struct file_lock *fl = *before;
 		if (IS_POSIX(fl))
@@ -777,6 +787,8 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 			locks_insert_block(fl, request);
 			goto out;
 		}
+		if (request->fl_flags & FL_ACCESS)
+			goto out;
 		locks_copy_lock(new_fl, request);
 		locks_insert_lock(&inode->i_flock, new_fl);
 		new_fl = NULL;
@@ -948,8 +960,11 @@ static int __posix_lock_file_conf(struct inode *inode, struct file_lock *request
 
 	error = 0;
 	if (!added) {
-		if (request->fl_type == F_UNLCK)
+		if (request->fl_type == F_UNLCK) {
+			if (request->fl_flags & FL_EXISTS)
+				error = -ENOENT;
 			goto out;
+		}
 
 		if (!new_fl) {
 			error = -ENOLCK;
@@ -996,6 +1011,10 @@ static int __posix_lock_file_conf(struct inode *inode, struct file_lock *request
  * Add a POSIX style lock to a file.
  * We merge adjacent & overlapping locks whenever possible.
  * POSIX locks are sorted by owner task, then by starting address
+ *
+ * Note that if called with an FL_EXISTS argument, the caller may determine
+ * whether or not a lock was successfully freed by testing the return
+ * value for -ENOENT.
 */
int posix_lock_file(struct file *filp, struct file_lock *fl)
{
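FL_EXISTS gives unlock callers a way to distinguish "freed a lock" from "nothing to free": with the flag set, both flock_lock_file() and __posix_lock_file_conf() return -ENOENT when no matching lock was found, which nlmclnt_unlock() uses above to skip the NLM round trip. A sketch of the calling convention:

#include <errno.h>
#include <stdio.h>

#define FL_EXISTS 2

static int have_lock = 0;   /* pretend no lock is recorded locally */

/* Unlock in the style of posix_lock_file(): with FL_EXISTS, report
 * -ENOENT when there was nothing to remove. */
static int vfs_unlock(int flags)
{
	if (!have_lock)
		return (flags & FL_EXISTS) ? -ENOENT : 0;
	have_lock = 0;
	return 0;
}

int main(void)
{
	if (vfs_unlock(FL_EXISTS) == -ENOENT) {
		puts("no local lock; skip the NLM UNLOCK call");
		return 0;
	}
	puts("lock freed; tell the server");
	return 0;
}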
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 3ddda6f7ecc..e7ffb4deb3e 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -690,7 +690,9 @@ int nfs_lookup_verify_inode(struct inode *inode, struct nameidata *nd)
 			goto out_force;
 		/* This is an open(2) */
 		if (nfs_lookup_check_intent(nd, LOOKUP_OPEN) != 0 &&
-		    !(server->flags & NFS_MOUNT_NOCTO))
+		    !(server->flags & NFS_MOUNT_NOCTO) &&
+		    (S_ISREG(inode->i_mode) ||
+		     S_ISDIR(inode->i_mode)))
 			goto out_force;
 	}
 	return nfs_revalidate_inode(server, inode);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 4cdd1b499e3..fecd3b095de 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -67,25 +67,19 @@ struct nfs_direct_req {
 	struct kref		kref;		/* release manager */
 
 	/* I/O parameters */
-	struct list_head	list,		/* nfs_read/write_data structs */
-				rewrite_list;	/* saved nfs_write_data structs */
 	struct nfs_open_context *ctx;		/* file open context info */
 	struct kiocb *		iocb;		/* controlling i/o request */
 	struct inode *		inode;		/* target file of i/o */
-	unsigned long		user_addr;	/* location of user's buffer */
-	size_t			user_count;	/* total bytes to move */
-	loff_t			pos;		/* starting offset in file */
-	struct page **		pages;		/* pages in our buffer */
-	unsigned int		npages;		/* count of pages */
 
 	/* completion state */
+	atomic_t		io_count;	/* i/os we're waiting for */
 	spinlock_t		lock;		/* protect completion state */
-	int			outstanding;	/* i/os we're waiting for */
 	ssize_t			count,		/* bytes actually processed */
 				error;		/* any reported error */
 	struct completion	completion;	/* wait for i/o completion */
 
 	/* commit state */
+	struct list_head	rewrite_list;	/* saved nfs_write_data structs */
 	struct nfs_write_data *	commit_data;	/* special write_data for commits */
 	int			flags;
 #define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
@@ -93,8 +87,37 @@ struct nfs_direct_req {
 	struct nfs_writeverf	verf;		/* unstable write verifier */
 };
 
-static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync);
 static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
+static const struct rpc_call_ops nfs_write_direct_ops;
+
+static inline void get_dreq(struct nfs_direct_req *dreq)
+{
+	atomic_inc(&dreq->io_count);
+}
+
+static inline int put_dreq(struct nfs_direct_req *dreq)
+{
+	return atomic_dec_and_test(&dreq->io_count);
+}
+
+/*
+ * "size" is never larger than rsize or wsize.
+ */
+static inline int nfs_direct_count_pages(unsigned long user_addr, size_t size)
+{
+	int page_count;
+
+	page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	page_count -= user_addr >> PAGE_SHIFT;
+	BUG_ON(page_count < 0);
+
+	return page_count;
+}
+
+static inline unsigned int nfs_max_pages(unsigned int size)
+{
+	return (size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+}
 
 /**
  * nfs_direct_IO - NFS address space operation for direct I/O
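get_dreq()/put_dreq() replace the lock-protected `outstanding` counter with an atomic reference count: the scheduler holds one reference while dispatching and each in-flight RPC holds another, and whoever drops the count to zero completes the request. The same shape in portable C11; this is a sketch, not the kernel API:

#include <stdatomic.h>
#include <stdio.h>

struct dreq { atomic_int io_count; };

static void get_dreq(struct dreq *d) { atomic_fetch_add(&d->io_count, 1); }

/* Returns nonzero for the caller that dropped the last reference. */
static int put_dreq(struct dreq *d)
{
	return atomic_fetch_sub(&d->io_count, 1) == 1;
}

int main(void)
{
	struct dreq d = { .io_count = 0 };

	get_dreq(&d);                /* scheduler's reference */
	get_dreq(&d);                /* one in-flight I/O */

	if (put_dreq(&d))            /* I/O completes: not last */
		puts("unexpected");
	if (put_dreq(&d))            /* scheduler done: last, complete */
		puts("all I/O done, complete request");
	return 0;
}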
@@ -118,50 +141,21 @@ ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_
 	return -EINVAL;
 }
 
-static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty)
+static void nfs_direct_dirty_pages(struct page **pages, int npages)
 {
 	int i;
 	for (i = 0; i < npages; i++) {
 		struct page *page = pages[i];
-		if (do_dirty && !PageCompound(page))
+		if (!PageCompound(page))
 			set_page_dirty_lock(page);
-		page_cache_release(page);
 	}
-	kfree(pages);
 }
 
-static inline int nfs_get_user_pages(int rw, unsigned long user_addr, size_t size, struct page ***pages)
+static void nfs_direct_release_pages(struct page **pages, int npages)
 {
-	int result = -ENOMEM;
-	unsigned long page_count;
-	size_t array_size;
-
-	page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	page_count -= user_addr >> PAGE_SHIFT;
-
-	array_size = (page_count * sizeof(struct page *));
-	*pages = kmalloc(array_size, GFP_KERNEL);
-	if (*pages) {
-		down_read(&current->mm->mmap_sem);
-		result = get_user_pages(current, current->mm, user_addr,
-					page_count, (rw == READ), 0,
-					*pages, NULL);
-		up_read(&current->mm->mmap_sem);
-		if (result != page_count) {
-			/*
-			 * If we got fewer pages than expected from
-			 * get_user_pages(), the user buffer runs off the
-			 * end of a mapping; return EFAULT.
-			 */
-			if (result >= 0) {
-				nfs_free_user_pages(*pages, result, 0);
-				result = -EFAULT;
-			} else
-				kfree(*pages);
-			*pages = NULL;
-		}
-	}
-	return result;
+	int i;
+	for (i = 0; i < npages; i++)
+		page_cache_release(pages[i]);
 }
 
 static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
@@ -173,13 +167,13 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
 		return NULL;
 
 	kref_init(&dreq->kref);
+	kref_get(&dreq->kref);
 	init_completion(&dreq->completion);
-	INIT_LIST_HEAD(&dreq->list);
 	INIT_LIST_HEAD(&dreq->rewrite_list);
 	dreq->iocb = NULL;
 	dreq->ctx = NULL;
 	spin_lock_init(&dreq->lock);
-	dreq->outstanding = 0;
+	atomic_set(&dreq->io_count, 0);
 	dreq->count = 0;
 	dreq->error = 0;
 	dreq->flags = 0;
@@ -220,18 +214,11 @@ out:
 }
 
 /*
- * We must hold a reference to all the pages in this direct read request
- * until the RPCs complete. This could be long *after* we are woken up in
- * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
- *
- * In addition, synchronous I/O uses a stack-allocated iocb. Thus we
- * can't trust the iocb is still valid here if this is a synchronous
- * request. If the waiter is woken prematurely, the iocb is long gone.
+ * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
+ * the iocb is still valid here if this is a synchronous request.
 */
 static void nfs_direct_complete(struct nfs_direct_req *dreq)
 {
-	nfs_free_user_pages(dreq->pages, dreq->npages, 1);
-
 	if (dreq->iocb) {
 		long res = (long) dreq->error;
 		if (!res)
@@ -244,48 +231,10 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq)
 }
 
 /*
- * Note we also set the number of requests we have in the dreq when we are
- * done.  This prevents races with I/O completion so we will always wait
- * until all requests have been dispatched and completed.
+ * We must hold a reference to all the pages in this direct read request
+ * until the RPCs complete.  This could be long *after* we are woken up in
+ * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
 */
-static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
-{
-	struct list_head *list;
-	struct nfs_direct_req *dreq;
-	unsigned int rpages = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-
-	dreq = nfs_direct_req_alloc();
-	if (!dreq)
-		return NULL;
-
-	list = &dreq->list;
-	for(;;) {
-		struct nfs_read_data *data = nfs_readdata_alloc(rpages);
-
-		if (unlikely(!data)) {
-			while (!list_empty(list)) {
-				data = list_entry(list->next,
-						  struct nfs_read_data, pages);
-				list_del(&data->pages);
-				nfs_readdata_free(data);
-			}
-			kref_put(&dreq->kref, nfs_direct_req_release);
-			return NULL;
-		}
-
-		INIT_LIST_HEAD(&data->pages);
-		list_add(&data->pages, list);
-
-		data->req = (struct nfs_page *) dreq;
-		dreq->outstanding++;
-		if (nbytes <= rsize)
-			break;
-		nbytes -= rsize;
-	}
-	kref_get(&dreq->kref);
-	return dreq;
-}
-
 static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 {
 	struct nfs_read_data *data = calldata;
@@ -294,6 +243,9 @@ static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
294 if (nfs_readpage_result(task, data) != 0) 243 if (nfs_readpage_result(task, data) != 0)
295 return; 244 return;
296 245
246 nfs_direct_dirty_pages(data->pagevec, data->npages);
247 nfs_direct_release_pages(data->pagevec, data->npages);
248
297 spin_lock(&dreq->lock); 249 spin_lock(&dreq->lock);
298 250
299 if (likely(task->tk_status >= 0)) 251 if (likely(task->tk_status >= 0))
@@ -301,13 +253,10 @@ static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
301 else 253 else
302 dreq->error = task->tk_status; 254 dreq->error = task->tk_status;
303 255
304 if (--dreq->outstanding) {
305 spin_unlock(&dreq->lock);
306 return;
307 }
308
309 spin_unlock(&dreq->lock); 256 spin_unlock(&dreq->lock);
310 nfs_direct_complete(dreq); 257
258 if (put_dreq(dreq))
259 nfs_direct_complete(dreq);
311} 260}
312 261
313static const struct rpc_call_ops nfs_read_direct_ops = { 262static const struct rpc_call_ops nfs_read_direct_ops = {
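The hunks above replace the lock-protected "outstanding" counter with plain reference counting: the scheduler holds one reference for itself and one per in-flight RPC, and whichever path drops the last reference completes the request. A minimal sketch of such helpers, assuming the get_dreq()/put_dreq() introduced earlier in this patch wrap an atomic_t io_count (their real definitions are not shown in this hunk and may differ):

	#include <asm/atomic.h>

	struct sketch_dreq {
		atomic_t io_count;	/* set to 0 with atomic_set() at alloc time */
	};

	static void get_dreq(struct sketch_dreq *dreq)
	{
		atomic_inc(&dreq->io_count);
	}

	static int put_dreq(struct sketch_dreq *dreq)
	{
		/* nonzero only for the caller dropping the last reference,
		 * so exactly one path ends up calling nfs_direct_complete() */
		return atomic_dec_and_test(&dreq->io_count);
	}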
@@ -316,41 +265,60 @@ static const struct rpc_call_ops nfs_read_direct_ops = {
316}; 265};
317 266
318/* 267/*
319 * For each nfs_read_data struct that was allocated on the list, dispatch 268 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
320 * an NFS READ operation 269 * operation. If nfs_readdata_alloc() or get_user_pages() fails,
270 * bail and stop sending more reads. Read length accounting is
271 * handled automatically by nfs_direct_read_result(). If no requests
272 * were sent at all, return an error instead.
321 */ 273 */
322static void nfs_direct_read_schedule(struct nfs_direct_req *dreq) 274static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos)
323{ 275{
324 struct nfs_open_context *ctx = dreq->ctx; 276 struct nfs_open_context *ctx = dreq->ctx;
325 struct inode *inode = ctx->dentry->d_inode; 277 struct inode *inode = ctx->dentry->d_inode;
326 struct list_head *list = &dreq->list;
327 struct page **pages = dreq->pages;
328 size_t count = dreq->user_count;
329 loff_t pos = dreq->pos;
330 size_t rsize = NFS_SERVER(inode)->rsize; 278 size_t rsize = NFS_SERVER(inode)->rsize;
331 unsigned int curpage, pgbase; 279 unsigned int rpages = nfs_max_pages(rsize);
280 unsigned int pgbase;
281 int result;
282 ssize_t started = 0;
283
284 get_dreq(dreq);
332 285
333 curpage = 0; 286 pgbase = user_addr & ~PAGE_MASK;
334 pgbase = dreq->user_addr & ~PAGE_MASK;
335 do { 287 do {
336 struct nfs_read_data *data; 288 struct nfs_read_data *data;
337 size_t bytes; 289 size_t bytes;
338 290
291 result = -ENOMEM;
292 data = nfs_readdata_alloc(rpages);
293 if (unlikely(!data))
294 break;
295
339 bytes = rsize; 296 bytes = rsize;
340 if (count < rsize) 297 if (count < rsize)
341 bytes = count; 298 bytes = count;
342 299
343 BUG_ON(list_empty(list)); 300 data->npages = nfs_direct_count_pages(user_addr, bytes);
344 data = list_entry(list->next, struct nfs_read_data, pages); 301 down_read(&current->mm->mmap_sem);
345 list_del_init(&data->pages); 302 result = get_user_pages(current, current->mm, user_addr,
303 data->npages, 1, 0, data->pagevec, NULL);
304 up_read(&current->mm->mmap_sem);
305 if (unlikely(result < data->npages)) {
306 if (result > 0)
307 nfs_direct_release_pages(data->pagevec, result);
308 nfs_readdata_release(data);
309 break;
310 }
311
312 get_dreq(dreq);
346 313
314 data->req = (struct nfs_page *) dreq;
347 data->inode = inode; 315 data->inode = inode;
348 data->cred = ctx->cred; 316 data->cred = ctx->cred;
349 data->args.fh = NFS_FH(inode); 317 data->args.fh = NFS_FH(inode);
350 data->args.context = ctx; 318 data->args.context = ctx;
351 data->args.offset = pos; 319 data->args.offset = pos;
352 data->args.pgbase = pgbase; 320 data->args.pgbase = pgbase;
353 data->args.pages = &pages[curpage]; 321 data->args.pages = data->pagevec;
354 data->args.count = bytes; 322 data->args.count = bytes;
355 data->res.fattr = &data->fattr; 323 data->res.fattr = &data->fattr;
356 data->res.eof = 0; 324 data->res.eof = 0;
@@ -373,33 +341,35 @@ static void nfs_direct_read_schedule(struct nfs_direct_req *dreq)
373 bytes, 341 bytes,
374 (unsigned long long)data->args.offset); 342 (unsigned long long)data->args.offset);
375 343
344 started += bytes;
345 user_addr += bytes;
376 pos += bytes; 346 pos += bytes;
377 pgbase += bytes; 347 pgbase += bytes;
378 curpage += pgbase >> PAGE_SHIFT;
379 pgbase &= ~PAGE_MASK; 348 pgbase &= ~PAGE_MASK;
380 349
381 count -= bytes; 350 count -= bytes;
382 } while (count != 0); 351 } while (count != 0);
383 BUG_ON(!list_empty(list)); 352
353 if (put_dreq(dreq))
354 nfs_direct_complete(dreq);
355
356 if (started)
357 return 0;
358 return result < 0 ? (ssize_t) result : -EFAULT;
384} 359}
385 360
386static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, unsigned int nr_pages) 361static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos)
387{ 362{
388 ssize_t result; 363 ssize_t result = 0;
389 sigset_t oldset; 364 sigset_t oldset;
390 struct inode *inode = iocb->ki_filp->f_mapping->host; 365 struct inode *inode = iocb->ki_filp->f_mapping->host;
391 struct rpc_clnt *clnt = NFS_CLIENT(inode); 366 struct rpc_clnt *clnt = NFS_CLIENT(inode);
392 struct nfs_direct_req *dreq; 367 struct nfs_direct_req *dreq;
393 368
394 dreq = nfs_direct_read_alloc(count, NFS_SERVER(inode)->rsize); 369 dreq = nfs_direct_req_alloc();
395 if (!dreq) 370 if (!dreq)
396 return -ENOMEM; 371 return -ENOMEM;
397 372
398 dreq->user_addr = user_addr;
399 dreq->user_count = count;
400 dreq->pos = pos;
401 dreq->pages = pages;
402 dreq->npages = nr_pages;
403 dreq->inode = inode; 373 dreq->inode = inode;
404 dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data); 374 dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
405 if (!is_sync_kiocb(iocb)) 375 if (!is_sync_kiocb(iocb))
@@ -407,8 +377,9 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size
407 377
408 nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count); 378 nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
409 rpc_clnt_sigmask(clnt, &oldset); 379 rpc_clnt_sigmask(clnt, &oldset);
410 nfs_direct_read_schedule(dreq); 380 result = nfs_direct_read_schedule(dreq, user_addr, count, pos);
411 result = nfs_direct_wait(dreq); 381 if (!result)
382 result = nfs_direct_wait(dreq);
412 rpc_clnt_sigunmask(clnt, &oldset); 383 rpc_clnt_sigunmask(clnt, &oldset);
413 384
414 return result; 385 return result;
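Two conventions in the new nfs_direct_read_schedule() are worth spelling out: user pages are pinned per rsize'd chunk rather than all up front, and the function reports success (0) as soon as any chunk was dispatched, leaving byte accounting to the completion callbacks. A hypothetical skeleton under those assumptions (pagevec allocation, request setup, and partial-pin cleanup elided):

	static ssize_t sketch_schedule(unsigned long user_addr, size_t count,
				       size_t chunk, struct page **pagevec)
	{
		ssize_t started = 0;
		int result = -ENOMEM;

		do {
			size_t bytes = count < chunk ? count : chunk;
			int npages = ((user_addr & ~PAGE_MASK) + bytes +
				      PAGE_SIZE - 1) >> PAGE_SHIFT;

			down_read(&current->mm->mmap_sem);
			result = get_user_pages(current, current->mm, user_addr,
						npages, 1, 0, pagevec, NULL);
			up_read(&current->mm->mmap_sem);
			if (result < npages)
				break;	/* the real code first releases any pages it did pin */

			/* ... fill in one nfs_read_data and rpc_execute() it ... */

			started += bytes;
			user_addr += bytes;
			count -= bytes;
		} while (count != 0);

		if (started)
			return 0;
		return result < 0 ? (ssize_t) result : -EFAULT;
	}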
@@ -416,10 +387,10 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size
416 387
417static void nfs_direct_free_writedata(struct nfs_direct_req *dreq) 388static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
418{ 389{
419 list_splice_init(&dreq->rewrite_list, &dreq->list); 390 while (!list_empty(&dreq->rewrite_list)) {
420 while (!list_empty(&dreq->list)) { 391 struct nfs_write_data *data = list_entry(dreq->rewrite_list.next, struct nfs_write_data, pages);
421 struct nfs_write_data *data = list_entry(dreq->list.next, struct nfs_write_data, pages);
422 list_del(&data->pages); 392 list_del(&data->pages);
393 nfs_direct_release_pages(data->pagevec, data->npages);
423 nfs_writedata_release(data); 394 nfs_writedata_release(data);
424 } 395 }
425} 396}
@@ -427,14 +398,51 @@ static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
427#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) 398#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
428static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq) 399static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
429{ 400{
430 struct list_head *pos; 401 struct inode *inode = dreq->inode;
402 struct list_head *p;
403 struct nfs_write_data *data;
431 404
432 list_splice_init(&dreq->rewrite_list, &dreq->list);
433 list_for_each(pos, &dreq->list)
434 dreq->outstanding++;
435 dreq->count = 0; 405 dreq->count = 0;
406 get_dreq(dreq);
407
408 list_for_each(p, &dreq->rewrite_list) {
409 data = list_entry(p, struct nfs_write_data, pages);
410
411 get_dreq(dreq);
412
413 /*
414 * Reset data->res.
415 */
416 nfs_fattr_init(&data->fattr);
417 data->res.count = data->args.count;
418 memset(&data->verf, 0, sizeof(data->verf));
419
420 /*
421 * Reuse data->task; data->args should not have changed
422 * since the original request was sent.
423 */
424 rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
425 &nfs_write_direct_ops, data);
426 NFS_PROTO(inode)->write_setup(data, FLUSH_STABLE);
427
428 data->task.tk_priority = RPC_PRIORITY_NORMAL;
429 data->task.tk_cookie = (unsigned long) inode;
430
431 /*
432 * We're called via an RPC callback, so BKL is already held.
433 */
434 rpc_execute(&data->task);
435
436 dprintk("NFS: %5u rescheduled direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
437 data->task.tk_pid,
438 inode->i_sb->s_id,
439 (long long)NFS_FILEID(inode),
440 data->args.count,
441 (unsigned long long)data->args.offset);
442 }
436 443
437 nfs_direct_write_schedule(dreq, FLUSH_STABLE); 444 if (put_dreq(dreq))
445 nfs_direct_write_complete(dreq, inode);
438} 446}
439 447
440static void nfs_direct_commit_result(struct rpc_task *task, void *calldata) 448static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
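nfs_direct_write_reschedule() above leans on the fact that data->args and the pinned pagevec survive the first pass untouched, so a resend only needs the result fields cleared before the task is re-initialized. A condensed sketch of that reset, using only calls visible in the hunk:

	static void sketch_resend(struct nfs_write_data *data, struct inode *inode)
	{
		nfs_fattr_init(&data->fattr);		/* forget stale attributes */
		data->res.count = data->args.count;
		memset(&data->verf, 0, sizeof(data->verf));

		rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
			      &nfs_write_direct_ops, data);
		NFS_PROTO(inode)->write_setup(data, FLUSH_STABLE);
		rpc_execute(&data->task);
	}

Resends go out FLUSH_STABLE because the unstable data written on the first pass may have been lost by the server; asking for stable writes avoids another commit round.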
@@ -471,8 +479,8 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
471 data->cred = dreq->ctx->cred; 479 data->cred = dreq->ctx->cred;
472 480
473 data->args.fh = NFS_FH(data->inode); 481 data->args.fh = NFS_FH(data->inode);
474 data->args.offset = dreq->pos; 482 data->args.offset = 0;
475 data->args.count = dreq->user_count; 483 data->args.count = 0;
476 data->res.count = 0; 484 data->res.count = 0;
477 data->res.fattr = &data->fattr; 485 data->res.fattr = &data->fattr;
478 data->res.verf = &data->verf; 486 data->res.verf = &data->verf;
@@ -534,47 +542,6 @@ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode
534} 542}
535#endif 543#endif
536 544
537static struct nfs_direct_req *nfs_direct_write_alloc(size_t nbytes, size_t wsize)
538{
539 struct list_head *list;
540 struct nfs_direct_req *dreq;
541 unsigned int wpages = (wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
542
543 dreq = nfs_direct_req_alloc();
544 if (!dreq)
545 return NULL;
546
547 list = &dreq->list;
548 for(;;) {
549 struct nfs_write_data *data = nfs_writedata_alloc(wpages);
550
551 if (unlikely(!data)) {
552 while (!list_empty(list)) {
553 data = list_entry(list->next,
554 struct nfs_write_data, pages);
555 list_del(&data->pages);
556 nfs_writedata_free(data);
557 }
558 kref_put(&dreq->kref, nfs_direct_req_release);
559 return NULL;
560 }
561
562 INIT_LIST_HEAD(&data->pages);
563 list_add(&data->pages, list);
564
565 data->req = (struct nfs_page *) dreq;
566 dreq->outstanding++;
567 if (nbytes <= wsize)
568 break;
569 nbytes -= wsize;
570 }
571
572 nfs_alloc_commit_data(dreq);
573
574 kref_get(&dreq->kref);
575 return dreq;
576}
577
578static void nfs_direct_write_result(struct rpc_task *task, void *calldata) 545static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
579{ 546{
580 struct nfs_write_data *data = calldata; 547 struct nfs_write_data *data = calldata;
@@ -604,8 +571,6 @@ static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
604 } 571 }
605 } 572 }
606 } 573 }
607 /* In case we have to resend */
608 data->args.stable = NFS_FILE_SYNC;
609 574
610 spin_unlock(&dreq->lock); 575 spin_unlock(&dreq->lock);
611} 576}
@@ -619,14 +584,8 @@ static void nfs_direct_write_release(void *calldata)
619 struct nfs_write_data *data = calldata; 584 struct nfs_write_data *data = calldata;
620 struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req; 585 struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
621 586
622 spin_lock(&dreq->lock); 587 if (put_dreq(dreq))
623 if (--dreq->outstanding) { 588 nfs_direct_write_complete(dreq, data->inode);
624 spin_unlock(&dreq->lock);
625 return;
626 }
627 spin_unlock(&dreq->lock);
628
629 nfs_direct_write_complete(dreq, data->inode);
630} 589}
631 590
632static const struct rpc_call_ops nfs_write_direct_ops = { 591static const struct rpc_call_ops nfs_write_direct_ops = {
@@ -635,41 +594,62 @@ static const struct rpc_call_ops nfs_write_direct_ops = {
635}; 594};
636 595
637/* 596/*
638 * For each nfs_write_data struct that was allocated on the list, dispatch 597 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
639 * an NFS WRITE operation 598 * operation. If nfs_writedata_alloc() or get_user_pages() fails,
599 * bail and stop sending more writes. Write length accounting is
600 * handled automatically by nfs_direct_write_result(). If no requests
601 * were sent at all, return an error instead.
640 */ 602 */
641static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync) 603static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos, int sync)
642{ 604{
643 struct nfs_open_context *ctx = dreq->ctx; 605 struct nfs_open_context *ctx = dreq->ctx;
644 struct inode *inode = ctx->dentry->d_inode; 606 struct inode *inode = ctx->dentry->d_inode;
645 struct list_head *list = &dreq->list;
646 struct page **pages = dreq->pages;
647 size_t count = dreq->user_count;
648 loff_t pos = dreq->pos;
649 size_t wsize = NFS_SERVER(inode)->wsize; 607 size_t wsize = NFS_SERVER(inode)->wsize;
650 unsigned int curpage, pgbase; 608 unsigned int wpages = nfs_max_pages(wsize);
609 unsigned int pgbase;
610 int result;
611 ssize_t started = 0;
651 612
652 curpage = 0; 613 get_dreq(dreq);
653 pgbase = dreq->user_addr & ~PAGE_MASK; 614
615 pgbase = user_addr & ~PAGE_MASK;
654 do { 616 do {
655 struct nfs_write_data *data; 617 struct nfs_write_data *data;
656 size_t bytes; 618 size_t bytes;
657 619
620 result = -ENOMEM;
621 data = nfs_writedata_alloc(wpages);
622 if (unlikely(!data))
623 break;
624
658 bytes = wsize; 625 bytes = wsize;
659 if (count < wsize) 626 if (count < wsize)
660 bytes = count; 627 bytes = count;
661 628
662 BUG_ON(list_empty(list)); 629 data->npages = nfs_direct_count_pages(user_addr, bytes);
663 data = list_entry(list->next, struct nfs_write_data, pages); 630 down_read(&current->mm->mmap_sem);
631 result = get_user_pages(current, current->mm, user_addr,
632 data->npages, 0, 0, data->pagevec, NULL);
633 up_read(&current->mm->mmap_sem);
634 if (unlikely(result < data->npages)) {
635 if (result > 0)
636 nfs_direct_release_pages(data->pagevec, result);
637 nfs_writedata_release(data);
638 break;
639 }
640
641 get_dreq(dreq);
642
664 list_move_tail(&data->pages, &dreq->rewrite_list); 643 list_move_tail(&data->pages, &dreq->rewrite_list);
665 644
645 data->req = (struct nfs_page *) dreq;
666 data->inode = inode; 646 data->inode = inode;
667 data->cred = ctx->cred; 647 data->cred = ctx->cred;
668 data->args.fh = NFS_FH(inode); 648 data->args.fh = NFS_FH(inode);
669 data->args.context = ctx; 649 data->args.context = ctx;
670 data->args.offset = pos; 650 data->args.offset = pos;
671 data->args.pgbase = pgbase; 651 data->args.pgbase = pgbase;
672 data->args.pages = &pages[curpage]; 652 data->args.pages = data->pagevec;
673 data->args.count = bytes; 653 data->args.count = bytes;
674 data->res.fattr = &data->fattr; 654 data->res.fattr = &data->fattr;
675 data->res.count = bytes; 655 data->res.count = bytes;
@@ -693,19 +673,26 @@ static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync)
693 bytes, 673 bytes,
694 (unsigned long long)data->args.offset); 674 (unsigned long long)data->args.offset);
695 675
676 started += bytes;
677 user_addr += bytes;
696 pos += bytes; 678 pos += bytes;
697 pgbase += bytes; 679 pgbase += bytes;
698 curpage += pgbase >> PAGE_SHIFT;
699 pgbase &= ~PAGE_MASK; 680 pgbase &= ~PAGE_MASK;
700 681
701 count -= bytes; 682 count -= bytes;
702 } while (count != 0); 683 } while (count != 0);
703 BUG_ON(!list_empty(list)); 684
685 if (put_dreq(dreq))
686 nfs_direct_write_complete(dreq, inode);
687
688 if (started)
689 return 0;
690 return result < 0 ? (ssize_t) result : -EFAULT;
704} 691}
705 692
706static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, int nr_pages) 693static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos)
707{ 694{
708 ssize_t result; 695 ssize_t result = 0;
709 sigset_t oldset; 696 sigset_t oldset;
710 struct inode *inode = iocb->ki_filp->f_mapping->host; 697 struct inode *inode = iocb->ki_filp->f_mapping->host;
711 struct rpc_clnt *clnt = NFS_CLIENT(inode); 698 struct rpc_clnt *clnt = NFS_CLIENT(inode);
@@ -713,17 +700,14 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, siz
713 size_t wsize = NFS_SERVER(inode)->wsize; 700 size_t wsize = NFS_SERVER(inode)->wsize;
714 int sync = 0; 701 int sync = 0;
715 702
716 dreq = nfs_direct_write_alloc(count, wsize); 703 dreq = nfs_direct_req_alloc();
717 if (!dreq) 704 if (!dreq)
718 return -ENOMEM; 705 return -ENOMEM;
706 nfs_alloc_commit_data(dreq);
707
719 if (dreq->commit_data == NULL || count < wsize) 708 if (dreq->commit_data == NULL || count < wsize)
720 sync = FLUSH_STABLE; 709 sync = FLUSH_STABLE;
721 710
722 dreq->user_addr = user_addr;
723 dreq->user_count = count;
724 dreq->pos = pos;
725 dreq->pages = pages;
726 dreq->npages = nr_pages;
727 dreq->inode = inode; 711 dreq->inode = inode;
728 dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data); 712 dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
729 if (!is_sync_kiocb(iocb)) 713 if (!is_sync_kiocb(iocb))
@@ -734,8 +718,9 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, siz
734 nfs_begin_data_update(inode); 718 nfs_begin_data_update(inode);
735 719
736 rpc_clnt_sigmask(clnt, &oldset); 720 rpc_clnt_sigmask(clnt, &oldset);
737 nfs_direct_write_schedule(dreq, sync); 721 result = nfs_direct_write_schedule(dreq, user_addr, count, pos, sync);
738 result = nfs_direct_wait(dreq); 722 if (!result)
723 result = nfs_direct_wait(dreq);
739 rpc_clnt_sigunmask(clnt, &oldset); 724 rpc_clnt_sigunmask(clnt, &oldset);
740 725
741 return result; 726 return result;
@@ -765,8 +750,6 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, siz
765ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos) 750ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
766{ 751{
767 ssize_t retval = -EINVAL; 752 ssize_t retval = -EINVAL;
768 int page_count;
769 struct page **pages;
770 struct file *file = iocb->ki_filp; 753 struct file *file = iocb->ki_filp;
771 struct address_space *mapping = file->f_mapping; 754 struct address_space *mapping = file->f_mapping;
772 755
@@ -788,14 +771,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count,
788 if (retval) 771 if (retval)
789 goto out; 772 goto out;
790 773
791 retval = nfs_get_user_pages(READ, (unsigned long) buf, 774 retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos);
792 count, &pages);
793 if (retval < 0)
794 goto out;
795 page_count = retval;
796
797 retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos,
798 pages, page_count);
799 if (retval > 0) 775 if (retval > 0)
800 iocb->ki_pos = pos + retval; 776 iocb->ki_pos = pos + retval;
801 777
@@ -831,8 +807,6 @@ out:
831ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos) 807ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
832{ 808{
833 ssize_t retval; 809 ssize_t retval;
834 int page_count;
835 struct page **pages;
836 struct file *file = iocb->ki_filp; 810 struct file *file = iocb->ki_filp;
837 struct address_space *mapping = file->f_mapping; 811 struct address_space *mapping = file->f_mapping;
838 812
@@ -860,14 +834,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t
860 if (retval) 834 if (retval)
861 goto out; 835 goto out;
862 836
863 retval = nfs_get_user_pages(WRITE, (unsigned long) buf, 837 retval = nfs_direct_write(iocb, (unsigned long) buf, count, pos);
864 count, &pages);
865 if (retval < 0)
866 goto out;
867 page_count = retval;
868
869 retval = nfs_direct_write(iocb, (unsigned long) buf, count,
870 pos, pages, page_count);
871 838
872 /* 839 /*
873 * XXX: nfs_end_data_update() already ensures this file's 840 * XXX: nfs_end_data_update() already ensures this file's
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index b4916b09219..e6ee97f19d8 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3144,9 +3144,6 @@ static int do_vfs_lock(struct file *file, struct file_lock *fl)
3144 default: 3144 default:
3145 BUG(); 3145 BUG();
3146 } 3146 }
3147 if (res < 0)
3148 printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n",
3149 __FUNCTION__);
3150 return res; 3147 return res;
3151} 3148}
3152 3149
@@ -3258,8 +3255,6 @@ static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
3258 return ERR_PTR(-ENOMEM); 3255 return ERR_PTR(-ENOMEM);
3259 } 3256 }
3260 3257
3261 /* Unlock _before_ we do the RPC call */
3262 do_vfs_lock(fl->fl_file, fl);
3263 return rpc_run_task(NFS_CLIENT(lsp->ls_state->inode), RPC_TASK_ASYNC, &nfs4_locku_ops, data); 3258 return rpc_run_task(NFS_CLIENT(lsp->ls_state->inode), RPC_TASK_ASYNC, &nfs4_locku_ops, data);
3264} 3259}
3265 3260
@@ -3270,30 +3265,28 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
3270 struct rpc_task *task; 3265 struct rpc_task *task;
3271 int status = 0; 3266 int status = 0;
3272 3267
3273 /* Is this a delegated lock? */
3274 if (test_bit(NFS_DELEGATED_STATE, &state->flags))
3275 goto out_unlock;
3276 /* Is this open_owner holding any locks on the server? */
3277 if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
3278 goto out_unlock;
3279
3280 status = nfs4_set_lock_state(state, request); 3268 status = nfs4_set_lock_state(state, request);
3269 /* Unlock _before_ we do the RPC call */
3270 request->fl_flags |= FL_EXISTS;
3271 if (do_vfs_lock(request->fl_file, request) == -ENOENT)
3272 goto out;
3281 if (status != 0) 3273 if (status != 0)
3282 goto out_unlock; 3274 goto out;
3275 /* Is this a delegated lock? */
3276 if (test_bit(NFS_DELEGATED_STATE, &state->flags))
3277 goto out;
3283 lsp = request->fl_u.nfs4_fl.owner; 3278 lsp = request->fl_u.nfs4_fl.owner;
3284 status = -ENOMEM;
3285 seqid = nfs_alloc_seqid(&lsp->ls_seqid); 3279 seqid = nfs_alloc_seqid(&lsp->ls_seqid);
3280 status = -ENOMEM;
3286 if (seqid == NULL) 3281 if (seqid == NULL)
3287 goto out_unlock; 3282 goto out;
3288 task = nfs4_do_unlck(request, request->fl_file->private_data, lsp, seqid); 3283 task = nfs4_do_unlck(request, request->fl_file->private_data, lsp, seqid);
3289 status = PTR_ERR(task); 3284 status = PTR_ERR(task);
3290 if (IS_ERR(task)) 3285 if (IS_ERR(task))
3291 goto out_unlock; 3286 goto out;
3292 status = nfs4_wait_for_completion_rpc_task(task); 3287 status = nfs4_wait_for_completion_rpc_task(task);
3293 rpc_release_task(task); 3288 rpc_release_task(task);
3294 return status; 3289out:
3295out_unlock:
3296 do_vfs_lock(request->fl_file, request);
3297 return status; 3290 return status;
3298} 3291}
3299 3292
@@ -3461,10 +3454,10 @@ static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request
3461 struct nfs4_exception exception = { }; 3454 struct nfs4_exception exception = { };
3462 int err; 3455 int err;
3463 3456
3464 /* Cache the lock if possible... */
3465 if (test_bit(NFS_DELEGATED_STATE, &state->flags))
3466 return 0;
3467 do { 3457 do {
3458 /* Cache the lock if possible... */
3459 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
3460 return 0;
3468 err = _nfs4_do_setlk(state, F_SETLK, request, 1); 3461 err = _nfs4_do_setlk(state, F_SETLK, request, 1);
3469 if (err != -NFS4ERR_DELAY) 3462 if (err != -NFS4ERR_DELAY)
3470 break; 3463 break;
@@ -3483,6 +3476,8 @@ static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request
3483 if (err != 0) 3476 if (err != 0)
3484 return err; 3477 return err;
3485 do { 3478 do {
3479 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
3480 return 0;
3486 err = _nfs4_do_setlk(state, F_SETLK, request, 0); 3481 err = _nfs4_do_setlk(state, F_SETLK, request, 0);
3487 if (err != -NFS4ERR_DELAY) 3482 if (err != -NFS4ERR_DELAY)
3488 break; 3483 break;
@@ -3494,29 +3489,42 @@ static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request
3494static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 3489static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
3495{ 3490{
3496 struct nfs4_client *clp = state->owner->so_client; 3491 struct nfs4_client *clp = state->owner->so_client;
3492 unsigned char fl_flags = request->fl_flags;
3497 int status; 3493 int status;
3498 3494
3499 /* Is this a delegated open? */ 3495 /* Is this a delegated open? */
3500 if (NFS_I(state->inode)->delegation_state != 0) {
3501 /* Yes: cache locks! */
3502 status = do_vfs_lock(request->fl_file, request);
3503 /* ...but avoid races with delegation recall... */
3504 if (status < 0 || test_bit(NFS_DELEGATED_STATE, &state->flags))
3505 return status;
3506 }
3507 down_read(&clp->cl_sem);
3508 status = nfs4_set_lock_state(state, request); 3496 status = nfs4_set_lock_state(state, request);
3509 if (status != 0) 3497 if (status != 0)
3510 goto out; 3498 goto out;
3499 request->fl_flags |= FL_ACCESS;
3500 status = do_vfs_lock(request->fl_file, request);
3501 if (status < 0)
3502 goto out;
3503 down_read(&clp->cl_sem);
3504 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
3505 struct nfs_inode *nfsi = NFS_I(state->inode);
3506 /* Yes: cache locks! */
3507 down_read(&nfsi->rwsem);
3508 /* ...but avoid races with delegation recall... */
3509 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
3510 request->fl_flags = fl_flags & ~FL_SLEEP;
3511 status = do_vfs_lock(request->fl_file, request);
3512 up_read(&nfsi->rwsem);
3513 goto out_unlock;
3514 }
3515 up_read(&nfsi->rwsem);
3516 }
3511 status = _nfs4_do_setlk(state, cmd, request, 0); 3517 status = _nfs4_do_setlk(state, cmd, request, 0);
3512 if (status != 0) 3518 if (status != 0)
3513 goto out; 3519 goto out_unlock;
3514 /* Note: we always want to sleep here! */ 3520 /* Note: we always want to sleep here! */
3515 request->fl_flags |= FL_SLEEP; 3521 request->fl_flags = fl_flags | FL_SLEEP;
3516 if (do_vfs_lock(request->fl_file, request) < 0) 3522 if (do_vfs_lock(request->fl_file, request) < 0)
3517 printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __FUNCTION__); 3523 printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __FUNCTION__);
3518out: 3524out_unlock:
3519 up_read(&clp->cl_sem); 3525 up_read(&clp->cl_sem);
3526out:
3527 request->fl_flags = fl_flags;
3520 return status; 3528 return status;
3521} 3529}
3522 3530
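The reworked _nfs4_proc_setlk() manipulates request->fl_flags three ways: it probes with FL_ACCESS before talking to the server, clears FL_SLEEP for the non-blocking delegated case, and restores the caller's flags on every exit path (the unlock path plays a similar trick with FL_EXISTS to detect locks that were never granted). A sketch of the save/probe/restore shape, assuming do_vfs_lock() as defined earlier in this file:

	static int sketch_setlk(struct file_lock *request)
	{
		unsigned char fl_flags = request->fl_flags;
		int status;

		request->fl_flags |= FL_ACCESS;	/* test only, take no lock */
		status = do_vfs_lock(request->fl_file, request);
		if (status < 0)
			goto out;

		/* ... obtain the lock from the server ... */

		request->fl_flags = fl_flags | FL_SLEEP;
		status = do_vfs_lock(request->fl_file, request);
	out:
		request->fl_flags = fl_flags;	/* callers never see the probe bits */
		return status;
	}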
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index bca5734ca9f..86bac6a5008 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -578,7 +578,7 @@ static int nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, un
578 return ret; 578 return ret;
579} 579}
580 580
581static void nfs_cancel_requests(struct list_head *head) 581static void nfs_cancel_dirty_list(struct list_head *head)
582{ 582{
583 struct nfs_page *req; 583 struct nfs_page *req;
584 while(!list_empty(head)) { 584 while(!list_empty(head)) {
@@ -589,6 +589,19 @@ static void nfs_cancel_requests(struct list_head *head)
589 } 589 }
590} 590}
591 591
592static void nfs_cancel_commit_list(struct list_head *head)
593{
594 struct nfs_page *req;
595
596 while(!list_empty(head)) {
597 req = nfs_list_entry(head->next);
598 nfs_list_remove_request(req);
599 nfs_inode_remove_request(req);
600 nfs_clear_page_writeback(req);
601 dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
602 }
603}
604
592/* 605/*
593 * nfs_scan_dirty - Scan an inode for dirty requests 606 * nfs_scan_dirty - Scan an inode for dirty requests
594 * @inode: NFS inode to scan 607 * @inode: NFS inode to scan
@@ -1381,6 +1394,7 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
1381 nfs_list_remove_request(req); 1394 nfs_list_remove_request(req);
1382 nfs_mark_request_commit(req); 1395 nfs_mark_request_commit(req);
1383 nfs_clear_page_writeback(req); 1396 nfs_clear_page_writeback(req);
1397 dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
1384 } 1398 }
1385 return -ENOMEM; 1399 return -ENOMEM;
1386} 1400}
@@ -1499,7 +1513,7 @@ int nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start,
1499 if (pages != 0) { 1513 if (pages != 0) {
1500 spin_unlock(&nfsi->req_lock); 1514 spin_unlock(&nfsi->req_lock);
1501 if (how & FLUSH_INVALIDATE) 1515 if (how & FLUSH_INVALIDATE)
1502 nfs_cancel_requests(&head); 1516 nfs_cancel_dirty_list(&head);
1503 else 1517 else
1504 ret = nfs_flush_list(inode, &head, pages, how); 1518 ret = nfs_flush_list(inode, &head, pages, how);
1505 spin_lock(&nfsi->req_lock); 1519 spin_lock(&nfsi->req_lock);
@@ -1512,7 +1526,7 @@ int nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start,
1512 break; 1526 break;
1513 if (how & FLUSH_INVALIDATE) { 1527 if (how & FLUSH_INVALIDATE) {
1514 spin_unlock(&nfsi->req_lock); 1528 spin_unlock(&nfsi->req_lock);
1515 nfs_cancel_requests(&head); 1529 nfs_cancel_commit_list(&head);
1516 spin_lock(&nfsi->req_lock); 1530 spin_lock(&nfsi->req_lock);
1517 continue; 1531 continue;
1518 } 1532 }
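The write.c changes split the old nfs_cancel_requests() because the dirty and commit lists carry different accounting: a request parked on the commit list holds one NR_UNSTABLE_NFS page-state count, so any path that removes it without committing must drop that count as well (which is also why the -ENOMEM path in nfs_commit_list() now decrements it). The invariant, restated as a sketch:

	/* every request leaving the commit list uncommitted gives back
	 * its NR_UNSTABLE_NFS count */
	static void sketch_cancel_commit(struct list_head *head)
	{
		struct nfs_page *req;

		while (!list_empty(head)) {
			req = nfs_list_entry(head->next);
			nfs_list_remove_request(req);
			nfs_inode_remove_request(req);
			nfs_clear_page_writeback(req);
			dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		}
	}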
diff --git a/include/asm-arm/arch-at91rm9200/irqs.h b/include/asm-arm/arch-at91rm9200/irqs.h
index 2dc93b174a8..f63842c2c09 100644
--- a/include/asm-arm/arch-at91rm9200/irqs.h
+++ b/include/asm-arm/arch-at91rm9200/irqs.h
@@ -39,12 +39,4 @@
39 */ 39 */
40#define NR_IRQS (NR_AIC_IRQS + (4 * 32)) 40#define NR_IRQS (NR_AIC_IRQS + (4 * 32))
41 41
42
43#ifndef __ASSEMBLY__
44/*
45 * Initialize the IRQ controller.
46 */
47extern void at91rm9200_init_irq(unsigned int priority[]);
48#endif
49
50#endif 42#endif
diff --git a/include/asm-powerpc/cputime.h b/include/asm-powerpc/cputime.h
index a21185d4788..31080448520 100644
--- a/include/asm-powerpc/cputime.h
+++ b/include/asm-powerpc/cputime.h
@@ -43,6 +43,7 @@ typedef u64 cputime64_t;
43 43
44#define cputime64_zero ((cputime64_t)0) 44#define cputime64_zero ((cputime64_t)0)
45#define cputime64_add(__a, __b) ((__a) + (__b)) 45#define cputime64_add(__a, __b) ((__a) + (__b))
46#define cputime64_sub(__a, __b) ((__a) - (__b))
46#define cputime_to_cputime64(__ct) (__ct) 47#define cputime_to_cputime64(__ct) (__ct)
47 48
48#ifdef __KERNEL__ 49#ifdef __KERNEL__
@@ -74,6 +75,23 @@ static inline cputime_t jiffies_to_cputime(const unsigned long jif)
74 return ct; 75 return ct;
75} 76}
76 77
78static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
79{
80 cputime_t ct;
81 u64 sec;
82
83 /* have to be a little careful about overflow */
84 ct = jif % HZ;
85 sec = jif / HZ;
86 if (ct) {
87 ct *= tb_ticks_per_sec;
88 do_div(ct, HZ);
89 }
90 if (sec)
91 ct += (cputime_t) sec * tb_ticks_per_sec;
92 return ct;
93}
94
77static inline u64 cputime64_to_jiffies64(const cputime_t ct) 95static inline u64 cputime64_to_jiffies64(const cputime_t ct)
78{ 96{
79 return mulhdu(ct, __cputime_jiffies_factor); 97 return mulhdu(ct, __cputime_jiffies_factor);
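The reason jiffies64_to_cputime64() splits jif into whole seconds and a sub-second remainder is overflow headroom: with HZ=1000 and a timebase near 2^29 ticks per second (say ~512 MHz), multiplying a u64 jiffies count by the timebase directly would wrap once jif exceeds about 2^35, i.e. after roughly a year of uptime. After the split, the remainder is below HZ so its product stays under ~2^39, and the seconds path only overflows on the order of 2^35 seconds. A standalone sketch of the same shape (illustrative constants only):

	#include <asm/div64.h>

	static inline u64 sketch_jiffies64_to_ticks(u64 jif, u64 ticks_per_sec)
	{
		u64 rem = jif % HZ;		/* < HZ, so the product below stays small */
		u64 sec = jif / HZ;

		if (rem) {
			rem *= ticks_per_sec;
			do_div(rem, HZ);	/* rem = rem * ticks_per_sec / HZ */
		}
		return sec * ticks_per_sec + rem;
	}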
diff --git a/include/asm-sparc64/dma-mapping.h b/include/asm-sparc64/dma-mapping.h
index 0f5b89c9323..27c46fbeebd 100644
--- a/include/asm-sparc64/dma-mapping.h
+++ b/include/asm-sparc64/dma-mapping.h
@@ -160,6 +160,20 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
160 BUG(); 160 BUG();
161} 161}
162 162
163static inline void
164dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
165 enum dma_data_direction direction)
166{
167 BUG();
168}
169
170static inline void
171dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
172 enum dma_data_direction direction)
173{
174 BUG();
175}
176
163#endif /* PCI */ 177#endif /* PCI */
164 178
165 179
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 134b3206824..43aef9b230f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -716,6 +716,7 @@ extern spinlock_t files_lock;
716#define FL_POSIX 1 716#define FL_POSIX 1
717#define FL_FLOCK 2 717#define FL_FLOCK 2
718#define FL_ACCESS 8 /* not trying to lock, just looking */ 718#define FL_ACCESS 8 /* not trying to lock, just looking */
719#define FL_EXISTS 16 /* when unlocking, test for existence */
719#define FL_LEASE 32 /* lease held on this file */ 720#define FL_LEASE 32 /* lease held on this file */
720#define FL_CLOSE 64 /* unlock on close */ 721#define FL_CLOSE 64 /* unlock on close */
721#define FL_SLEEP 128 /* A blocking lock */ 722#define FL_SLEEP 128 /* A blocking lock */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index f4284bf8975..6cc497a2b6d 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -131,6 +131,7 @@ enum {
131 ATA_DFLAG_CFG_MASK = (1 << 8) - 1, 131 ATA_DFLAG_CFG_MASK = (1 << 8) - 1,
132 132
133 ATA_DFLAG_PIO = (1 << 8), /* device currently in PIO mode */ 133 ATA_DFLAG_PIO = (1 << 8), /* device currently in PIO mode */
134 ATA_DFLAG_SUSPENDED = (1 << 9), /* device suspended */
134 ATA_DFLAG_INIT_MASK = (1 << 16) - 1, 135 ATA_DFLAG_INIT_MASK = (1 << 16) - 1,
135 136
136 ATA_DFLAG_DETACH = (1 << 16), 137 ATA_DFLAG_DETACH = (1 << 16),
@@ -160,22 +161,28 @@ enum {
160 ATA_FLAG_HRST_TO_RESUME = (1 << 11), /* hardreset to resume phy */ 161 ATA_FLAG_HRST_TO_RESUME = (1 << 11), /* hardreset to resume phy */
161 ATA_FLAG_SKIP_D2H_BSY = (1 << 12), /* can't wait for the first D2H 162 ATA_FLAG_SKIP_D2H_BSY = (1 << 12), /* can't wait for the first D2H
162 * Register FIS clearing BSY */ 163 * Register FIS clearing BSY */
163
164 ATA_FLAG_DEBUGMSG = (1 << 13), 164 ATA_FLAG_DEBUGMSG = (1 << 13),
165 ATA_FLAG_FLUSH_PORT_TASK = (1 << 14), /* flush port task */
166 165
167 ATA_FLAG_EH_PENDING = (1 << 15), /* EH pending */ 166 /* The following flag belongs to ap->pflags but is kept in
168 ATA_FLAG_EH_IN_PROGRESS = (1 << 16), /* EH in progress */ 167 * ap->flags because it's referenced in many LLDs and will be
169 ATA_FLAG_FROZEN = (1 << 17), /* port is frozen */ 168 * removed in not-too-distant future.
170 ATA_FLAG_RECOVERED = (1 << 18), /* recovery action performed */ 169 */
171 ATA_FLAG_LOADING = (1 << 19), /* boot/loading probe */ 170 ATA_FLAG_DISABLED = (1 << 23), /* port is disabled, ignore it */
172 ATA_FLAG_UNLOADING = (1 << 20), /* module is unloading */ 171
173 ATA_FLAG_SCSI_HOTPLUG = (1 << 21), /* SCSI hotplug scheduled */ 172 /* bits 24:31 of ap->flags are reserved for LLD specific flags */
174 173
175 ATA_FLAG_DISABLED = (1 << 22), /* port is disabled, ignore it */ 174 /* struct ata_port pflags */
176 ATA_FLAG_SUSPENDED = (1 << 23), /* port is suspended (power) */ 175 ATA_PFLAG_EH_PENDING = (1 << 0), /* EH pending */
176 ATA_PFLAG_EH_IN_PROGRESS = (1 << 1), /* EH in progress */
177 ATA_PFLAG_FROZEN = (1 << 2), /* port is frozen */
178 ATA_PFLAG_RECOVERED = (1 << 3), /* recovery action performed */
179 ATA_PFLAG_LOADING = (1 << 4), /* boot/loading probe */
180 ATA_PFLAG_UNLOADING = (1 << 5), /* module is unloading */
181 ATA_PFLAG_SCSI_HOTPLUG = (1 << 6), /* SCSI hotplug scheduled */
177 182
178 /* bits 24:31 of ap->flags are reserved for LLDD specific flags */ 183 ATA_PFLAG_FLUSH_PORT_TASK = (1 << 16), /* flush port task */
184 ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */
185 ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */
179 186
180 /* struct ata_queued_cmd flags */ 187 /* struct ata_queued_cmd flags */
181 ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi lyer */ 188 ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi lyer */
@@ -248,12 +255,19 @@ enum {
248 ATA_EH_REVALIDATE = (1 << 0), 255 ATA_EH_REVALIDATE = (1 << 0),
249 ATA_EH_SOFTRESET = (1 << 1), 256 ATA_EH_SOFTRESET = (1 << 1),
250 ATA_EH_HARDRESET = (1 << 2), 257 ATA_EH_HARDRESET = (1 << 2),
258 ATA_EH_SUSPEND = (1 << 3),
259 ATA_EH_RESUME = (1 << 4),
260 ATA_EH_PM_FREEZE = (1 << 5),
251 261
252 ATA_EH_RESET_MASK = ATA_EH_SOFTRESET | ATA_EH_HARDRESET, 262 ATA_EH_RESET_MASK = ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
253 ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE, 263 ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_SUSPEND |
264 ATA_EH_RESUME | ATA_EH_PM_FREEZE,
254 265
255 /* ata_eh_info->flags */ 266 /* ata_eh_info->flags */
256 ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ 267 ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */
268 ATA_EHI_RESUME_LINK = (1 << 1), /* need to resume link */
269 ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */
270 ATA_EHI_QUIET = (1 << 3), /* be quiet */
257 271
258 ATA_EHI_DID_RESET = (1 << 16), /* already reset this port */ 272 ATA_EHI_DID_RESET = (1 << 16), /* already reset this port */
259 273
@@ -486,6 +500,7 @@ struct ata_port {
486 const struct ata_port_operations *ops; 500 const struct ata_port_operations *ops;
487 spinlock_t *lock; 501 spinlock_t *lock;
488 unsigned long flags; /* ATA_FLAG_xxx */ 502 unsigned long flags; /* ATA_FLAG_xxx */
503 unsigned int pflags; /* ATA_PFLAG_xxx */
489 unsigned int id; /* unique id req'd by scsi midlyr */ 504 unsigned int id; /* unique id req'd by scsi midlyr */
490 unsigned int port_no; /* unique port #; from zero */ 505 unsigned int port_no; /* unique port #; from zero */
491 unsigned int hard_port_no; /* hardware port #; from zero */ 506 unsigned int hard_port_no; /* hardware port #; from zero */
@@ -535,6 +550,9 @@ struct ata_port {
535 struct list_head eh_done_q; 550 struct list_head eh_done_q;
536 wait_queue_head_t eh_wait_q; 551 wait_queue_head_t eh_wait_q;
537 552
553 pm_message_t pm_mesg;
554 int *pm_result;
555
538 void *private_data; 556 void *private_data;
539 557
540 u8 sector_buf[ATA_SECT_SIZE]; /* owned by EH */ 558 u8 sector_buf[ATA_SECT_SIZE]; /* owned by EH */
@@ -589,6 +607,9 @@ struct ata_port_operations {
589 void (*scr_write) (struct ata_port *ap, unsigned int sc_reg, 607 void (*scr_write) (struct ata_port *ap, unsigned int sc_reg,
590 u32 val); 608 u32 val);
591 609
610 int (*port_suspend) (struct ata_port *ap, pm_message_t mesg);
611 int (*port_resume) (struct ata_port *ap);
612
592 int (*port_start) (struct ata_port *ap); 613 int (*port_start) (struct ata_port *ap);
593 void (*port_stop) (struct ata_port *ap); 614 void (*port_stop) (struct ata_port *ap);
594 615
@@ -622,9 +643,18 @@ struct ata_timing {
622 643
623#define FIT(v,vmin,vmax) max_t(short,min_t(short,v,vmax),vmin) 644#define FIT(v,vmin,vmax) max_t(short,min_t(short,v,vmax),vmin)
624 645
625extern const unsigned long sata_deb_timing_boot[]; 646extern const unsigned long sata_deb_timing_normal[];
626extern const unsigned long sata_deb_timing_eh[]; 647extern const unsigned long sata_deb_timing_hotplug[];
627extern const unsigned long sata_deb_timing_before_fsrst[]; 648extern const unsigned long sata_deb_timing_long[];
649
650static inline const unsigned long *
651sata_ehc_deb_timing(struct ata_eh_context *ehc)
652{
653 if (ehc->i.flags & ATA_EHI_HOTPLUGGED)
654 return sata_deb_timing_hotplug;
655 else
656 return sata_deb_timing_normal;
657}
628 658
629extern void ata_port_probe(struct ata_port *); 659extern void ata_port_probe(struct ata_port *);
630extern void __sata_phy_reset(struct ata_port *ap); 660extern void __sata_phy_reset(struct ata_port *ap);
@@ -644,6 +674,8 @@ extern void ata_std_ports(struct ata_ioports *ioaddr);
644extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, 674extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
645 unsigned int n_ports); 675 unsigned int n_ports);
646extern void ata_pci_remove_one (struct pci_dev *pdev); 676extern void ata_pci_remove_one (struct pci_dev *pdev);
677extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t state);
678extern void ata_pci_device_do_resume(struct pci_dev *pdev);
647extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state); 679extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state);
648extern int ata_pci_device_resume(struct pci_dev *pdev); 680extern int ata_pci_device_resume(struct pci_dev *pdev);
649extern int ata_pci_clear_simplex(struct pci_dev *pdev); 681extern int ata_pci_clear_simplex(struct pci_dev *pdev);
@@ -664,8 +696,9 @@ extern int ata_port_online(struct ata_port *ap);
664extern int ata_port_offline(struct ata_port *ap); 696extern int ata_port_offline(struct ata_port *ap);
665extern int ata_scsi_device_resume(struct scsi_device *); 697extern int ata_scsi_device_resume(struct scsi_device *);
666extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t state); 698extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t state);
667extern int ata_device_resume(struct ata_device *); 699extern int ata_host_set_suspend(struct ata_host_set *host_set,
668extern int ata_device_suspend(struct ata_device *, pm_message_t state); 700 pm_message_t mesg);
701extern void ata_host_set_resume(struct ata_host_set *host_set);
669extern int ata_ratelimit(void); 702extern int ata_ratelimit(void);
670extern unsigned int ata_busy_sleep(struct ata_port *ap, 703extern unsigned int ata_busy_sleep(struct ata_port *ap,
671 unsigned long timeout_pat, 704 unsigned long timeout_pat,
@@ -825,19 +858,24 @@ extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
825 (ehi)->desc_len = 0; \ 858 (ehi)->desc_len = 0; \
826} while (0) 859} while (0)
827 860
828static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi) 861static inline void __ata_ehi_hotplugged(struct ata_eh_info *ehi)
829{ 862{
830 if (ehi->flags & ATA_EHI_HOTPLUGGED) 863 if (ehi->flags & ATA_EHI_HOTPLUGGED)
831 return; 864 return;
832 865
833 ehi->flags |= ATA_EHI_HOTPLUGGED; 866 ehi->flags |= ATA_EHI_HOTPLUGGED | ATA_EHI_RESUME_LINK;
834 ehi->hotplug_timestamp = jiffies; 867 ehi->hotplug_timestamp = jiffies;
835 868
836 ehi->err_mask |= AC_ERR_ATA_BUS;
837 ehi->action |= ATA_EH_SOFTRESET; 869 ehi->action |= ATA_EH_SOFTRESET;
838 ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1; 870 ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
839} 871}
840 872
873static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi)
874{
875 __ata_ehi_hotplugged(ehi);
876 ehi->err_mask |= AC_ERR_ATA_BUS;
877}
878
841/* 879/*
842 * qc helpers 880 * qc helpers
843 */ 881 */
@@ -921,6 +959,11 @@ static inline unsigned int ata_dev_absent(const struct ata_device *dev)
921 return ata_class_absent(dev->class); 959 return ata_class_absent(dev->class);
922} 960}
923 961
962static inline unsigned int ata_dev_ready(const struct ata_device *dev)
963{
964 return ata_dev_enabled(dev) && !(dev->flags & ATA_DFLAG_SUSPENDED);
965}
966
924/* 967/*
925 * port helpers 968 * port helpers
926 */ 969 */
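The libata changes split port state out of ap->flags into the new ap->pflags word: configuration bits stay in flags (with ATA_FLAG_DISABLED temporarily left behind for older LLDs), while EH and power-management state moves to pflags. A hypothetical helper in the style of the ones above, just to show which word the new bits live in:

	static inline int sketch_port_quiescing(const struct ata_port *ap)
	{
		return (ap->pflags & (ATA_PFLAG_EH_PENDING |
				      ATA_PFLAG_EH_IN_PROGRESS |
				      ATA_PFLAG_PM_PENDING)) != 0;
	}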
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 7c7320fa51a..2d3fb6416d9 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -729,6 +729,7 @@ struct nfs_read_data {
729 struct list_head pages; /* Coalesced read requests */ 729 struct list_head pages; /* Coalesced read requests */
730 struct nfs_page *req; /* multi ops per nfs_page */ 730 struct nfs_page *req; /* multi ops per nfs_page */
731 struct page **pagevec; 731 struct page **pagevec;
732 unsigned int npages; /* active pages in pagevec */
732 struct nfs_readargs args; 733 struct nfs_readargs args;
733 struct nfs_readres res; 734 struct nfs_readres res;
734#ifdef CONFIG_NFS_V4 735#ifdef CONFIG_NFS_V4
@@ -747,6 +748,7 @@ struct nfs_write_data {
747 struct list_head pages; /* Coalesced requests we wish to flush */ 748 struct list_head pages; /* Coalesced requests we wish to flush */
748 struct nfs_page *req; /* multi ops per nfs_page */ 749 struct nfs_page *req; /* multi ops per nfs_page */
749 struct page **pagevec; 750 struct page **pagevec;
751 unsigned int npages; /* active pages in pagevec */
750 struct nfs_writeargs args; /* argument struct */ 752 struct nfs_writeargs args; /* argument struct */
751 struct nfs_writeres res; /* result struct */ 753 struct nfs_writeres res; /* result struct */
752#ifdef CONFIG_NFS_V4 754#ifdef CONFIG_NFS_V4
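The new npages fields record how many entries of pagevec were actually pinned, which is what lets the completion paths in fs/nfs/direct.c release exactly the pages each request took. The release helper itself is not shown in this diff; a plausible sketch, assuming it simply unpins the vector:

	static void sketch_release_pages(struct page **pages, int npages)
	{
		int i;

		for (i = 0; i < npages; i++)
			page_cache_release(pages[i]);	/* drop the get_user_pages() ref */
	}

(On the read side the pages are marked dirty first, since the RPC wrote user data into them.)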
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 685081c0134..c09396d2c77 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2019,6 +2019,13 @@
2019#define PCI_VENDOR_ID_TDI 0x192E 2019#define PCI_VENDOR_ID_TDI 0x192E
2020#define PCI_DEVICE_ID_TDI_EHCI 0x0101 2020#define PCI_DEVICE_ID_TDI_EHCI 0x0101
2021 2021
2022#define PCI_VENDOR_ID_JMICRON 0x197B
2023#define PCI_DEVICE_ID_JMICRON_JMB360 0x2360
2024#define PCI_DEVICE_ID_JMICRON_JMB361 0x2361
2025#define PCI_DEVICE_ID_JMICRON_JMB363 0x2363
2026#define PCI_DEVICE_ID_JMICRON_JMB365 0x2365
2027#define PCI_DEVICE_ID_JMICRON_JMB366 0x2366
2028#define PCI_DEVICE_ID_JMICRON_JMB368 0x2368
2022 2029
2023#define PCI_VENDOR_ID_TEKRAM 0x1de1 2030#define PCI_VENDOR_ID_TEKRAM 0x1de1
2024#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29 2031#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 5b9397b3323..599423cc9d0 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -250,15 +250,17 @@ tcf_action_dump(struct sk_buff *skb, struct tc_action *act, int bind, int ref)
250 RTA_PUT(skb, a->order, 0, NULL); 250 RTA_PUT(skb, a->order, 0, NULL);
251 err = tcf_action_dump_1(skb, a, bind, ref); 251 err = tcf_action_dump_1(skb, a, bind, ref);
252 if (err < 0) 252 if (err < 0)
253 goto rtattr_failure; 253 goto errout;
254 r->rta_len = skb->tail - (u8*)r; 254 r->rta_len = skb->tail - (u8*)r;
255 } 255 }
256 256
257 return 0; 257 return 0;
258 258
259rtattr_failure: 259rtattr_failure:
260 err = -EINVAL;
261errout:
260 skb_trim(skb, b - skb->data); 262 skb_trim(skb, b - skb->data);
261 return -err; 263 return err;
262} 264}
263 265
264struct tc_action *tcf_action_init_1(struct rtattr *rta, struct rtattr *est, 266struct tc_action *tcf_action_init_1(struct rtattr *rta, struct rtattr *est,
@@ -305,6 +307,7 @@ struct tc_action *tcf_action_init_1(struct rtattr *rta, struct rtattr *est,
305 goto err_mod; 307 goto err_mod;
306 } 308 }
307#endif 309#endif
310 *err = -ENOENT;
308 goto err_out; 311 goto err_out;
309 } 312 }
310 313
@@ -776,7 +779,7 @@ replay:
776 return ret; 779 return ret;
777} 780}
778 781
779static char * 782static struct rtattr *
780find_dump_kind(struct nlmsghdr *n) 783find_dump_kind(struct nlmsghdr *n)
781{ 784{
782 struct rtattr *tb1, *tb2[TCA_ACT_MAX+1]; 785 struct rtattr *tb1, *tb2[TCA_ACT_MAX+1];
@@ -804,7 +807,7 @@ find_dump_kind(struct nlmsghdr *n)
804 return NULL; 807 return NULL;
805 kind = tb2[TCA_ACT_KIND-1]; 808 kind = tb2[TCA_ACT_KIND-1];
806 809
807 return (char *) RTA_DATA(kind); 810 return kind;
808} 811}
809 812
810static int 813static int
@@ -817,16 +820,15 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
817 struct tc_action a; 820 struct tc_action a;
818 int ret = 0; 821 int ret = 0;
819 struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh); 822 struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh);
820 char *kind = find_dump_kind(cb->nlh); 823 struct rtattr *kind = find_dump_kind(cb->nlh);
821 824
822 if (kind == NULL) { 825 if (kind == NULL) {
823 printk("tc_dump_action: action bad kind\n"); 826 printk("tc_dump_action: action bad kind\n");
824 return 0; 827 return 0;
825 } 828 }
826 829
827 a_o = tc_lookup_action_n(kind); 830 a_o = tc_lookup_action(kind);
828 if (a_o == NULL) { 831 if (a_o == NULL) {
829 printk("failed to find %s\n", kind);
830 return 0; 832 return 0;
831 } 833 }
832 834
@@ -834,7 +836,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
834 a.ops = a_o; 836 a.ops = a_o;
835 837
836 if (a_o->walk == NULL) { 838 if (a_o->walk == NULL) {
837 printk("tc_dump_action: %s !capable of dumping table\n", kind); 839 printk("tc_dump_action: %s !capable of dumping table\n", a_o->kind);
838 goto rtattr_failure; 840 goto rtattr_failure;
839 } 841 }
840 842
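The act_api.c dump fix separates two failure sources that used to share one label: RTA_PUT() jumps to rtattr_failure without setting an errno, while tcf_action_dump_1() already returns a negative err. Giving each its own label, with the first setting -EINVAL and falling through, lets the function return a proper negative value instead of the old negated "-err". A sketch of the shape, with a stand-in for the RTA_PUT() overflow jump and a hypothetical callee result:

	static int sketch_dump(struct sk_buff *skb, u8 *b, int callee_err)
	{
		int err;

		if (skb_tailroom(skb) < 4)	/* stand-in for RTA_PUT() */
			goto rtattr_failure;

		err = callee_err;		/* e.g. tcf_action_dump_1() */
		if (err < 0)
			goto errout;
		return 0;

	rtattr_failure:
		err = -EINVAL;
	errout:
		skb_trim(skb, b - skb->data);
		return err;
	}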
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 49174f0d0a3..6ac45103a27 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -191,7 +191,6 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
191 do { 191 do {
192 /* Are any pointers crossing a page boundary? */ 192 /* Are any pointers crossing a page boundary? */
193 if (pgto_base == 0) { 193 if (pgto_base == 0) {
194 flush_dcache_page(*pgto);
195 pgto_base = PAGE_CACHE_SIZE; 194 pgto_base = PAGE_CACHE_SIZE;
196 pgto--; 195 pgto--;
197 } 196 }
@@ -211,11 +210,11 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
211 vto = kmap_atomic(*pgto, KM_USER0); 210 vto = kmap_atomic(*pgto, KM_USER0);
212 vfrom = kmap_atomic(*pgfrom, KM_USER1); 211 vfrom = kmap_atomic(*pgfrom, KM_USER1);
213 memmove(vto + pgto_base, vfrom + pgfrom_base, copy); 212 memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
213 flush_dcache_page(*pgto);
214 kunmap_atomic(vfrom, KM_USER1); 214 kunmap_atomic(vfrom, KM_USER1);
215 kunmap_atomic(vto, KM_USER0); 215 kunmap_atomic(vto, KM_USER0);
216 216
217 } while ((len -= copy) != 0); 217 } while ((len -= copy) != 0);
218 flush_dcache_page(*pgto);
219} 218}
220 219
221/* 220/*
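The xdr.c change moves the flush_dcache_page() calls so the destination page is flushed immediately after each memmove(), while the kernel mapping that performed the write is still live, rather than once at page-boundary crossings plus once after the loop. The resulting per-copy ordering, as a small sketch:

	static void sketch_copy(struct page *pgto, size_t to_off,
				struct page *pgfrom, size_t from_off, size_t len)
	{
		char *vto = kmap_atomic(pgto, KM_USER0);
		char *vfrom = kmap_atomic(pgfrom, KM_USER1);

		memmove(vto + to_off, vfrom + from_off, len);
		flush_dcache_page(pgto);	/* after the write, before unmap */
		kunmap_atomic(vfrom, KM_USER1);
		kunmap_atomic(vto, KM_USER0);
	}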