Diffstat (limited to 'drivers')
187 files changed, 7096 insertions, 6379 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 8a70a9edabda..6b658d84d521 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig | |||
@@ -48,8 +48,6 @@ config CRYPTO_DEV_PADLOCK_SHA | |||
48 | If unsure say M. The compiled module will be | 48 | If unsure say M. The compiled module will be |
49 | called padlock-sha.ko | 49 | called padlock-sha.ko |
50 | 50 | ||
51 | source "arch/s390/crypto/Kconfig" | ||
52 | |||
53 | config CRYPTO_DEV_GEODE | 51 | config CRYPTO_DEV_GEODE |
54 | tristate "Support for the Geode LX AES engine" | 52 | tristate "Support for the Geode LX AES engine" |
55 | depends on X86_32 && PCI | 53 | depends on X86_32 && PCI |
@@ -83,6 +81,67 @@ config ZCRYPT_MONOLITHIC | |||
83 | that contains all parts of the crypto device driver (ap bus, | 81 | that contains all parts of the crypto device driver (ap bus, |
84 | request router and all the card drivers). | 82 | request router and all the card drivers). |
85 | 83 | ||
84 | config CRYPTO_SHA1_S390 | ||
85 | tristate "SHA1 digest algorithm" | ||
86 | depends on S390 | ||
87 | select CRYPTO_ALGAPI | ||
88 | help | ||
89 | This is the s390 hardware accelerated implementation of the | ||
90 | SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). | ||
91 | |||
92 | config CRYPTO_SHA256_S390 | ||
93 | tristate "SHA256 digest algorithm" | ||
94 | depends on S390 | ||
95 | select CRYPTO_ALGAPI | ||
96 | help | ||
97 | This is the s390 hardware accelerated implementation of the | ||
98 | SHA256 secure hash standard (DFIPS 180-2). | ||
99 | |||
100 | This version of SHA implements a 256 bit hash with 128 bits of | ||
101 | security against collision attacks. | ||
102 | |||
103 | config CRYPTO_DES_S390 | ||
104 | tristate "DES and Triple DES cipher algorithms" | ||
105 | depends on S390 | ||
106 | select CRYPTO_ALGAPI | ||
107 | select CRYPTO_BLKCIPHER | ||
108 | help | ||
109 | This is the s390 hardware accelerated implementation of the | ||
110 | DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3). | ||
111 | |||
112 | config CRYPTO_AES_S390 | ||
113 | tristate "AES cipher algorithms" | ||
114 | depends on S390 | ||
115 | select CRYPTO_ALGAPI | ||
116 | select CRYPTO_BLKCIPHER | ||
117 | help | ||
118 | This is the s390 hardware accelerated implementation of the | ||
119 | AES cipher algorithms (FIPS-197). AES uses the Rijndael | ||
120 | algorithm. | ||
121 | |||
122 | Rijndael appears to be consistently a very good performer in | ||
123 | both hardware and software across a wide range of computing | ||
124 | environments regardless of its use in feedback or non-feedback | ||
125 | modes. Its key setup time is excellent, and its key agility is | ||
126 | good. Rijndael's very low memory requirements make it very well | ||
127 | suited for restricted-space environments, in which it also | ||
128 | demonstrates excellent performance. Rijndael's operations are | ||
129 | among the easiest to defend against power and timing attacks. | ||
130 | |||
131 | On s390 the System z9-109 currently only supports a key size | ||
132 | of 128 bits. | ||
133 | |||
134 | config S390_PRNG | ||
135 | tristate "Pseudo random number generator device driver" | ||
136 | depends on S390 | ||
137 | default "m" | ||
138 | help | ||
139 | Select this option if you want to use the s390 pseudo random number | ||
140 | generator. The PRNG is part of the cryptographic processor functions | ||
141 | and uses triple-DES to generate secure random numbers in a manner | ||
142 | similar to the ANSI X9.17 standard. The PRNG is usable via the | ||
143 | char device /dev/prandom. | ||
144 | |||
86 | config CRYPTO_DEV_HIFN_795X | 145 | config CRYPTO_DEV_HIFN_795X |
87 | tristate "Driver HIFN 795x crypto accelerator chips" | 146 | tristate "Driver HIFN 795x crypto accelerator chips" |
88 | select CRYPTO_DES | 147 | select CRYPTO_DES |
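The S390_PRNG help text above says the generator is exposed through the character device /dev/prandom. Purely as an illustration (not part of this patch), a minimal user-space reader of that device could look like the following; the 16-byte buffer and the hex output are arbitrary choices:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
    	unsigned char buf[16];
    	ssize_t n;
    	int i;
    	int fd = open("/dev/prandom", O_RDONLY);	/* device named in the help text above */

    	if (fd < 0) {
    		perror("open /dev/prandom");
    		return 1;
    	}
    	n = read(fd, buf, sizeof(buf));		/* pull 16 pseudo-random bytes */
    	close(fd);
    	if (n <= 0)
    		return 1;
    	for (i = 0; i < n; i++)
    		printf("%02x", buf[i]);
    	printf("\n");
    	return 0;
    }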
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c index a37cb6b8593c..35812823787b 100644 --- a/drivers/i2c/algos/i2c-algo-bit.c +++ b/drivers/i2c/algos/i2c-algo-bit.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* ------------------------------------------------------------------------- */ | 1 | /* ------------------------------------------------------------------------- |
2 | /* i2c-algo-bit.c i2c driver algorithms for bit-shift adapters */ | 2 | * i2c-algo-bit.c i2c driver algorithms for bit-shift adapters |
3 | /* ------------------------------------------------------------------------- */ | 3 | * ------------------------------------------------------------------------- |
4 | /* Copyright (C) 1995-2000 Simon G. Vogl | 4 | * Copyright (C) 1995-2000 Simon G. Vogl |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify | 6 | This program is free software; you can redistribute it and/or modify |
7 | it under the terms of the GNU General Public License as published by | 7 | it under the terms of the GNU General Public License as published by |
@@ -15,8 +15,8 @@ | |||
15 | 15 | ||
16 | You should have received a copy of the GNU General Public License | 16 | You should have received a copy of the GNU General Public License |
17 | along with this program; if not, write to the Free Software | 17 | along with this program; if not, write to the Free Software |
18 | Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ | 18 | Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
19 | /* ------------------------------------------------------------------------- */ | 19 | * ------------------------------------------------------------------------- */ |
20 | 20 | ||
21 | /* With some changes from Frodo Looijaard <frodol@dds.nl>, Kyösti Mälkki | 21 | /* With some changes from Frodo Looijaard <frodol@dds.nl>, Kyösti Mälkki |
22 | <kmalkki@cc.hut.fi> and Jean Delvare <khali@linux-fr.org> */ | 22 | <kmalkki@cc.hut.fi> and Jean Delvare <khali@linux-fr.org> */ |
@@ -60,26 +60,26 @@ MODULE_PARM_DESC(i2c_debug, | |||
60 | 60 | ||
61 | /* --- setting states on the bus with the right timing: --------------- */ | 61 | /* --- setting states on the bus with the right timing: --------------- */ |
62 | 62 | ||
63 | #define setsda(adap,val) adap->setsda(adap->data, val) | 63 | #define setsda(adap, val) adap->setsda(adap->data, val) |
64 | #define setscl(adap,val) adap->setscl(adap->data, val) | 64 | #define setscl(adap, val) adap->setscl(adap->data, val) |
65 | #define getsda(adap) adap->getsda(adap->data) | 65 | #define getsda(adap) adap->getsda(adap->data) |
66 | #define getscl(adap) adap->getscl(adap->data) | 66 | #define getscl(adap) adap->getscl(adap->data) |
67 | 67 | ||
68 | static inline void sdalo(struct i2c_algo_bit_data *adap) | 68 | static inline void sdalo(struct i2c_algo_bit_data *adap) |
69 | { | 69 | { |
70 | setsda(adap,0); | 70 | setsda(adap, 0); |
71 | udelay((adap->udelay + 1) / 2); | 71 | udelay((adap->udelay + 1) / 2); |
72 | } | 72 | } |
73 | 73 | ||
74 | static inline void sdahi(struct i2c_algo_bit_data *adap) | 74 | static inline void sdahi(struct i2c_algo_bit_data *adap) |
75 | { | 75 | { |
76 | setsda(adap,1); | 76 | setsda(adap, 1); |
77 | udelay((adap->udelay + 1) / 2); | 77 | udelay((adap->udelay + 1) / 2); |
78 | } | 78 | } |
79 | 79 | ||
80 | static inline void scllo(struct i2c_algo_bit_data *adap) | 80 | static inline void scllo(struct i2c_algo_bit_data *adap) |
81 | { | 81 | { |
82 | setscl(adap,0); | 82 | setscl(adap, 0); |
83 | udelay(adap->udelay / 2); | 83 | udelay(adap->udelay / 2); |
84 | } | 84 | } |
85 | 85 | ||
@@ -91,22 +91,21 @@ static int sclhi(struct i2c_algo_bit_data *adap) | |||
91 | { | 91 | { |
92 | unsigned long start; | 92 | unsigned long start; |
93 | 93 | ||
94 | setscl(adap,1); | 94 | setscl(adap, 1); |
95 | 95 | ||
96 | /* Not all adapters have scl sense line... */ | 96 | /* Not all adapters have scl sense line... */ |
97 | if (!adap->getscl) | 97 | if (!adap->getscl) |
98 | goto done; | 98 | goto done; |
99 | 99 | ||
100 | start=jiffies; | 100 | start = jiffies; |
101 | while (! getscl(adap) ) { | 101 | while (!getscl(adap)) { |
102 | /* the hw knows how to read the clock line, | 102 | /* This hw knows how to read the clock line, so we wait |
103 | * so we wait until it actually gets high. | 103 | * until it actually gets high. This is safer as some |
104 | * This is safer as some chips may hold it low | 104 | * chips may hold it low ("clock stretching") while they |
105 | * while they are processing data internally. | 105 | * are processing data internally. |
106 | */ | 106 | */ |
107 | if (time_after_eq(jiffies, start+adap->timeout)) { | 107 | if (time_after_eq(jiffies, start + adap->timeout)) |
108 | return -ETIMEDOUT; | 108 | return -ETIMEDOUT; |
109 | } | ||
110 | cond_resched(); | 109 | cond_resched(); |
111 | } | 110 | } |
112 | #ifdef DEBUG | 111 | #ifdef DEBUG |
@@ -118,11 +117,11 @@ static int sclhi(struct i2c_algo_bit_data *adap) | |||
118 | done: | 117 | done: |
119 | udelay(adap->udelay); | 118 | udelay(adap->udelay); |
120 | return 0; | 119 | return 0; |
121 | } | 120 | } |
122 | 121 | ||
123 | 122 | ||
124 | /* --- other auxiliary functions -------------------------------------- */ | 123 | /* --- other auxiliary functions -------------------------------------- */ |
125 | static void i2c_start(struct i2c_algo_bit_data *adap) | 124 | static void i2c_start(struct i2c_algo_bit_data *adap) |
126 | { | 125 | { |
127 | /* assert: scl, sda are high */ | 126 | /* assert: scl, sda are high */ |
128 | setsda(adap, 0); | 127 | setsda(adap, 0); |
@@ -130,7 +129,7 @@ static void i2c_start(struct i2c_algo_bit_data *adap) | |||
130 | scllo(adap); | 129 | scllo(adap); |
131 | } | 130 | } |
132 | 131 | ||
133 | static void i2c_repstart(struct i2c_algo_bit_data *adap) | 132 | static void i2c_repstart(struct i2c_algo_bit_data *adap) |
134 | { | 133 | { |
135 | /* assert: scl is low */ | 134 | /* assert: scl is low */ |
136 | sdahi(adap); | 135 | sdahi(adap); |
@@ -141,18 +140,18 @@ static void i2c_repstart(struct i2c_algo_bit_data *adap) | |||
141 | } | 140 | } |
142 | 141 | ||
143 | 142 | ||
144 | static void i2c_stop(struct i2c_algo_bit_data *adap) | 143 | static void i2c_stop(struct i2c_algo_bit_data *adap) |
145 | { | 144 | { |
146 | /* assert: scl is low */ | 145 | /* assert: scl is low */ |
147 | sdalo(adap); | 146 | sdalo(adap); |
148 | sclhi(adap); | 147 | sclhi(adap); |
149 | setsda(adap, 1); | 148 | setsda(adap, 1); |
150 | udelay(adap->udelay); | 149 | udelay(adap->udelay); |
151 | } | 150 | } |
152 | 151 | ||
153 | 152 | ||
154 | 153 | ||
155 | /* send a byte without start cond., look for arbitration, | 154 | /* send a byte without start cond., look for arbitration, |
156 | check ackn. from slave */ | 155 | check ackn. from slave */ |
157 | /* returns: | 156 | /* returns: |
158 | * 1 if the device acknowledged | 157 | * 1 if the device acknowledged |
@@ -167,27 +166,33 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c) | |||
167 | struct i2c_algo_bit_data *adap = i2c_adap->algo_data; | 166 | struct i2c_algo_bit_data *adap = i2c_adap->algo_data; |
168 | 167 | ||
169 | /* assert: scl is low */ | 168 | /* assert: scl is low */ |
170 | for ( i=7 ; i>=0 ; i-- ) { | 169 | for (i = 7; i >= 0; i--) { |
171 | sb = (c >> i) & 1; | 170 | sb = (c >> i) & 1; |
172 | setsda(adap,sb); | 171 | setsda(adap, sb); |
173 | udelay((adap->udelay + 1) / 2); | 172 | udelay((adap->udelay + 1) / 2); |
174 | if (sclhi(adap)<0) { /* timed out */ | 173 | if (sclhi(adap) < 0) { /* timed out */ |
175 | bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, " | 174 | bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, " |
176 | "timeout at bit #%d\n", (int)c, i); | 175 | "timeout at bit #%d\n", (int)c, i); |
177 | return -ETIMEDOUT; | 176 | return -ETIMEDOUT; |
178 | }; | 177 | } |
179 | /* do arbitration here: | 178 | /* FIXME do arbitration here: |
180 | * if ( sb && ! getsda(adap) ) -> ouch! Get out of here. | 179 | * if (sb && !getsda(adap)) -> ouch! Get out of here. |
180 | * | ||
181 | * Report a unique code, so higher level code can retry | ||
182 | * the whole (combined) message and *NOT* issue STOP. | ||
181 | */ | 183 | */ |
182 | scllo(adap); | 184 | scllo(adap); |
183 | } | 185 | } |
184 | sdahi(adap); | 186 | sdahi(adap); |
185 | if (sclhi(adap)<0){ /* timeout */ | 187 | if (sclhi(adap) < 0) { /* timeout */ |
186 | bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, " | 188 | bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, " |
187 | "timeout at ack\n", (int)c); | 189 | "timeout at ack\n", (int)c); |
188 | return -ETIMEDOUT; | 190 | return -ETIMEDOUT; |
189 | }; | 191 | } |
190 | /* read ack: SDA should be pulled down by slave */ | 192 | |
193 | /* read ack: SDA should be pulled down by slave, or it may | ||
194 | * NAK (usually to report problems with the data we wrote). | ||
195 | */ | ||
191 | ack = !getsda(adap); /* ack: sda is pulled low -> success */ | 196 | ack = !getsda(adap); /* ack: sda is pulled low -> success */ |
192 | bit_dbg(2, &i2c_adap->dev, "i2c_outb: 0x%02x %s\n", (int)c, | 197 | bit_dbg(2, &i2c_adap->dev, "i2c_outb: 0x%02x %s\n", (int)c, |
193 | ack ? "A" : "NA"); | 198 | ack ? "A" : "NA"); |
@@ -198,24 +203,24 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c) | |||
198 | } | 203 | } |
199 | 204 | ||
200 | 205 | ||
201 | static int i2c_inb(struct i2c_adapter *i2c_adap) | 206 | static int i2c_inb(struct i2c_adapter *i2c_adap) |
202 | { | 207 | { |
203 | /* read byte via i2c port, without start/stop sequence */ | 208 | /* read byte via i2c port, without start/stop sequence */ |
204 | /* acknowledge is sent in i2c_read. */ | 209 | /* acknowledge is sent in i2c_read. */ |
205 | int i; | 210 | int i; |
206 | unsigned char indata=0; | 211 | unsigned char indata = 0; |
207 | struct i2c_algo_bit_data *adap = i2c_adap->algo_data; | 212 | struct i2c_algo_bit_data *adap = i2c_adap->algo_data; |
208 | 213 | ||
209 | /* assert: scl is low */ | 214 | /* assert: scl is low */ |
210 | sdahi(adap); | 215 | sdahi(adap); |
211 | for (i=0;i<8;i++) { | 216 | for (i = 0; i < 8; i++) { |
212 | if (sclhi(adap)<0) { /* timeout */ | 217 | if (sclhi(adap) < 0) { /* timeout */ |
213 | bit_dbg(1, &i2c_adap->dev, "i2c_inb: timeout at bit " | 218 | bit_dbg(1, &i2c_adap->dev, "i2c_inb: timeout at bit " |
214 | "#%d\n", 7 - i); | 219 | "#%d\n", 7 - i); |
215 | return -ETIMEDOUT; | 220 | return -ETIMEDOUT; |
216 | }; | 221 | } |
217 | indata *= 2; | 222 | indata *= 2; |
218 | if ( getsda(adap) ) | 223 | if (getsda(adap)) |
219 | indata |= 0x01; | 224 | indata |= 0x01; |
220 | setscl(adap, 0); | 225 | setscl(adap, 0); |
221 | udelay(i == 7 ? adap->udelay / 2 : adap->udelay); | 226 | udelay(i == 7 ? adap->udelay / 2 : adap->udelay); |
@@ -228,66 +233,67 @@ static int i2c_inb(struct i2c_adapter *i2c_adap) | |||
228 | * Sanity check for the adapter hardware - check the reaction of | 233 | * Sanity check for the adapter hardware - check the reaction of |
229 | * the bus lines only if it seems to be idle. | 234 | * the bus lines only if it seems to be idle. |
230 | */ | 235 | */ |
231 | static int test_bus(struct i2c_algo_bit_data *adap, char* name) { | 236 | static int test_bus(struct i2c_algo_bit_data *adap, char *name) |
232 | int scl,sda; | 237 | { |
238 | int scl, sda; | ||
233 | 239 | ||
234 | if (adap->getscl==NULL) | 240 | if (adap->getscl == NULL) |
235 | pr_info("%s: Testing SDA only, SCL is not readable\n", name); | 241 | pr_info("%s: Testing SDA only, SCL is not readable\n", name); |
236 | 242 | ||
237 | sda=getsda(adap); | 243 | sda = getsda(adap); |
238 | scl=(adap->getscl==NULL?1:getscl(adap)); | 244 | scl = (adap->getscl == NULL) ? 1 : getscl(adap); |
239 | if (!scl || !sda ) { | 245 | if (!scl || !sda) { |
240 | printk(KERN_WARNING "%s: bus seems to be busy\n", name); | 246 | printk(KERN_WARNING "%s: bus seems to be busy\n", name); |
241 | goto bailout; | 247 | goto bailout; |
242 | } | 248 | } |
243 | 249 | ||
244 | sdalo(adap); | 250 | sdalo(adap); |
245 | sda=getsda(adap); | 251 | sda = getsda(adap); |
246 | scl=(adap->getscl==NULL?1:getscl(adap)); | 252 | scl = (adap->getscl == NULL) ? 1 : getscl(adap); |
247 | if ( 0 != sda ) { | 253 | if (sda) { |
248 | printk(KERN_WARNING "%s: SDA stuck high!\n", name); | 254 | printk(KERN_WARNING "%s: SDA stuck high!\n", name); |
249 | goto bailout; | 255 | goto bailout; |
250 | } | 256 | } |
251 | if ( 0 == scl ) { | 257 | if (!scl) { |
252 | printk(KERN_WARNING "%s: SCL unexpected low " | 258 | printk(KERN_WARNING "%s: SCL unexpected low " |
253 | "while pulling SDA low!\n", name); | 259 | "while pulling SDA low!\n", name); |
254 | goto bailout; | 260 | goto bailout; |
255 | } | 261 | } |
256 | 262 | ||
257 | sdahi(adap); | 263 | sdahi(adap); |
258 | sda=getsda(adap); | 264 | sda = getsda(adap); |
259 | scl=(adap->getscl==NULL?1:getscl(adap)); | 265 | scl = (adap->getscl == NULL) ? 1 : getscl(adap); |
260 | if ( 0 == sda ) { | 266 | if (!sda) { |
261 | printk(KERN_WARNING "%s: SDA stuck low!\n", name); | 267 | printk(KERN_WARNING "%s: SDA stuck low!\n", name); |
262 | goto bailout; | 268 | goto bailout; |
263 | } | 269 | } |
264 | if ( 0 == scl ) { | 270 | if (!scl) { |
265 | printk(KERN_WARNING "%s: SCL unexpected low " | 271 | printk(KERN_WARNING "%s: SCL unexpected low " |
266 | "while pulling SDA high!\n", name); | 272 | "while pulling SDA high!\n", name); |
267 | goto bailout; | 273 | goto bailout; |
268 | } | 274 | } |
269 | 275 | ||
270 | scllo(adap); | 276 | scllo(adap); |
271 | sda=getsda(adap); | 277 | sda = getsda(adap); |
272 | scl=(adap->getscl==NULL?0:getscl(adap)); | 278 | scl = (adap->getscl == NULL) ? 0 : getscl(adap); |
273 | if ( 0 != scl ) { | 279 | if (scl) { |
274 | printk(KERN_WARNING "%s: SCL stuck high!\n", name); | 280 | printk(KERN_WARNING "%s: SCL stuck high!\n", name); |
275 | goto bailout; | 281 | goto bailout; |
276 | } | 282 | } |
277 | if ( 0 == sda ) { | 283 | if (!sda) { |
278 | printk(KERN_WARNING "%s: SDA unexpected low " | 284 | printk(KERN_WARNING "%s: SDA unexpected low " |
279 | "while pulling SCL low!\n", name); | 285 | "while pulling SCL low!\n", name); |
280 | goto bailout; | 286 | goto bailout; |
281 | } | 287 | } |
282 | 288 | ||
283 | sclhi(adap); | 289 | sclhi(adap); |
284 | sda=getsda(adap); | 290 | sda = getsda(adap); |
285 | scl=(adap->getscl==NULL?1:getscl(adap)); | 291 | scl = (adap->getscl == NULL) ? 1 : getscl(adap); |
286 | if ( 0 == scl ) { | 292 | if (!scl) { |
287 | printk(KERN_WARNING "%s: SCL stuck low!\n", name); | 293 | printk(KERN_WARNING "%s: SCL stuck low!\n", name); |
288 | goto bailout; | 294 | goto bailout; |
289 | } | 295 | } |
290 | if ( 0 == sda ) { | 296 | if (!sda) { |
291 | printk(KERN_WARNING "%s: SDA unexpected low " | 297 | printk(KERN_WARNING "%s: SDA unexpected low " |
292 | "while pulling SCL high!\n", name); | 298 | "while pulling SCL high!\n", name); |
293 | goto bailout; | 299 | goto bailout; |
@@ -314,9 +320,10 @@ static int try_address(struct i2c_adapter *i2c_adap, | |||
314 | unsigned char addr, int retries) | 320 | unsigned char addr, int retries) |
315 | { | 321 | { |
316 | struct i2c_algo_bit_data *adap = i2c_adap->algo_data; | 322 | struct i2c_algo_bit_data *adap = i2c_adap->algo_data; |
317 | int i,ret = -1; | 323 | int i, ret = -1; |
318 | for (i=0;i<=retries;i++) { | 324 | |
319 | ret = i2c_outb(i2c_adap,addr); | 325 | for (i = 0; i <= retries; i++) { |
326 | ret = i2c_outb(i2c_adap, addr); | ||
320 | if (ret == 1 || i == retries) | 327 | if (ret == 1 || i == retries) |
321 | break; | 328 | break; |
322 | bit_dbg(3, &i2c_adap->dev, "emitting stop condition\n"); | 329 | bit_dbg(3, &i2c_adap->dev, "emitting stop condition\n"); |
@@ -338,20 +345,38 @@ static int sendbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) | |||
338 | { | 345 | { |
339 | const unsigned char *temp = msg->buf; | 346 | const unsigned char *temp = msg->buf; |
340 | int count = msg->len; | 347 | int count = msg->len; |
341 | unsigned short nak_ok = msg->flags & I2C_M_IGNORE_NAK; | 348 | unsigned short nak_ok = msg->flags & I2C_M_IGNORE_NAK; |
342 | int retval; | 349 | int retval; |
343 | int wrcount=0; | 350 | int wrcount = 0; |
344 | 351 | ||
345 | while (count > 0) { | 352 | while (count > 0) { |
346 | retval = i2c_outb(i2c_adap, *temp); | 353 | retval = i2c_outb(i2c_adap, *temp); |
347 | if ((retval>0) || (nak_ok && (retval==0))) { /* ok or ignored NAK */ | 354 | |
348 | count--; | 355 | /* OK/ACK; or ignored NAK */ |
356 | if ((retval > 0) || (nak_ok && (retval == 0))) { | ||
357 | count--; | ||
349 | temp++; | 358 | temp++; |
350 | wrcount++; | 359 | wrcount++; |
351 | } else { /* arbitration or no acknowledge */ | 360 | |
352 | dev_err(&i2c_adap->dev, "sendbytes: error - bailout.\n"); | 361 | /* A slave NAKing the master means the slave didn't like |
353 | return (retval<0)? retval : -EFAULT; | 362 | * something about the data it saw. For example, maybe |
354 | /* got a better one ?? */ | 363 | * the SMBus PEC was wrong. |
364 | */ | ||
365 | } else if (retval == 0) { | ||
366 | dev_err(&i2c_adap->dev, "sendbytes: NAK bailout.\n"); | ||
367 | return -EIO; | ||
368 | |||
369 | /* Timeout; or (someday) lost arbitration | ||
370 | * | ||
371 | * FIXME Lost ARB implies retrying the transaction from | ||
372 | * the first message, after the "winning" master issues | ||
373 | * its STOP. As a rule, upper layer code has no reason | ||
374 | * to know or care about this ... it is *NOT* an error. | ||
375 | */ | ||
376 | } else { | ||
377 | dev_err(&i2c_adap->dev, "sendbytes: error %d\n", | ||
378 | retval); | ||
379 | return retval; | ||
355 | } | 380 | } |
356 | } | 381 | } |
357 | return wrcount; | 382 | return wrcount; |
@@ -376,14 +401,14 @@ static int acknak(struct i2c_adapter *i2c_adap, int is_ack) | |||
376 | static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) | 401 | static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) |
377 | { | 402 | { |
378 | int inval; | 403 | int inval; |
379 | int rdcount=0; /* counts bytes read */ | 404 | int rdcount = 0; /* counts bytes read */ |
380 | unsigned char *temp = msg->buf; | 405 | unsigned char *temp = msg->buf; |
381 | int count = msg->len; | 406 | int count = msg->len; |
382 | const unsigned flags = msg->flags; | 407 | const unsigned flags = msg->flags; |
383 | 408 | ||
384 | while (count > 0) { | 409 | while (count > 0) { |
385 | inval = i2c_inb(i2c_adap); | 410 | inval = i2c_inb(i2c_adap); |
386 | if (inval>=0) { | 411 | if (inval >= 0) { |
387 | *temp = inval; | 412 | *temp = inval; |
388 | rdcount++; | 413 | rdcount++; |
389 | } else { /* read timed out */ | 414 | } else { /* read timed out */ |
@@ -431,7 +456,7 @@ static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) | |||
431 | * returns: | 456 | * returns: |
432 | * 0 everything went okay, the chip ack'ed, or IGNORE_NAK flag was set | 457 | * 0 everything went okay, the chip ack'ed, or IGNORE_NAK flag was set |
433 | * -x an error occurred (like: -EREMOTEIO if the device did not answer, or | 458 | * -x an error occurred (like: -EREMOTEIO if the device did not answer, or |
434 | * -ETIMEDOUT, for example if the lines are stuck...) | 459 | * -ETIMEDOUT, for example if the lines are stuck...) |
435 | */ | 460 | */ |
436 | static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) | 461 | static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) |
437 | { | 462 | { |
@@ -443,10 +468,10 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) | |||
443 | int ret, retries; | 468 | int ret, retries; |
444 | 469 | ||
445 | retries = nak_ok ? 0 : i2c_adap->retries; | 470 | retries = nak_ok ? 0 : i2c_adap->retries; |
446 | 471 | ||
447 | if ( (flags & I2C_M_TEN) ) { | 472 | if (flags & I2C_M_TEN) { |
448 | /* a ten bit address */ | 473 | /* a ten bit address */ |
449 | addr = 0xf0 | (( msg->addr >> 7) & 0x03); | 474 | addr = 0xf0 | ((msg->addr >> 7) & 0x03); |
450 | bit_dbg(2, &i2c_adap->dev, "addr0: %d\n", addr); | 475 | bit_dbg(2, &i2c_adap->dev, "addr0: %d\n", addr); |
451 | /* try extended address code...*/ | 476 | /* try extended address code...*/ |
452 | ret = try_address(i2c_adap, addr, retries); | 477 | ret = try_address(i2c_adap, addr, retries); |
@@ -456,33 +481,33 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) | |||
456 | return -EREMOTEIO; | 481 | return -EREMOTEIO; |
457 | } | 482 | } |
458 | /* the remaining 8 bit address */ | 483 | /* the remaining 8 bit address */ |
459 | ret = i2c_outb(i2c_adap,msg->addr & 0x7f); | 484 | ret = i2c_outb(i2c_adap, msg->addr & 0x7f); |
460 | if ((ret != 1) && !nak_ok) { | 485 | if ((ret != 1) && !nak_ok) { |
461 | /* the chip did not ack / xmission error occurred */ | 486 | /* the chip did not ack / xmission error occurred */ |
462 | dev_err(&i2c_adap->dev, "died at 2nd address code\n"); | 487 | dev_err(&i2c_adap->dev, "died at 2nd address code\n"); |
463 | return -EREMOTEIO; | 488 | return -EREMOTEIO; |
464 | } | 489 | } |
465 | if ( flags & I2C_M_RD ) { | 490 | if (flags & I2C_M_RD) { |
466 | bit_dbg(3, &i2c_adap->dev, "emitting repeated " | 491 | bit_dbg(3, &i2c_adap->dev, "emitting repeated " |
467 | "start condition\n"); | 492 | "start condition\n"); |
468 | i2c_repstart(adap); | 493 | i2c_repstart(adap); |
469 | /* okay, now switch into reading mode */ | 494 | /* okay, now switch into reading mode */ |
470 | addr |= 0x01; | 495 | addr |= 0x01; |
471 | ret = try_address(i2c_adap, addr, retries); | 496 | ret = try_address(i2c_adap, addr, retries); |
472 | if ((ret!=1) && !nak_ok) { | 497 | if ((ret != 1) && !nak_ok) { |
473 | dev_err(&i2c_adap->dev, | 498 | dev_err(&i2c_adap->dev, |
474 | "died at repeated address code\n"); | 499 | "died at repeated address code\n"); |
475 | return -EREMOTEIO; | 500 | return -EREMOTEIO; |
476 | } | 501 | } |
477 | } | 502 | } |
478 | } else { /* normal 7bit address */ | 503 | } else { /* normal 7bit address */ |
479 | addr = ( msg->addr << 1 ); | 504 | addr = msg->addr << 1; |
480 | if (flags & I2C_M_RD ) | 505 | if (flags & I2C_M_RD) |
481 | addr |= 1; | 506 | addr |= 1; |
482 | if (flags & I2C_M_REV_DIR_ADDR ) | 507 | if (flags & I2C_M_REV_DIR_ADDR) |
483 | addr ^= 1; | 508 | addr ^= 1; |
484 | ret = try_address(i2c_adap, addr, retries); | 509 | ret = try_address(i2c_adap, addr, retries); |
485 | if ((ret!=1) && !nak_ok) | 510 | if ((ret != 1) && !nak_ok) |
486 | return -EREMOTEIO; | 511 | return -EREMOTEIO; |
487 | } | 512 | } |
488 | 513 | ||
@@ -494,15 +519,14 @@ static int bit_xfer(struct i2c_adapter *i2c_adap, | |||
494 | { | 519 | { |
495 | struct i2c_msg *pmsg; | 520 | struct i2c_msg *pmsg; |
496 | struct i2c_algo_bit_data *adap = i2c_adap->algo_data; | 521 | struct i2c_algo_bit_data *adap = i2c_adap->algo_data; |
497 | 522 | int i, ret; | |
498 | int i,ret; | ||
499 | unsigned short nak_ok; | 523 | unsigned short nak_ok; |
500 | 524 | ||
501 | bit_dbg(3, &i2c_adap->dev, "emitting start condition\n"); | 525 | bit_dbg(3, &i2c_adap->dev, "emitting start condition\n"); |
502 | i2c_start(adap); | 526 | i2c_start(adap); |
503 | for (i=0;i<num;i++) { | 527 | for (i = 0; i < num; i++) { |
504 | pmsg = &msgs[i]; | 528 | pmsg = &msgs[i]; |
505 | nak_ok = pmsg->flags & I2C_M_IGNORE_NAK; | 529 | nak_ok = pmsg->flags & I2C_M_IGNORE_NAK; |
506 | if (!(pmsg->flags & I2C_M_NOSTART)) { | 530 | if (!(pmsg->flags & I2C_M_NOSTART)) { |
507 | if (i) { | 531 | if (i) { |
508 | bit_dbg(3, &i2c_adap->dev, "emitting " | 532 | bit_dbg(3, &i2c_adap->dev, "emitting " |
@@ -517,7 +541,7 @@ static int bit_xfer(struct i2c_adapter *i2c_adap, | |||
517 | goto bailout; | 541 | goto bailout; |
518 | } | 542 | } |
519 | } | 543 | } |
520 | if (pmsg->flags & I2C_M_RD ) { | 544 | if (pmsg->flags & I2C_M_RD) { |
521 | /* read bytes into buffer*/ | 545 | /* read bytes into buffer*/ |
522 | ret = readbytes(i2c_adap, pmsg); | 546 | ret = readbytes(i2c_adap, pmsg); |
523 | if (ret >= 1) | 547 | if (ret >= 1) |
@@ -551,7 +575,7 @@ bailout: | |||
551 | 575 | ||
552 | static u32 bit_func(struct i2c_adapter *adap) | 576 | static u32 bit_func(struct i2c_adapter *adap) |
553 | { | 577 | { |
554 | return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | | 578 | return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | |
555 | I2C_FUNC_SMBUS_READ_BLOCK_DATA | | 579 | I2C_FUNC_SMBUS_READ_BLOCK_DATA | |
556 | I2C_FUNC_SMBUS_BLOCK_PROC_CALL | | 580 | I2C_FUNC_SMBUS_BLOCK_PROC_CALL | |
557 | I2C_FUNC_10BIT_ADDR | I2C_FUNC_PROTOCOL_MANGLING; | 581 | I2C_FUNC_10BIT_ADDR | I2C_FUNC_PROTOCOL_MANGLING; |
@@ -565,8 +589,8 @@ static const struct i2c_algorithm i2c_bit_algo = { | |||
565 | .functionality = bit_func, | 589 | .functionality = bit_func, |
566 | }; | 590 | }; |
567 | 591 | ||
568 | /* | 592 | /* |
569 | * registering functions to load algorithms at runtime | 593 | * registering functions to load algorithms at runtime |
570 | */ | 594 | */ |
571 | static int i2c_bit_prepare_bus(struct i2c_adapter *adap) | 595 | static int i2c_bit_prepare_bus(struct i2c_adapter *adap) |
572 | { | 596 | { |
@@ -574,7 +598,7 @@ static int i2c_bit_prepare_bus(struct i2c_adapter *adap) | |||
574 | 598 | ||
575 | if (bit_test) { | 599 | if (bit_test) { |
576 | int ret = test_bus(bit_adap, adap->name); | 600 | int ret = test_bus(bit_adap, adap->name); |
577 | if (ret<0) | 601 | if (ret < 0) |
578 | return -ENODEV; | 602 | return -ENODEV; |
579 | } | 603 | } |
580 | 604 | ||
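For context on how a bus driver consumes this algorithm (not part of the patch itself): the setsda()/setscl()/getsda()/getscl() macros above call through struct i2c_algo_bit_data, which board code fills in with its own line-toggling callbacks before registering the adapter with i2c_bit_add_bus(). A minimal sketch follows; the my_* line accessors are hypothetical stand-ins for whatever the board provides, and the udelay/timeout values are examples only:

    #include <linux/module.h>
    #include <linux/i2c.h>
    #include <linux/i2c-algo-bit.h>
    #include <linux/jiffies.h>

    /* Board-specific line accessors -- hypothetical names, not part of this patch */
    extern void my_sda_set(int state);
    extern void my_scl_set(int state);
    extern int my_sda_get(void);
    extern int my_scl_get(void);

    static void my_setsda(void *data, int state) { my_sda_set(state); }
    static void my_setscl(void *data, int state) { my_scl_set(state); }
    static int my_getsda(void *data) { return my_sda_get(); }
    static int my_getscl(void *data) { return my_scl_get(); }

    static struct i2c_algo_bit_data my_bit_data = {
    	.setsda		= my_setsda,
    	.setscl		= my_setscl,
    	.getsda		= my_getsda,
    	.getscl		= my_getscl,	/* may be NULL: sclhi() then skips the stretch wait */
    	.udelay		= 10,		/* half-period in us, roughly 50 kHz */
    	.timeout	= HZ / 10,	/* clock-stretching limit used by sclhi() */
    };

    static struct i2c_adapter my_adapter = {
    	.owner		= THIS_MODULE,
    	.name		= "my bit-banged bus",
    	.algo_data	= &my_bit_data,
    };

    static int __init my_bus_init(void)
    {
    	/* installs i2c_bit_algo (bit_xfer/bit_func above) and registers the adapter */
    	return i2c_bit_add_bus(&my_adapter);
    }
    module_init(my_bus_init);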
diff --git a/drivers/i2c/algos/i2c-algo-pcf.c b/drivers/i2c/algos/i2c-algo-pcf.c index ab2e6f3498b4..8907b0191677 100644 --- a/drivers/i2c/algos/i2c-algo-pcf.c +++ b/drivers/i2c/algos/i2c-algo-pcf.c | |||
@@ -203,35 +203,6 @@ static int pcf_init_8584 (struct i2c_algo_pcf_data *adap) | |||
203 | /* ----- Utility functions | 203 | /* ----- Utility functions |
204 | */ | 204 | */ |
205 | 205 | ||
206 | static inline int try_address(struct i2c_algo_pcf_data *adap, | ||
207 | unsigned char addr, int retries) | ||
208 | { | ||
209 | int i, status, ret = -1; | ||
210 | int wfp; | ||
211 | for (i=0;i<retries;i++) { | ||
212 | i2c_outb(adap, addr); | ||
213 | i2c_start(adap); | ||
214 | status = get_pcf(adap, 1); | ||
215 | if ((wfp = wait_for_pin(adap, &status)) >= 0) { | ||
216 | if ((status & I2C_PCF_LRB) == 0) { | ||
217 | i2c_stop(adap); | ||
218 | break; /* success! */ | ||
219 | } | ||
220 | } | ||
221 | if (wfp == -EINTR) { | ||
222 | /* arbitration lost */ | ||
223 | udelay(adap->udelay); | ||
224 | return -EINTR; | ||
225 | } | ||
226 | i2c_stop(adap); | ||
227 | udelay(adap->udelay); | ||
228 | } | ||
229 | DEB2(if (i) printk(KERN_DEBUG "i2c-algo-pcf.o: needed %d retries for %d\n",i, | ||
230 | addr)); | ||
231 | return ret; | ||
232 | } | ||
233 | |||
234 | |||
235 | static int pcf_sendbytes(struct i2c_adapter *i2c_adap, const char *buf, | 206 | static int pcf_sendbytes(struct i2c_adapter *i2c_adap, const char *buf, |
236 | int count, int last) | 207 | int count, int last) |
237 | { | 208 | { |
@@ -321,47 +292,19 @@ static int pcf_readbytes(struct i2c_adapter *i2c_adap, char *buf, | |||
321 | } | 292 | } |
322 | 293 | ||
323 | 294 | ||
324 | static inline int pcf_doAddress(struct i2c_algo_pcf_data *adap, | 295 | static int pcf_doAddress(struct i2c_algo_pcf_data *adap, |
325 | struct i2c_msg *msg, int retries) | 296 | struct i2c_msg *msg) |
326 | { | 297 | { |
327 | unsigned short flags = msg->flags; | 298 | unsigned short flags = msg->flags; |
328 | unsigned char addr; | 299 | unsigned char addr; |
329 | int ret; | 300 | |
330 | if ( (flags & I2C_M_TEN) ) { | 301 | addr = msg->addr << 1; |
331 | /* a ten bit address */ | 302 | if (flags & I2C_M_RD) |
332 | addr = 0xf0 | (( msg->addr >> 7) & 0x03); | 303 | addr |= 1; |
333 | DEB2(printk(KERN_DEBUG "addr0: %d\n",addr)); | 304 | if (flags & I2C_M_REV_DIR_ADDR) |
334 | /* try extended address code...*/ | 305 | addr ^= 1; |
335 | ret = try_address(adap, addr, retries); | 306 | i2c_outb(adap, addr); |
336 | if (ret!=1) { | 307 | |
337 | printk(KERN_ERR "died at extended address code.\n"); | ||
338 | return -EREMOTEIO; | ||
339 | } | ||
340 | /* the remaining 8 bit address */ | ||
341 | i2c_outb(adap,msg->addr & 0x7f); | ||
342 | /* Status check comes here */ | ||
343 | if (ret != 1) { | ||
344 | printk(KERN_ERR "died at 2nd address code.\n"); | ||
345 | return -EREMOTEIO; | ||
346 | } | ||
347 | if ( flags & I2C_M_RD ) { | ||
348 | i2c_repstart(adap); | ||
349 | /* okay, now switch into reading mode */ | ||
350 | addr |= 0x01; | ||
351 | ret = try_address(adap, addr, retries); | ||
352 | if (ret!=1) { | ||
353 | printk(KERN_ERR "died at extended address code.\n"); | ||
354 | return -EREMOTEIO; | ||
355 | } | ||
356 | } | ||
357 | } else { /* normal 7bit address */ | ||
358 | addr = ( msg->addr << 1 ); | ||
359 | if (flags & I2C_M_RD ) | ||
360 | addr |= 1; | ||
361 | if (flags & I2C_M_REV_DIR_ADDR ) | ||
362 | addr ^= 1; | ||
363 | i2c_outb(adap, addr); | ||
364 | } | ||
365 | return 0; | 308 | return 0; |
366 | } | 309 | } |
367 | 310 | ||
@@ -390,7 +333,7 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap, | |||
390 | pmsg->flags & I2C_M_RD ? "read" : "write", | 333 | pmsg->flags & I2C_M_RD ? "read" : "write", |
391 | pmsg->len, pmsg->addr, i + 1, num);) | 334 | pmsg->len, pmsg->addr, i + 1, num);) |
392 | 335 | ||
393 | ret = pcf_doAddress(adap, pmsg, i2c_adap->retries); | 336 | ret = pcf_doAddress(adap, pmsg); |
394 | 337 | ||
395 | /* Send START */ | 338 | /* Send START */ |
396 | if (i == 0) { | 339 | if (i == 0) { |
@@ -453,7 +396,7 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap, | |||
453 | static u32 pcf_func(struct i2c_adapter *adap) | 396 | static u32 pcf_func(struct i2c_adapter *adap) |
454 | { | 397 | { |
455 | return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | | 398 | return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | |
456 | I2C_FUNC_10BIT_ADDR | I2C_FUNC_PROTOCOL_MANGLING; | 399 | I2C_FUNC_PROTOCOL_MANGLING; |
457 | } | 400 | } |
458 | 401 | ||
459 | /* -----exported algorithm data: ------------------------------------- */ | 402 | /* -----exported algorithm data: ------------------------------------- */ |
@@ -475,9 +418,7 @@ int i2c_pcf_add_bus(struct i2c_adapter *adap) | |||
475 | 418 | ||
476 | /* register new adapter to i2c module... */ | 419 | /* register new adapter to i2c module... */ |
477 | adap->algo = &pcf_algo; | 420 | adap->algo = &pcf_algo; |
478 | 421 | adap->timeout = 100; | |
479 | adap->timeout = 100; /* default values, should */ | ||
480 | adap->retries = 3; /* be replaced by defines */ | ||
481 | 422 | ||
482 | if ((rval = pcf_init_8584(pcf_adap))) | 423 | if ((rval = pcf_init_8584(pcf_adap))) |
483 | return rval; | 424 | return rval; |
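The simplified pcf_doAddress() keeps only 7-bit addressing (the unused retry path and 10-bit support are removed, and I2C_FUNC_10BIT_ADDR is dropped from pcf_func() accordingly). For illustration, the address-byte encoding it now performs, identical to the 7-bit branch of bit_doAddress() in i2c-algo-bit.c, can be written as a small helper; this is a sketch, not code from the patch:

    #include <linux/i2c.h>

    /* Illustration only: the on-wire address byte built by pcf_doAddress() above */
    static u8 i2c_7bit_addr_byte(const struct i2c_msg *msg)
    {
    	u8 addr = msg->addr << 1;	/* 7-bit address in bits 7..1 */

    	if (msg->flags & I2C_M_RD)
    		addr |= 1;		/* bit 0 set = read transfer */
    	if (msg->flags & I2C_M_REV_DIR_ADDR)
    		addr ^= 1;		/* quirk flag: invert the R/W bit */
    	return addr;
    }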
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index c466c6cfc2e5..8d12b26bb6c6 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig | |||
@@ -182,7 +182,8 @@ config I2C_I801 | |||
182 | will be called i2c-i801. | 182 | will be called i2c-i801. |
183 | 183 | ||
184 | config I2C_I810 | 184 | config I2C_I810 |
185 | tristate "Intel 810/815" | 185 | tristate "Intel 810/815 (DEPRECATED)" |
186 | default n | ||
186 | depends on PCI | 187 | depends on PCI |
187 | select I2C_ALGOBIT | 188 | select I2C_ALGOBIT |
188 | help | 189 | help |
@@ -195,6 +196,8 @@ config I2C_I810 | |||
195 | i815 | 196 | i815 |
196 | i845G | 197 | i845G |
197 | 198 | ||
199 | This driver is deprecated in favor of the i810fb and intelfb drivers. | ||
200 | |||
198 | This driver can also be built as a module. If so, the module | 201 | This driver can also be built as a module. If so, the module |
199 | will be called i2c-i810. | 202 | will be called i2c-i810. |
200 | 203 | ||
@@ -259,20 +262,6 @@ config I2C_IOP3XX | |||
259 | This driver can also be built as a module. If so, the module | 262 | This driver can also be built as a module. If so, the module |
260 | will be called i2c-iop3xx. | 263 | will be called i2c-iop3xx. |
261 | 264 | ||
262 | config I2C_IXP4XX | ||
263 | tristate "IXP4xx GPIO-Based I2C Interface (DEPRECATED)" | ||
264 | depends on ARCH_IXP4XX | ||
265 | select I2C_ALGOBIT | ||
266 | help | ||
267 | Say Y here if you have an Intel IXP4xx(420,421,422,425) based | ||
268 | system and are using GPIO lines for an I2C bus. | ||
269 | |||
270 | This support is also available as a module. If so, the module | ||
271 | will be called i2c-ixp4xx. | ||
272 | |||
273 | This driver is deprecated and will be dropped soon. Use i2c-gpio | ||
274 | instead. | ||
275 | |||
276 | config I2C_IXP2000 | 265 | config I2C_IXP2000 |
277 | tristate "IXP2000 GPIO-Based I2C Interface (DEPRECATED)" | 266 | tristate "IXP2000 GPIO-Based I2C Interface (DEPRECATED)" |
278 | depends on ARCH_IXP2000 | 267 | depends on ARCH_IXP2000 |
@@ -396,7 +385,8 @@ config I2C_PASEMI | |||
396 | Supports the PA Semi PWRficient on-chip SMBus interfaces. | 385 | Supports the PA Semi PWRficient on-chip SMBus interfaces. |
397 | 386 | ||
398 | config I2C_PROSAVAGE | 387 | config I2C_PROSAVAGE |
399 | tristate "S3/VIA (Pro)Savage" | 388 | tristate "S3/VIA (Pro)Savage (DEPRECATED)" |
389 | default n | ||
400 | depends on PCI | 390 | depends on PCI |
401 | select I2C_ALGOBIT | 391 | select I2C_ALGOBIT |
402 | help | 392 | help |
@@ -407,6 +397,8 @@ config I2C_PROSAVAGE | |||
407 | S3/VIA KM266/VT8375 aka ProSavage8 | 397 | S3/VIA KM266/VT8375 aka ProSavage8 |
408 | S3/VIA KM133/VT8365 aka Savage4 | 398 | S3/VIA KM133/VT8365 aka Savage4 |
409 | 399 | ||
400 | This driver is deprecated in favor of the savagefb driver. | ||
401 | |||
410 | This support is also available as a module. If so, the module | 402 | This support is also available as a module. If so, the module |
411 | will be called i2c-prosavage. | 403 | will be called i2c-prosavage. |
412 | 404 | ||
@@ -418,13 +410,16 @@ config I2C_S3C2410 | |||
418 | Samsung S3C2410 based System-on-Chip devices. | 410 | Samsung S3C2410 based System-on-Chip devices. |
419 | 411 | ||
420 | config I2C_SAVAGE4 | 412 | config I2C_SAVAGE4 |
421 | tristate "S3 Savage 4" | 413 | tristate "S3 Savage 4 (DEPRECATED)" |
422 | depends on PCI && EXPERIMENTAL | 414 | default n |
415 | depends on PCI | ||
423 | select I2C_ALGOBIT | 416 | select I2C_ALGOBIT |
424 | help | 417 | help |
425 | If you say yes to this option, support will be included for the | 418 | If you say yes to this option, support will be included for the |
426 | S3 Savage 4 I2C interface. | 419 | S3 Savage 4 I2C interface. |
427 | 420 | ||
421 | This driver is deprecated in favor of the savagefb driver. | ||
422 | |||
428 | This driver can also be built as a module. If so, the module | 423 | This driver can also be built as a module. If so, the module |
429 | will be called i2c-savage4. | 424 | will be called i2c-savage4. |
430 | 425 | ||
@@ -611,7 +606,7 @@ config I2C_VIAPRO | |||
611 | VT8231 | 606 | VT8231 |
612 | VT8233/A | 607 | VT8233/A |
613 | VT8235 | 608 | VT8235 |
614 | VT8237R/A | 609 | VT8237R/A/S |
615 | VT8251 | 610 | VT8251 |
616 | CX700 | 611 | CX700 |
617 | 612 | ||
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index 81d43c27cf93..ea7068f1eb6b 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile | |||
@@ -20,7 +20,6 @@ obj-$(CONFIG_I2C_I810) += i2c-i810.o | |||
20 | obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o | 20 | obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o |
21 | obj-$(CONFIG_I2C_IOP3XX) += i2c-iop3xx.o | 21 | obj-$(CONFIG_I2C_IOP3XX) += i2c-iop3xx.o |
22 | obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o | 22 | obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o |
23 | obj-$(CONFIG_I2C_IXP4XX) += i2c-ixp4xx.o | ||
24 | obj-$(CONFIG_I2C_POWERMAC) += i2c-powermac.o | 23 | obj-$(CONFIG_I2C_POWERMAC) += i2c-powermac.o |
25 | obj-$(CONFIG_I2C_MPC) += i2c-mpc.o | 24 | obj-$(CONFIG_I2C_MPC) += i2c-mpc.o |
26 | obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o | 25 | obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o |
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c index 7490dc1771ae..573abe440842 100644 --- a/drivers/i2c/busses/i2c-amd756.c +++ b/drivers/i2c/busses/i2c-amd756.c | |||
@@ -334,6 +334,10 @@ static int __devinit amd756_probe(struct pci_dev *pdev, | |||
334 | int error; | 334 | int error; |
335 | u8 temp; | 335 | u8 temp; |
336 | 336 | ||
337 | /* driver_data might come from user-space, so check it */ | ||
338 | if (id->driver_data >= ARRAY_SIZE(chipname)) | ||
339 | return -EINVAL; | ||
340 | |||
337 | if (amd756_ioport) { | 341 | if (amd756_ioport) { |
338 | dev_err(&pdev->dev, "Only one device supported " | 342 | dev_err(&pdev->dev, "Only one device supported " |
339 | "(you have a strange motherboard, btw)\n"); | 343 | "(you have a strange motherboard, btw)\n"); |
@@ -405,6 +409,7 @@ static struct pci_driver amd756_driver = { | |||
405 | .id_table = amd756_ids, | 409 | .id_table = amd756_ids, |
406 | .probe = amd756_probe, | 410 | .probe = amd756_probe, |
407 | .remove = __devexit_p(amd756_remove), | 411 | .remove = __devexit_p(amd756_remove), |
412 | .dynids.use_driver_data = 1, | ||
408 | }; | 413 | }; |
409 | 414 | ||
410 | static int __init amd756_init(void) | 415 | static int __init amd756_init(void) |
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c index 2f684166c43d..1953b26da56a 100644 --- a/drivers/i2c/busses/i2c-au1550.c +++ b/drivers/i2c/busses/i2c-au1550.c | |||
@@ -30,14 +30,22 @@ | |||
30 | #include <linux/delay.h> | 30 | #include <linux/delay.h> |
31 | #include <linux/kernel.h> | 31 | #include <linux/kernel.h> |
32 | #include <linux/module.h> | 32 | #include <linux/module.h> |
33 | #include <linux/platform_device.h> | ||
33 | #include <linux/init.h> | 34 | #include <linux/init.h> |
34 | #include <linux/errno.h> | 35 | #include <linux/errno.h> |
35 | #include <linux/i2c.h> | 36 | #include <linux/i2c.h> |
37 | #include <linux/slab.h> | ||
36 | 38 | ||
37 | #include <asm/mach-au1x00/au1xxx.h> | 39 | #include <asm/mach-au1x00/au1xxx.h> |
38 | #include <asm/mach-au1x00/au1xxx_psc.h> | 40 | #include <asm/mach-au1x00/au1xxx_psc.h> |
39 | 41 | ||
40 | #include "i2c-au1550.h" | 42 | struct i2c_au1550_data { |
43 | u32 psc_base; | ||
44 | int xfer_timeout; | ||
45 | int ack_timeout; | ||
46 | struct i2c_adapter adap; | ||
47 | struct resource *ioarea; | ||
48 | }; | ||
41 | 49 | ||
42 | static int | 50 | static int |
43 | wait_xfer_done(struct i2c_au1550_data *adap) | 51 | wait_xfer_done(struct i2c_au1550_data *adap) |
@@ -105,7 +113,7 @@ wait_master_done(struct i2c_au1550_data *adap) | |||
105 | } | 113 | } |
106 | 114 | ||
107 | static int | 115 | static int |
108 | do_address(struct i2c_au1550_data *adap, unsigned int addr, int rd) | 116 | do_address(struct i2c_au1550_data *adap, unsigned int addr, int rd, int q) |
109 | { | 117 | { |
110 | volatile psc_smb_t *sp; | 118 | volatile psc_smb_t *sp; |
111 | u32 stat; | 119 | u32 stat; |
@@ -134,6 +142,10 @@ do_address(struct i2c_au1550_data *adap, unsigned int addr, int rd) | |||
134 | if (rd) | 142 | if (rd) |
135 | addr |= 1; | 143 | addr |= 1; |
136 | 144 | ||
145 | /* zero-byte xfers stop immediately */ | ||
146 | if (q) | ||
147 | addr |= PSC_SMBTXRX_STP; | ||
148 | |||
137 | /* Put byte into fifo, start up master. | 149 | /* Put byte into fifo, start up master. |
138 | */ | 150 | */ |
139 | sp->psc_smbtxrx = addr; | 151 | sp->psc_smbtxrx = addr; |
@@ -142,7 +154,7 @@ do_address(struct i2c_au1550_data *adap, unsigned int addr, int rd) | |||
142 | au_sync(); | 154 | au_sync(); |
143 | if (wait_ack(adap)) | 155 | if (wait_ack(adap)) |
144 | return -EIO; | 156 | return -EIO; |
145 | return 0; | 157 | return (q) ? wait_master_done(adap) : 0; |
146 | } | 158 | } |
147 | 159 | ||
148 | static u32 | 160 | static u32 |
@@ -262,7 +274,8 @@ au1550_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num) | |||
262 | 274 | ||
263 | for (i = 0; !err && i < num; i++) { | 275 | for (i = 0; !err && i < num; i++) { |
264 | p = &msgs[i]; | 276 | p = &msgs[i]; |
265 | err = do_address(adap, p->addr, p->flags & I2C_M_RD); | 277 | err = do_address(adap, p->addr, p->flags & I2C_M_RD, |
278 | (p->len == 0)); | ||
266 | if (err || !p->len) | 279 | if (err || !p->len) |
267 | continue; | 280 | continue; |
268 | if (p->flags & I2C_M_RD) | 281 | if (p->flags & I2C_M_RD) |
@@ -294,18 +307,48 @@ static const struct i2c_algorithm au1550_algo = { | |||
294 | * Prior to calling us, the 50MHz clock frequency and routing | 307 | * Prior to calling us, the 50MHz clock frequency and routing |
295 | * must have been set up for the PSC indicated by the adapter. | 308 | * must have been set up for the PSC indicated by the adapter. |
296 | */ | 309 | */ |
297 | int | 310 | static int __devinit |
298 | i2c_au1550_add_bus(struct i2c_adapter *i2c_adap) | 311 | i2c_au1550_probe(struct platform_device *pdev) |
299 | { | 312 | { |
300 | struct i2c_au1550_data *adap = i2c_adap->algo_data; | 313 | struct i2c_au1550_data *priv; |
301 | volatile psc_smb_t *sp; | 314 | volatile psc_smb_t *sp; |
302 | u32 stat; | 315 | struct resource *r; |
316 | u32 stat; | ||
317 | int ret; | ||
318 | |||
319 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
320 | if (!r) { | ||
321 | ret = -ENODEV; | ||
322 | goto out; | ||
323 | } | ||
324 | |||
325 | priv = kzalloc(sizeof(struct i2c_au1550_data), GFP_KERNEL); | ||
326 | if (!priv) { | ||
327 | ret = -ENOMEM; | ||
328 | goto out; | ||
329 | } | ||
330 | |||
331 | priv->ioarea = request_mem_region(r->start, r->end - r->start + 1, | ||
332 | pdev->name); | ||
333 | if (!priv->ioarea) { | ||
334 | ret = -EBUSY; | ||
335 | goto out_mem; | ||
336 | } | ||
303 | 337 | ||
304 | i2c_adap->algo = &au1550_algo; | 338 | priv->psc_base = r->start; |
339 | priv->xfer_timeout = 200; | ||
340 | priv->ack_timeout = 200; | ||
341 | |||
342 | priv->adap.id = I2C_HW_AU1550_PSC; | ||
343 | priv->adap.nr = pdev->id; | ||
344 | priv->adap.algo = &au1550_algo; | ||
345 | priv->adap.algo_data = priv; | ||
346 | priv->adap.dev.parent = &pdev->dev; | ||
347 | strlcpy(priv->adap.name, "Au1xxx PSC I2C", sizeof(priv->adap.name)); | ||
305 | 348 | ||
306 | /* Now, set up the PSC for SMBus PIO mode. | 349 | /* Now, set up the PSC for SMBus PIO mode. |
307 | */ | 350 | */ |
308 | sp = (volatile psc_smb_t *)(adap->psc_base); | 351 | sp = (volatile psc_smb_t *)priv->psc_base; |
309 | sp->psc_ctrl = PSC_CTRL_DISABLE; | 352 | sp->psc_ctrl = PSC_CTRL_DISABLE; |
310 | au_sync(); | 353 | au_sync(); |
311 | sp->psc_sel = PSC_SEL_PS_SMBUSMODE; | 354 | sp->psc_sel = PSC_SEL_PS_SMBUSMODE; |
@@ -343,87 +386,87 @@ i2c_au1550_add_bus(struct i2c_adapter *i2c_adap) | |||
343 | au_sync(); | 386 | au_sync(); |
344 | } while ((stat & PSC_SMBSTAT_DR) == 0); | 387 | } while ((stat & PSC_SMBSTAT_DR) == 0); |
345 | 388 | ||
346 | return i2c_add_adapter(i2c_adap); | 389 | ret = i2c_add_numbered_adapter(&priv->adap); |
347 | } | 390 | if (ret == 0) { |
391 | platform_set_drvdata(pdev, priv); | ||
392 | return 0; | ||
393 | } | ||
348 | 394 | ||
395 | /* disable the PSC */ | ||
396 | sp->psc_smbcfg = 0; | ||
397 | sp->psc_ctrl = PSC_CTRL_DISABLE; | ||
398 | au_sync(); | ||
349 | 399 | ||
350 | int | 400 | release_resource(priv->ioarea); |
351 | i2c_au1550_del_bus(struct i2c_adapter *adap) | 401 | kfree(priv->ioarea); |
402 | out_mem: | ||
403 | kfree(priv); | ||
404 | out: | ||
405 | return ret; | ||
406 | } | ||
407 | |||
408 | static int __devexit | ||
409 | i2c_au1550_remove(struct platform_device *pdev) | ||
352 | { | 410 | { |
353 | return i2c_del_adapter(adap); | 411 | struct i2c_au1550_data *priv = platform_get_drvdata(pdev); |
412 | volatile psc_smb_t *sp = (volatile psc_smb_t *)priv->psc_base; | ||
413 | |||
414 | platform_set_drvdata(pdev, NULL); | ||
415 | i2c_del_adapter(&priv->adap); | ||
416 | sp->psc_smbcfg = 0; | ||
417 | sp->psc_ctrl = PSC_CTRL_DISABLE; | ||
418 | au_sync(); | ||
419 | release_resource(priv->ioarea); | ||
420 | kfree(priv->ioarea); | ||
421 | kfree(priv); | ||
422 | return 0; | ||
354 | } | 423 | } |
355 | 424 | ||
356 | static int | 425 | static int |
357 | pb1550_reg(struct i2c_client *client) | 426 | i2c_au1550_suspend(struct platform_device *pdev, pm_message_t state) |
358 | { | 427 | { |
428 | struct i2c_au1550_data *priv = platform_get_drvdata(pdev); | ||
429 | volatile psc_smb_t *sp = (volatile psc_smb_t *)priv->psc_base; | ||
430 | |||
431 | sp->psc_ctrl = PSC_CTRL_SUSPEND; | ||
432 | au_sync(); | ||
359 | return 0; | 433 | return 0; |
360 | } | 434 | } |
361 | 435 | ||
362 | static int | 436 | static int |
363 | pb1550_unreg(struct i2c_client *client) | 437 | i2c_au1550_resume(struct platform_device *pdev) |
364 | { | 438 | { |
439 | struct i2c_au1550_data *priv = platform_get_drvdata(pdev); | ||
440 | volatile psc_smb_t *sp = (volatile psc_smb_t *)priv->psc_base; | ||
441 | |||
442 | sp->psc_ctrl = PSC_CTRL_ENABLE; | ||
443 | au_sync(); | ||
444 | while (!(sp->psc_smbstat & PSC_SMBSTAT_SR)) | ||
445 | au_sync(); | ||
365 | return 0; | 446 | return 0; |
366 | } | 447 | } |
367 | 448 | ||
368 | static struct i2c_au1550_data pb1550_i2c_info = { | 449 | static struct platform_driver au1xpsc_smbus_driver = { |
369 | SMBUS_PSC_BASE, 200, 200 | 450 | .driver = { |
370 | }; | 451 | .name = "au1xpsc_smbus", |
371 | 452 | .owner = THIS_MODULE, | |
372 | static struct i2c_adapter pb1550_board_adapter = { | 453 | }, |
373 | name: "pb1550 adapter", | 454 | .probe = i2c_au1550_probe, |
374 | id: I2C_HW_AU1550_PSC, | 455 | .remove = __devexit_p(i2c_au1550_remove), |
375 | algo: NULL, | 456 | .suspend = i2c_au1550_suspend, |
376 | algo_data: &pb1550_i2c_info, | 457 | .resume = i2c_au1550_resume, |
377 | client_register: pb1550_reg, | ||
378 | client_unregister: pb1550_unreg, | ||
379 | }; | 458 | }; |
380 | 459 | ||
381 | /* BIG hack to support the control interface on the Wolfson WM8731 | ||
382 | * audio codec on the Pb1550 board. We get an address and two data | ||
383 | * bytes to write, create an i2c message, and send it across the | ||
384 | * i2c transfer function. We do this here because we have access to | ||
385 | * the i2c adapter structure. | ||
386 | */ | ||
387 | static struct i2c_msg wm_i2c_msg; /* We don't want this stuff on the stack */ | ||
388 | static u8 i2cbuf[2]; | ||
389 | |||
390 | int | ||
391 | pb1550_wm_codec_write(u8 addr, u8 reg, u8 val) | ||
392 | { | ||
393 | wm_i2c_msg.addr = addr; | ||
394 | wm_i2c_msg.flags = 0; | ||
395 | wm_i2c_msg.buf = i2cbuf; | ||
396 | wm_i2c_msg.len = 2; | ||
397 | i2cbuf[0] = reg; | ||
398 | i2cbuf[1] = val; | ||
399 | |||
400 | return pb1550_board_adapter.algo->master_xfer(&pb1550_board_adapter, &wm_i2c_msg, 1); | ||
401 | } | ||
402 | |||
403 | static int __init | 460 | static int __init |
404 | i2c_au1550_init(void) | 461 | i2c_au1550_init(void) |
405 | { | 462 | { |
406 | printk(KERN_INFO "Au1550 I2C: "); | 463 | return platform_driver_register(&au1xpsc_smbus_driver); |
407 | |||
408 | /* This is where we would set up a 50MHz clock source | ||
409 | * and routing. On the Pb1550, the SMBus is PSC2, which | ||
410 | * uses a shared clock with USB. This has been already | ||
411 | * configured by Yamon as a 48MHz clock, close enough | ||
412 | * for our work. | ||
413 | */ | ||
414 | if (i2c_au1550_add_bus(&pb1550_board_adapter) < 0) { | ||
415 | printk("failed to initialize.\n"); | ||
416 | return -ENODEV; | ||
417 | } | ||
418 | |||
419 | printk("initialized.\n"); | ||
420 | return 0; | ||
421 | } | 464 | } |
422 | 465 | ||
423 | static void __exit | 466 | static void __exit |
424 | i2c_au1550_exit(void) | 467 | i2c_au1550_exit(void) |
425 | { | 468 | { |
426 | i2c_au1550_del_bus(&pb1550_board_adapter); | 469 | platform_driver_unregister(&au1xpsc_smbus_driver); |
427 | } | 470 | } |
428 | 471 | ||
429 | MODULE_AUTHOR("Dan Malek, Embedded Edge, LLC."); | 472 | MODULE_AUTHOR("Dan Malek, Embedded Edge, LLC."); |
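With the conversion to a platform driver above, the bus is no longer hard-coded for the Pb1550; board code is now expected to register a matching platform device that i2c_au1550_probe() binds to. A sketch of such a registration follows — the PSC base address and window size are placeholders for board-specific values, but the "au1xpsc_smbus" name, the IORESOURCE_MEM resource and the use of .id as the i2c bus number come straight from the probe routine above:

    #include <linux/kernel.h>
    #include <linux/ioport.h>
    #include <linux/platform_device.h>

    /* Placeholder register window -- the real base/size are board specific */
    #define MY_PSC_SMBUS_PHYS	0x11a00000
    #define MY_PSC_SMBUS_SIZE	0x100

    static struct resource au1xpsc_smbus_res[] = {
    	[0] = {
    		.start	= MY_PSC_SMBUS_PHYS,
    		.end	= MY_PSC_SMBUS_PHYS + MY_PSC_SMBUS_SIZE - 1,
    		.flags	= IORESOURCE_MEM,	/* looked up by platform_get_resource() above */
    	},
    };

    static struct platform_device au1xpsc_smbus_dev = {
    	.name		= "au1xpsc_smbus",	/* must match the platform_driver name */
    	.id		= 0,			/* becomes priv->adap.nr, i.e. i2c bus 0 */
    	.resource	= au1xpsc_smbus_res,
    	.num_resources	= ARRAY_SIZE(au1xpsc_smbus_res),
    };

    /* Board init code would then call platform_device_register(&au1xpsc_smbus_dev). */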
diff --git a/drivers/i2c/busses/i2c-au1550.h b/drivers/i2c/busses/i2c-au1550.h deleted file mode 100644 index fce15d161ae7..000000000000 --- a/drivers/i2c/busses/i2c-au1550.h +++ /dev/null | |||
@@ -1,32 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2004 Embedded Edge, LLC <dan@embeddededge.com> | ||
3 | * 2.6 port by Matt Porter <mporter@kernel.crashing.org> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
18 | */ | ||
19 | |||
20 | #ifndef I2C_AU1550_H | ||
21 | #define I2C_AU1550_H | ||
22 | |||
23 | struct i2c_au1550_data { | ||
24 | u32 psc_base; | ||
25 | int xfer_timeout; | ||
26 | int ack_timeout; | ||
27 | }; | ||
28 | |||
29 | int i2c_au1550_add_bus(struct i2c_adapter *); | ||
30 | int i2c_au1550_del_bus(struct i2c_adapter *); | ||
31 | |||
32 | #endif /* I2C_AU1550_H */ | ||
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c index 67224a424aba..7dbdaeb707a9 100644 --- a/drivers/i2c/busses/i2c-bfin-twi.c +++ b/drivers/i2c/busses/i2c-bfin-twi.c | |||
@@ -550,6 +550,7 @@ static int i2c_bfin_twi_probe(struct platform_device *dev) | |||
550 | 550 | ||
551 | p_adap = &iface->adap; | 551 | p_adap = &iface->adap; |
552 | p_adap->id = I2C_HW_BLACKFIN; | 552 | p_adap->id = I2C_HW_BLACKFIN; |
553 | p_adap->nr = dev->id; | ||
553 | strlcpy(p_adap->name, dev->name, sizeof(p_adap->name)); | 554 | strlcpy(p_adap->name, dev->name, sizeof(p_adap->name)); |
554 | p_adap->algo = &bfin_twi_algorithm; | 555 | p_adap->algo = &bfin_twi_algorithm; |
555 | p_adap->algo_data = iface; | 556 | p_adap->algo_data = iface; |
@@ -576,7 +577,7 @@ static int i2c_bfin_twi_probe(struct platform_device *dev) | |||
576 | bfin_write_TWI_CONTROL(bfin_read_TWI_CONTROL() | TWI_ENA); | 577 | bfin_write_TWI_CONTROL(bfin_read_TWI_CONTROL() | TWI_ENA); |
577 | SSYNC(); | 578 | SSYNC(); |
578 | 579 | ||
579 | rc = i2c_add_adapter(p_adap); | 580 | rc = i2c_add_numbered_adapter(p_adap); |
580 | if (rc < 0) | 581 | if (rc < 0) |
581 | free_irq(iface->irq, iface); | 582 | free_irq(iface->irq, iface); |
582 | else | 583 | else |
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c index 67679882ebef..cce5a614758d 100644 --- a/drivers/i2c/busses/i2c-davinci.c +++ b/drivers/i2c/busses/i2c-davinci.c | |||
@@ -510,7 +510,6 @@ static int davinci_i2c_probe(struct platform_device *pdev) | |||
510 | 510 | ||
511 | /* FIXME */ | 511 | /* FIXME */ |
512 | adap->timeout = 1; | 512 | adap->timeout = 1; |
513 | adap->retries = 1; | ||
514 | 513 | ||
515 | adap->nr = pdev->id; | 514 | adap->nr = pdev->id; |
516 | r = i2c_add_numbered_adapter(adap); | 515 | r = i2c_add_numbered_adapter(adap); |
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index ac27e5f84ebe..aa9157913b9a 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c | |||
@@ -4,6 +4,7 @@ | |||
4 | Copyright (c) 1998 - 2002 Frodo Looijaard <frodol@dds.nl>, | 4 | Copyright (c) 1998 - 2002 Frodo Looijaard <frodol@dds.nl>, |
5 | Philip Edelbrock <phil@netroedge.com>, and Mark D. Studebaker | 5 | Philip Edelbrock <phil@netroedge.com>, and Mark D. Studebaker |
6 | <mdsxyz123@yahoo.com> | 6 | <mdsxyz123@yahoo.com> |
7 | Copyright (C) 2007 Jean Delvare <khali@linux-fr.org> | ||
7 | 8 | ||
8 | This program is free software; you can redistribute it and/or modify | 9 | This program is free software; you can redistribute it and/or modify |
9 | it under the terms of the GNU General Public License as published by | 10 | it under the terms of the GNU General Public License as published by |
@@ -21,25 +22,34 @@ | |||
21 | */ | 22 | */ |
22 | 23 | ||
23 | /* | 24 | /* |
24 | SUPPORTED DEVICES PCI ID | 25 | Supports the following Intel I/O Controller Hubs (ICH): |
25 | 82801AA 2413 | 26 | |
26 | 82801AB 2423 | 27 | I/O Block I2C |
27 | 82801BA 2443 | 28 | region SMBus Block proc. block |
28 | 82801CA/CAM 2483 | 29 | Chip name PCI ID size PEC buffer call read |
29 | 82801DB 24C3 (HW PEC supported) | 30 | ---------------------------------------------------------------------- |
30 | 82801EB 24D3 (HW PEC supported) | 31 | 82801AA (ICH) 0x2413 16 no no no no |
31 | 6300ESB 25A4 | 32 | 82801AB (ICH0) 0x2423 16 no no no no |
32 | ICH6 266A | 33 | 82801BA (ICH2) 0x2443 16 no no no no |
33 | ICH7 27DA | 34 | 82801CA (ICH3) 0x2483 32 soft no no no |
34 | ESB2 269B | 35 | 82801DB (ICH4) 0x24c3 32 hard yes no no |
35 | ICH8 283E | 36 | 82801E (ICH5) 0x24d3 32 hard yes yes yes |
36 | ICH9 2930 | 37 | 6300ESB 0x25a4 32 hard yes yes yes |
37 | Tolapai 5032 | 38 | 82801F (ICH6) 0x266a 32 hard yes yes yes |
38 | This driver supports several versions of Intel's I/O Controller Hubs (ICH). | 39 | 6310ESB/6320ESB 0x269b 32 hard yes yes yes |
39 | For SMBus support, they are similar to the PIIX4 and are part | 40 | 82801G (ICH7) 0x27da 32 hard yes yes yes |
40 | of Intel's '810' and other chipsets. | 41 | 82801H (ICH8) 0x283e 32 hard yes yes yes |
41 | See the file Documentation/i2c/busses/i2c-i801 for details. | 42 | 82801I (ICH9) 0x2930 32 hard yes yes yes |
42 | I2C Block Read and Process Call are not supported. | 43 | Tolapai 0x5032 32 hard yes ? ? |
44 | |||
45 | Features supported by this driver: | ||
46 | Software PEC no | ||
47 | Hardware PEC yes | ||
48 | Block buffer yes | ||
49 | Block process call transaction no | ||
50 | I2C block read transaction yes (doesn't use the block buffer) | ||
51 | |||
52 | See the file Documentation/i2c/busses/i2c-i801 for details. | ||
43 | */ | 53 | */ |
44 | 54 | ||
45 | /* Note: we assume there can only be one I801, with one SMBus interface */ | 55 | /* Note: we assume there can only be one I801, with one SMBus interface */ |
@@ -62,9 +72,9 @@ | |||
62 | #define SMBHSTDAT0 (5 + i801_smba) | 72 | #define SMBHSTDAT0 (5 + i801_smba) |
63 | #define SMBHSTDAT1 (6 + i801_smba) | 73 | #define SMBHSTDAT1 (6 + i801_smba) |
64 | #define SMBBLKDAT (7 + i801_smba) | 74 | #define SMBBLKDAT (7 + i801_smba) |
65 | #define SMBPEC (8 + i801_smba) /* ICH4 only */ | 75 | #define SMBPEC (8 + i801_smba) /* ICH3 and later */ |
66 | #define SMBAUXSTS (12 + i801_smba) /* ICH4 only */ | 76 | #define SMBAUXSTS (12 + i801_smba) /* ICH4 and later */ |
67 | #define SMBAUXCTL (13 + i801_smba) /* ICH4 only */ | 77 | #define SMBAUXCTL (13 + i801_smba) /* ICH4 and later */ |
68 | 78 | ||
69 | /* PCI Address Constants */ | 79 | /* PCI Address Constants */ |
70 | #define SMBBAR 4 | 80 | #define SMBBAR 4 |
@@ -91,13 +101,13 @@ | |||
91 | #define I801_BYTE 0x04 | 101 | #define I801_BYTE 0x04 |
92 | #define I801_BYTE_DATA 0x08 | 102 | #define I801_BYTE_DATA 0x08 |
93 | #define I801_WORD_DATA 0x0C | 103 | #define I801_WORD_DATA 0x0C |
94 | #define I801_PROC_CALL 0x10 /* later chips only, unimplemented */ | 104 | #define I801_PROC_CALL 0x10 /* unimplemented */ |
95 | #define I801_BLOCK_DATA 0x14 | 105 | #define I801_BLOCK_DATA 0x14 |
96 | #define I801_I2C_BLOCK_DATA 0x18 /* unimplemented */ | 106 | #define I801_I2C_BLOCK_DATA 0x18 /* ICH5 and later */ |
97 | #define I801_BLOCK_LAST 0x34 | 107 | #define I801_BLOCK_LAST 0x34 |
98 | #define I801_I2C_BLOCK_LAST 0x38 /* unimplemented */ | 108 | #define I801_I2C_BLOCK_LAST 0x38 /* ICH5 and later */ |
99 | #define I801_START 0x40 | 109 | #define I801_START 0x40 |
100 | #define I801_PEC_EN 0x80 /* ICH4 only */ | 110 | #define I801_PEC_EN 0x80 /* ICH3 and later */ |
101 | 111 | ||
102 | /* I801 Hosts Status register bits */ | 112 | /* I801 Hosts Status register bits */ |
103 | #define SMBHSTSTS_BYTE_DONE 0x80 | 113 | #define SMBHSTSTS_BYTE_DONE 0x80 |
@@ -113,7 +123,12 @@ static unsigned long i801_smba; | |||
113 | static unsigned char i801_original_hstcfg; | 123 | static unsigned char i801_original_hstcfg; |
114 | static struct pci_driver i801_driver; | 124 | static struct pci_driver i801_driver; |
115 | static struct pci_dev *I801_dev; | 125 | static struct pci_dev *I801_dev; |
116 | static int isich4; | 126 | |
127 | #define FEATURE_SMBUS_PEC (1 << 0) | ||
128 | #define FEATURE_BLOCK_BUFFER (1 << 1) | ||
129 | #define FEATURE_BLOCK_PROC (1 << 2) | ||
130 | #define FEATURE_I2C_BLOCK_READ (1 << 3) | ||
131 | static unsigned int i801_features; | ||
117 | 132 | ||
118 | static int i801_transaction(int xact) | 133 | static int i801_transaction(int xact) |
119 | { | 134 | { |
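The hunk above replaces the single isich4 flag with a capability bitmask, so PEC, the block buffer, block process call and I2C block read can each be advertised and tested per chip. A small sketch of how such a mask is typically queried; the helper below is hypothetical, the driver itself tests i801_features inline as the following hunks show.

/* Hypothetical convenience helper; illustrative only. */
static inline int i801_has_feature(unsigned int feature)
{
	return (i801_features & feature) != 0;
}

/* For example:
 *	if (i801_has_feature(FEATURE_SMBUS_PEC))
 *		... enable hardware PEC for this transaction ...
 *	if (i801_has_feature(FEATURE_I2C_BLOCK_READ))
 *		... allow I2C_SMBUS_I2C_BLOCK_DATA reads ...
 */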
@@ -242,7 +257,8 @@ static int i801_block_transaction_by_block(union i2c_smbus_data *data, | |||
242 | } | 257 | } |
243 | 258 | ||
244 | static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data, | 259 | static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data, |
245 | char read_write, int hwpec) | 260 | char read_write, int command, |
261 | int hwpec) | ||
246 | { | 262 | { |
247 | int i, len; | 263 | int i, len; |
248 | int smbcmd; | 264 | int smbcmd; |
@@ -259,16 +275,24 @@ static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data, | |||
259 | } | 275 | } |
260 | 276 | ||
261 | for (i = 1; i <= len; i++) { | 277 | for (i = 1; i <= len; i++) { |
262 | if (i == len && read_write == I2C_SMBUS_READ) | 278 | if (i == len && read_write == I2C_SMBUS_READ) { |
263 | smbcmd = I801_BLOCK_LAST; | 279 | if (command == I2C_SMBUS_I2C_BLOCK_DATA) |
264 | else | 280 | smbcmd = I801_I2C_BLOCK_LAST; |
265 | smbcmd = I801_BLOCK_DATA; | 281 | else |
282 | smbcmd = I801_BLOCK_LAST; | ||
283 | } else { | ||
284 | if (command == I2C_SMBUS_I2C_BLOCK_DATA | ||
285 | && read_write == I2C_SMBUS_READ) | ||
286 | smbcmd = I801_I2C_BLOCK_DATA; | ||
287 | else | ||
288 | smbcmd = I801_BLOCK_DATA; | ||
289 | } | ||
266 | outb_p(smbcmd | ENABLE_INT9, SMBHSTCNT); | 290 | outb_p(smbcmd | ENABLE_INT9, SMBHSTCNT); |
267 | 291 | ||
268 | dev_dbg(&I801_dev->dev, "Block (pre %d): CNT=%02x, CMD=%02x, " | 292 | dev_dbg(&I801_dev->dev, "Block (pre %d): CNT=%02x, CMD=%02x, " |
269 | "ADD=%02x, DAT0=%02x, BLKDAT=%02x\n", i, | 293 | "ADD=%02x, DAT0=%02x, DAT1=%02x, BLKDAT=%02x\n", i, |
270 | inb_p(SMBHSTCNT), inb_p(SMBHSTCMD), inb_p(SMBHSTADD), | 294 | inb_p(SMBHSTCNT), inb_p(SMBHSTCMD), inb_p(SMBHSTADD), |
271 | inb_p(SMBHSTDAT0), inb_p(SMBBLKDAT)); | 295 | inb_p(SMBHSTDAT0), inb_p(SMBHSTDAT1), inb_p(SMBBLKDAT)); |
272 | 296 | ||
273 | /* Make sure the SMBus host is ready to start transmitting */ | 297 | /* Make sure the SMBus host is ready to start transmitting */ |
274 | temp = inb_p(SMBHSTSTS); | 298 | temp = inb_p(SMBHSTSTS); |
@@ -332,7 +356,8 @@ static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data, | |||
332 | dev_dbg(&I801_dev->dev, "Error: no response!\n"); | 356 | dev_dbg(&I801_dev->dev, "Error: no response!\n"); |
333 | } | 357 | } |
334 | 358 | ||
335 | if (i == 1 && read_write == I2C_SMBUS_READ) { | 359 | if (i == 1 && read_write == I2C_SMBUS_READ |
360 | && command != I2C_SMBUS_I2C_BLOCK_DATA) { | ||
336 | len = inb_p(SMBHSTDAT0); | 361 | len = inb_p(SMBHSTDAT0); |
337 | if (len < 1 || len > I2C_SMBUS_BLOCK_MAX) | 362 | if (len < 1 || len > I2C_SMBUS_BLOCK_MAX) |
338 | return -1; | 363 | return -1; |
@@ -353,9 +378,9 @@ static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data, | |||
353 | temp); | 378 | temp); |
354 | } | 379 | } |
355 | dev_dbg(&I801_dev->dev, "Block (post %d): CNT=%02x, CMD=%02x, " | 380 | dev_dbg(&I801_dev->dev, "Block (post %d): CNT=%02x, CMD=%02x, " |
356 | "ADD=%02x, DAT0=%02x, BLKDAT=%02x\n", i, | 381 | "ADD=%02x, DAT0=%02x, DAT1=%02x, BLKDAT=%02x\n", i, |
357 | inb_p(SMBHSTCNT), inb_p(SMBHSTCMD), inb_p(SMBHSTADD), | 382 | inb_p(SMBHSTCNT), inb_p(SMBHSTCMD), inb_p(SMBHSTADD), |
358 | inb_p(SMBHSTDAT0), inb_p(SMBBLKDAT)); | 383 | inb_p(SMBHSTDAT0), inb_p(SMBHSTDAT1), inb_p(SMBBLKDAT)); |
359 | 384 | ||
360 | if (result < 0) | 385 | if (result < 0) |
361 | return result; | 386 | return result; |
@@ -384,33 +409,38 @@ static int i801_block_transaction(union i2c_smbus_data *data, char read_write, | |||
384 | pci_read_config_byte(I801_dev, SMBHSTCFG, &hostc); | 409 | pci_read_config_byte(I801_dev, SMBHSTCFG, &hostc); |
385 | pci_write_config_byte(I801_dev, SMBHSTCFG, | 410 | pci_write_config_byte(I801_dev, SMBHSTCFG, |
386 | hostc | SMBHSTCFG_I2C_EN); | 411 | hostc | SMBHSTCFG_I2C_EN); |
387 | } else { | 412 | } else if (!(i801_features & FEATURE_I2C_BLOCK_READ)) { |
388 | dev_err(&I801_dev->dev, | 413 | dev_err(&I801_dev->dev, |
389 | "I2C_SMBUS_I2C_BLOCK_READ not DB!\n"); | 414 | "I2C block read is unsupported!\n"); |
390 | return -1; | 415 | return -1; |
391 | } | 416 | } |
392 | } | 417 | } |
393 | 418 | ||
394 | if (read_write == I2C_SMBUS_WRITE) { | 419 | if (read_write == I2C_SMBUS_WRITE |
420 | || command == I2C_SMBUS_I2C_BLOCK_DATA) { | ||
395 | if (data->block[0] < 1) | 421 | if (data->block[0] < 1) |
396 | data->block[0] = 1; | 422 | data->block[0] = 1; |
397 | if (data->block[0] > I2C_SMBUS_BLOCK_MAX) | 423 | if (data->block[0] > I2C_SMBUS_BLOCK_MAX) |
398 | data->block[0] = I2C_SMBUS_BLOCK_MAX; | 424 | data->block[0] = I2C_SMBUS_BLOCK_MAX; |
399 | } else { | 425 | } else { |
400 | data->block[0] = 32; /* max for reads */ | 426 | data->block[0] = 32; /* max for SMBus block reads */ |
401 | } | 427 | } |
402 | 428 | ||
403 | if (isich4 && i801_set_block_buffer_mode() == 0 ) | 429 | if ((i801_features & FEATURE_BLOCK_BUFFER) |
430 | && !(command == I2C_SMBUS_I2C_BLOCK_DATA | ||
431 | && read_write == I2C_SMBUS_READ) | ||
432 | && i801_set_block_buffer_mode() == 0) | ||
404 | result = i801_block_transaction_by_block(data, read_write, | 433 | result = i801_block_transaction_by_block(data, read_write, |
405 | hwpec); | 434 | hwpec); |
406 | else | 435 | else |
407 | result = i801_block_transaction_byte_by_byte(data, read_write, | 436 | result = i801_block_transaction_byte_by_byte(data, read_write, |
408 | hwpec); | 437 | command, hwpec); |
409 | 438 | ||
410 | if (result == 0 && hwpec) | 439 | if (result == 0 && hwpec) |
411 | i801_wait_hwpec(); | 440 | i801_wait_hwpec(); |
412 | 441 | ||
413 | if (command == I2C_SMBUS_I2C_BLOCK_DATA) { | 442 | if (command == I2C_SMBUS_I2C_BLOCK_DATA |
443 | && read_write == I2C_SMBUS_WRITE) { | ||
414 | /* restore saved configuration register value */ | 444 | /* restore saved configuration register value */ |
415 | pci_write_config_byte(I801_dev, SMBHSTCFG, hostc); | 445 | pci_write_config_byte(I801_dev, SMBHSTCFG, hostc); |
416 | } | 446 | } |
@@ -426,7 +456,7 @@ static s32 i801_access(struct i2c_adapter * adap, u16 addr, | |||
426 | int block = 0; | 456 | int block = 0; |
427 | int ret, xact = 0; | 457 | int ret, xact = 0; |
428 | 458 | ||
429 | hwpec = isich4 && (flags & I2C_CLIENT_PEC) | 459 | hwpec = (i801_features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC) |
430 | && size != I2C_SMBUS_QUICK | 460 | && size != I2C_SMBUS_QUICK |
431 | && size != I2C_SMBUS_I2C_BLOCK_DATA; | 461 | && size != I2C_SMBUS_I2C_BLOCK_DATA; |
432 | 462 | ||
@@ -462,12 +492,23 @@ static s32 i801_access(struct i2c_adapter * adap, u16 addr, | |||
462 | xact = I801_WORD_DATA; | 492 | xact = I801_WORD_DATA; |
463 | break; | 493 | break; |
464 | case I2C_SMBUS_BLOCK_DATA: | 494 | case I2C_SMBUS_BLOCK_DATA: |
465 | case I2C_SMBUS_I2C_BLOCK_DATA: | ||
466 | outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), | 495 | outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), |
467 | SMBHSTADD); | 496 | SMBHSTADD); |
468 | outb_p(command, SMBHSTCMD); | 497 | outb_p(command, SMBHSTCMD); |
469 | block = 1; | 498 | block = 1; |
470 | break; | 499 | break; |
500 | case I2C_SMBUS_I2C_BLOCK_DATA: | ||
501 | /* NB: page 240 of ICH5 datasheet shows that the R/#W | ||
502 | * bit should be cleared here, even when reading */ | ||
503 | outb_p((addr & 0x7f) << 1, SMBHSTADD); | ||
504 | if (read_write == I2C_SMBUS_READ) { | ||
505 | /* NB: page 240 of ICH5 datasheet also shows | ||
506 | * that DATA1 is the cmd field when reading */ | ||
507 | outb_p(command, SMBHSTDAT1); | ||
508 | } else | ||
509 | outb_p(command, SMBHSTCMD); | ||
510 | block = 1; | ||
511 | break; | ||
471 | case I2C_SMBUS_PROC_CALL: | 512 | case I2C_SMBUS_PROC_CALL: |
472 | default: | 513 | default: |
473 | dev_err(&I801_dev->dev, "Unsupported transaction %d\n", size); | 514 | dev_err(&I801_dev->dev, "Unsupported transaction %d\n", size); |
@@ -487,7 +528,7 @@ static s32 i801_access(struct i2c_adapter * adap, u16 addr, | |||
487 | /* Some BIOSes don't like it when PEC is enabled at reboot or resume | 528 | /* Some BIOSes don't like it when PEC is enabled at reboot or resume |
488 | time, so we forcibly disable it after every transaction. Turn off | 529 | time, so we forcibly disable it after every transaction. Turn off |
489 | E32B for the same reason. */ | 530 | E32B for the same reason. */ |
490 | if (hwpec) | 531 | if (hwpec || block) |
491 | outb_p(inb_p(SMBAUXCTL) & ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), | 532 | outb_p(inb_p(SMBAUXCTL) & ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), |
492 | SMBAUXCTL); | 533 | SMBAUXCTL); |
493 | 534 | ||
@@ -514,9 +555,11 @@ static s32 i801_access(struct i2c_adapter * adap, u16 addr, | |||
514 | static u32 i801_func(struct i2c_adapter *adapter) | 555 | static u32 i801_func(struct i2c_adapter *adapter) |
515 | { | 556 | { |
516 | return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | | 557 | return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | |
517 | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | | 558 | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | |
518 | I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_WRITE_I2C_BLOCK | 559 | I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_WRITE_I2C_BLOCK | |
519 | | (isich4 ? I2C_FUNC_SMBUS_PEC : 0); | 560 | ((i801_features & FEATURE_SMBUS_PEC) ? I2C_FUNC_SMBUS_PEC : 0) | |
561 | ((i801_features & FEATURE_I2C_BLOCK_READ) ? | ||
562 | I2C_FUNC_SMBUS_READ_I2C_BLOCK : 0); | ||
520 | } | 563 | } |
521 | 564 | ||
522 | static const struct i2c_algorithm smbus_algorithm = { | 565 | static const struct i2c_algorithm smbus_algorithm = { |
@@ -556,8 +599,8 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id | |||
556 | int err; | 599 | int err; |
557 | 600 | ||
558 | I801_dev = dev; | 601 | I801_dev = dev; |
602 | i801_features = 0; | ||
559 | switch (dev->device) { | 603 | switch (dev->device) { |
560 | case PCI_DEVICE_ID_INTEL_82801DB_3: | ||
561 | case PCI_DEVICE_ID_INTEL_82801EB_3: | 604 | case PCI_DEVICE_ID_INTEL_82801EB_3: |
562 | case PCI_DEVICE_ID_INTEL_ESB_4: | 605 | case PCI_DEVICE_ID_INTEL_ESB_4: |
563 | case PCI_DEVICE_ID_INTEL_ICH6_16: | 606 | case PCI_DEVICE_ID_INTEL_ICH6_16: |
@@ -565,11 +608,13 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id | |||
565 | case PCI_DEVICE_ID_INTEL_ESB2_17: | 608 | case PCI_DEVICE_ID_INTEL_ESB2_17: |
566 | case PCI_DEVICE_ID_INTEL_ICH8_5: | 609 | case PCI_DEVICE_ID_INTEL_ICH8_5: |
567 | case PCI_DEVICE_ID_INTEL_ICH9_6: | 610 | case PCI_DEVICE_ID_INTEL_ICH9_6: |
611 | i801_features |= FEATURE_I2C_BLOCK_READ; | ||
612 | /* fall through */ | ||
613 | case PCI_DEVICE_ID_INTEL_82801DB_3: | ||
568 | case PCI_DEVICE_ID_INTEL_TOLAPAI_1: | 614 | case PCI_DEVICE_ID_INTEL_TOLAPAI_1: |
569 | isich4 = 1; | 615 | i801_features |= FEATURE_SMBUS_PEC; |
616 | i801_features |= FEATURE_BLOCK_BUFFER; | ||
570 | break; | 617 | break; |
571 | default: | ||
572 | isich4 = 0; | ||
573 | } | 618 | } |
574 | 619 | ||
575 | err = pci_enable_device(dev); | 620 | err = pci_enable_device(dev); |
@@ -610,6 +655,11 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id | |||
610 | else | 655 | else |
611 | dev_dbg(&dev->dev, "SMBus using PCI Interrupt\n"); | 656 | dev_dbg(&dev->dev, "SMBus using PCI Interrupt\n"); |
612 | 657 | ||
658 | /* Clear special mode bits */ | ||
659 | if (i801_features & (FEATURE_SMBUS_PEC | FEATURE_BLOCK_BUFFER)) | ||
660 | outb_p(inb_p(SMBAUXCTL) & ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), | ||
661 | SMBAUXCTL); | ||
662 | |||
613 | /* set up the sysfs linkage to our parent device */ | 663 | /* set up the sysfs linkage to our parent device */ |
614 | i801_adapter.dev.parent = &dev->dev; | 664 | i801_adapter.dev.parent = &dev->dev; |
615 | 665 | ||
@@ -678,9 +728,8 @@ static void __exit i2c_i801_exit(void) | |||
678 | pci_unregister_driver(&i801_driver); | 728 | pci_unregister_driver(&i801_driver); |
679 | } | 729 | } |
680 | 730 | ||
681 | MODULE_AUTHOR ("Frodo Looijaard <frodol@dds.nl>, " | 731 | MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>, " |
682 | "Philip Edelbrock <phil@netroedge.com>, " | 732 | "Jean Delvare <khali@linux-fr.org>"); |
683 | "and Mark D. Studebaker <mdsxyz123@yahoo.com>"); | ||
684 | MODULE_DESCRIPTION("I801 SMBus driver"); | 733 | MODULE_DESCRIPTION("I801 SMBus driver"); |
685 | MODULE_LICENSE("GPL"); | 734 | MODULE_LICENSE("GPL"); |
686 | 735 | ||
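With the changes above, the i801 driver implements I2C block read on ICH5 and later and advertises it through I2C_FUNC_SMBUS_READ_I2C_BLOCK. A hedged sketch of how a client driver would consume the new capability; the helper is hypothetical, and the four-argument i2c_smbus_read_i2c_block_data() prototype with an explicit length is assumed here and may differ in older trees.

#include <linux/i2c.h>

/* Hypothetical client-side helper, for illustration only. */
static int example_read_block(struct i2c_client *client, u8 cmd, u8 *buf)
{
	if (!i2c_check_functionality(client->adapter,
				     I2C_FUNC_SMBUS_READ_I2C_BLOCK))
		return -ENODEV;		/* adapter cannot do I2C block reads */

	/* Returns the number of bytes read (at most I2C_SMBUS_BLOCK_MAX = 32)
	 * or a negative error code. */
	return i2c_smbus_read_i2c_block_data(client, cmd,
					     I2C_SMBUS_BLOCK_MAX, buf);
}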
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c index 9b43ff7270d0..7c7eb0cfeceb 100644 --- a/drivers/i2c/busses/i2c-ibm_iic.c +++ b/drivers/i2c/busses/i2c-ibm_iic.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * Copyright (c) 2003, 2004 Zultys Technologies. | 6 | * Copyright (c) 2003, 2004 Zultys Technologies. |
7 | * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> | 7 | * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> |
8 | * | 8 | * |
9 | * Based on original work by | 9 | * Based on original work by |
10 | * Ian DaSilva <idasilva@mvista.com> | 10 | * Ian DaSilva <idasilva@mvista.com> |
11 | * Armin Kuster <akuster@mvista.com> | 11 | * Armin Kuster <akuster@mvista.com> |
12 | * Matt Porter <mporter@mvista.com> | 12 | * Matt Porter <mporter@mvista.com> |
@@ -86,8 +86,8 @@ static void dump_iic_regs(const char* header, struct ibm_iic_private* dev) | |||
86 | KERN_DEBUG " sts = 0x%02x, extsts = 0x%02x\n" | 86 | KERN_DEBUG " sts = 0x%02x, extsts = 0x%02x\n" |
87 | KERN_DEBUG " clkdiv = 0x%02x, xfrcnt = 0x%02x\n" | 87 | KERN_DEBUG " clkdiv = 0x%02x, xfrcnt = 0x%02x\n" |
88 | KERN_DEBUG " xtcntlss = 0x%02x, directcntl = 0x%02x\n", | 88 | KERN_DEBUG " xtcntlss = 0x%02x, directcntl = 0x%02x\n", |
89 | in_8(&iic->cntl), in_8(&iic->mdcntl), in_8(&iic->sts), | 89 | in_8(&iic->cntl), in_8(&iic->mdcntl), in_8(&iic->sts), |
90 | in_8(&iic->extsts), in_8(&iic->clkdiv), in_8(&iic->xfrcnt), | 90 | in_8(&iic->extsts), in_8(&iic->clkdiv), in_8(&iic->xfrcnt), |
91 | in_8(&iic->xtcntlss), in_8(&iic->directcntl)); | 91 | in_8(&iic->xtcntlss), in_8(&iic->directcntl)); |
92 | } | 92 | } |
93 | # define DUMP_REGS(h,dev) dump_iic_regs((h),(dev)) | 93 | # define DUMP_REGS(h,dev) dump_iic_regs((h),(dev)) |
@@ -125,7 +125,7 @@ static inline void iic_interrupt_mode(struct ibm_iic_private* dev, int enable) | |||
125 | { | 125 | { |
126 | out_8(&dev->vaddr->intmsk, enable ? INTRMSK_EIMTC : 0); | 126 | out_8(&dev->vaddr->intmsk, enable ? INTRMSK_EIMTC : 0); |
127 | } | 127 | } |
128 | 128 | ||
129 | /* | 129 | /* |
130 | * Initialize IIC interface. | 130 | * Initialize IIC interface. |
131 | */ | 131 | */ |
@@ -134,7 +134,7 @@ static void iic_dev_init(struct ibm_iic_private* dev) | |||
134 | volatile struct iic_regs __iomem *iic = dev->vaddr; | 134 | volatile struct iic_regs __iomem *iic = dev->vaddr; |
135 | 135 | ||
136 | DBG("%d: init\n", dev->idx); | 136 | DBG("%d: init\n", dev->idx); |
137 | 137 | ||
138 | /* Clear master address */ | 138 | /* Clear master address */ |
139 | out_8(&iic->lmadr, 0); | 139 | out_8(&iic->lmadr, 0); |
140 | out_8(&iic->hmadr, 0); | 140 | out_8(&iic->hmadr, 0); |
@@ -160,7 +160,7 @@ static void iic_dev_init(struct ibm_iic_private* dev) | |||
160 | 160 | ||
161 | /* Clear control register */ | 161 | /* Clear control register */ |
162 | out_8(&iic->cntl, 0); | 162 | out_8(&iic->cntl, 0); |
163 | 163 | ||
164 | /* Enable interrupts if possible */ | 164 | /* Enable interrupts if possible */ |
165 | iic_interrupt_mode(dev, dev->irq >= 0); | 165 | iic_interrupt_mode(dev, dev->irq >= 0); |
166 | 166 | ||
@@ -171,7 +171,7 @@ static void iic_dev_init(struct ibm_iic_private* dev) | |||
171 | DUMP_REGS("iic_init", dev); | 171 | DUMP_REGS("iic_init", dev); |
172 | } | 172 | } |
173 | 173 | ||
174 | /* | 174 | /* |
175 | * Reset IIC interface | 175 | * Reset IIC interface |
176 | */ | 176 | */ |
177 | static void iic_dev_reset(struct ibm_iic_private* dev) | 177 | static void iic_dev_reset(struct ibm_iic_private* dev) |
@@ -179,42 +179,42 @@ static void iic_dev_reset(struct ibm_iic_private* dev) | |||
179 | volatile struct iic_regs __iomem *iic = dev->vaddr; | 179 | volatile struct iic_regs __iomem *iic = dev->vaddr; |
180 | int i; | 180 | int i; |
181 | u8 dc; | 181 | u8 dc; |
182 | 182 | ||
183 | DBG("%d: soft reset\n", dev->idx); | 183 | DBG("%d: soft reset\n", dev->idx); |
184 | DUMP_REGS("reset", dev); | 184 | DUMP_REGS("reset", dev); |
185 | 185 | ||
186 | /* Place chip in the reset state */ | 186 | /* Place chip in the reset state */ |
187 | out_8(&iic->xtcntlss, XTCNTLSS_SRST); | 187 | out_8(&iic->xtcntlss, XTCNTLSS_SRST); |
188 | 188 | ||
189 | /* Check if bus is free */ | 189 | /* Check if bus is free */ |
190 | dc = in_8(&iic->directcntl); | 190 | dc = in_8(&iic->directcntl); |
191 | if (!DIRCTNL_FREE(dc)){ | 191 | if (!DIRCTNL_FREE(dc)){ |
192 | DBG("%d: trying to regain bus control\n", dev->idx); | 192 | DBG("%d: trying to regain bus control\n", dev->idx); |
193 | 193 | ||
194 | /* Try to set bus free state */ | 194 | /* Try to set bus free state */ |
195 | out_8(&iic->directcntl, DIRCNTL_SDAC | DIRCNTL_SCC); | 195 | out_8(&iic->directcntl, DIRCNTL_SDAC | DIRCNTL_SCC); |
196 | 196 | ||
197 | /* Wait until we regain bus control */ | 197 | /* Wait until we regain bus control */ |
198 | for (i = 0; i < 100; ++i){ | 198 | for (i = 0; i < 100; ++i){ |
199 | dc = in_8(&iic->directcntl); | 199 | dc = in_8(&iic->directcntl); |
200 | if (DIRCTNL_FREE(dc)) | 200 | if (DIRCTNL_FREE(dc)) |
201 | break; | 201 | break; |
202 | 202 | ||
203 | /* Toggle SCL line */ | 203 | /* Toggle SCL line */ |
204 | dc ^= DIRCNTL_SCC; | 204 | dc ^= DIRCNTL_SCC; |
205 | out_8(&iic->directcntl, dc); | 205 | out_8(&iic->directcntl, dc); |
206 | udelay(10); | 206 | udelay(10); |
207 | dc ^= DIRCNTL_SCC; | 207 | dc ^= DIRCNTL_SCC; |
208 | out_8(&iic->directcntl, dc); | 208 | out_8(&iic->directcntl, dc); |
209 | 209 | ||
210 | /* be nice */ | 210 | /* be nice */ |
211 | cond_resched(); | 211 | cond_resched(); |
212 | } | 212 | } |
213 | } | 213 | } |
214 | 214 | ||
215 | /* Remove reset */ | 215 | /* Remove reset */ |
216 | out_8(&iic->xtcntlss, 0); | 216 | out_8(&iic->xtcntlss, 0); |
217 | 217 | ||
218 | /* Reinitialize interface */ | 218 | /* Reinitialize interface */ |
219 | iic_dev_init(dev); | 219 | iic_dev_init(dev); |
220 | } | 220 | } |
@@ -324,14 +324,14 @@ static irqreturn_t iic_handler(int irq, void *dev_id) | |||
324 | { | 324 | { |
325 | struct ibm_iic_private* dev = (struct ibm_iic_private*)dev_id; | 325 | struct ibm_iic_private* dev = (struct ibm_iic_private*)dev_id; |
326 | volatile struct iic_regs __iomem *iic = dev->vaddr; | 326 | volatile struct iic_regs __iomem *iic = dev->vaddr; |
327 | 327 | ||
328 | DBG2("%d: irq handler, STS = 0x%02x, EXTSTS = 0x%02x\n", | 328 | DBG2("%d: irq handler, STS = 0x%02x, EXTSTS = 0x%02x\n", |
329 | dev->idx, in_8(&iic->sts), in_8(&iic->extsts)); | 329 | dev->idx, in_8(&iic->sts), in_8(&iic->extsts)); |
330 | 330 | ||
331 | /* Acknowledge IRQ and wakeup iic_wait_for_tc */ | 331 | /* Acknowledge IRQ and wakeup iic_wait_for_tc */ |
332 | out_8(&iic->sts, STS_IRQA | STS_SCMP); | 332 | out_8(&iic->sts, STS_IRQA | STS_SCMP); |
333 | wake_up_interruptible(&dev->wq); | 333 | wake_up_interruptible(&dev->wq); |
334 | 334 | ||
335 | return IRQ_HANDLED; | 335 | return IRQ_HANDLED; |
336 | } | 336 | } |
337 | 337 | ||
@@ -341,19 +341,19 @@ static irqreturn_t iic_handler(int irq, void *dev_id) | |||
341 | */ | 341 | */ |
342 | static int iic_xfer_result(struct ibm_iic_private* dev) | 342 | static int iic_xfer_result(struct ibm_iic_private* dev) |
343 | { | 343 | { |
344 | volatile struct iic_regs __iomem *iic = dev->vaddr; | 344 | volatile struct iic_regs __iomem *iic = dev->vaddr; |
345 | 345 | ||
346 | if (unlikely(in_8(&iic->sts) & STS_ERR)){ | 346 | if (unlikely(in_8(&iic->sts) & STS_ERR)){ |
347 | DBG("%d: xfer error, EXTSTS = 0x%02x\n", dev->idx, | 347 | DBG("%d: xfer error, EXTSTS = 0x%02x\n", dev->idx, |
348 | in_8(&iic->extsts)); | 348 | in_8(&iic->extsts)); |
349 | 349 | ||
350 | /* Clear errors and possible pending IRQs */ | 350 | /* Clear errors and possible pending IRQs */ |
351 | out_8(&iic->extsts, EXTSTS_IRQP | EXTSTS_IRQD | | 351 | out_8(&iic->extsts, EXTSTS_IRQP | EXTSTS_IRQD | |
352 | EXTSTS_LA | EXTSTS_ICT | EXTSTS_XFRA); | 352 | EXTSTS_LA | EXTSTS_ICT | EXTSTS_XFRA); |
353 | 353 | ||
354 | /* Flush master data buffer */ | 354 | /* Flush master data buffer */ |
355 | out_8(&iic->mdcntl, in_8(&iic->mdcntl) | MDCNTL_FMDB); | 355 | out_8(&iic->mdcntl, in_8(&iic->mdcntl) | MDCNTL_FMDB); |
356 | 356 | ||
357 | /* Is bus free? | 357 | /* Is bus free? |
358 | * If error happened during combined xfer | 358 | * If error happened during combined xfer |
359 | * IIC interface is usually stuck in some strange | 359 | * IIC interface is usually stuck in some strange |
@@ -376,11 +376,11 @@ static void iic_abort_xfer(struct ibm_iic_private* dev) | |||
376 | { | 376 | { |
377 | volatile struct iic_regs __iomem *iic = dev->vaddr; | 377 | volatile struct iic_regs __iomem *iic = dev->vaddr; |
378 | unsigned long x; | 378 | unsigned long x; |
379 | 379 | ||
380 | DBG("%d: iic_abort_xfer\n", dev->idx); | 380 | DBG("%d: iic_abort_xfer\n", dev->idx); |
381 | 381 | ||
382 | out_8(&iic->cntl, CNTL_HMT); | 382 | out_8(&iic->cntl, CNTL_HMT); |
383 | 383 | ||
384 | /* | 384 | /* |
385 | * Wait for the abort command to complete. | 385 | * Wait for the abort command to complete. |
386 | * It's not worth to be optimized, just poll (timeout >= 1 tick) | 386 | * It's not worth to be optimized, just poll (timeout >= 1 tick) |
@@ -405,13 +405,13 @@ static void iic_abort_xfer(struct ibm_iic_private* dev) | |||
405 | * Returns the number of transferred bytes or error (<0) | 405 | * Returns the number of transferred bytes or error (<0) |
406 | */ | 406 | */ |
407 | static int iic_wait_for_tc(struct ibm_iic_private* dev){ | 407 | static int iic_wait_for_tc(struct ibm_iic_private* dev){ |
408 | 408 | ||
409 | volatile struct iic_regs __iomem *iic = dev->vaddr; | 409 | volatile struct iic_regs __iomem *iic = dev->vaddr; |
410 | int ret = 0; | 410 | int ret = 0; |
411 | 411 | ||
412 | if (dev->irq >= 0){ | 412 | if (dev->irq >= 0){ |
413 | /* Interrupt mode */ | 413 | /* Interrupt mode */ |
414 | ret = wait_event_interruptible_timeout(dev->wq, | 414 | ret = wait_event_interruptible_timeout(dev->wq, |
415 | !(in_8(&iic->sts) & STS_PT), dev->adap.timeout * HZ); | 415 | !(in_8(&iic->sts) & STS_PT), dev->adap.timeout * HZ); |
416 | 416 | ||
417 | if (unlikely(ret < 0)) | 417 | if (unlikely(ret < 0)) |
@@ -424,37 +424,37 @@ static int iic_wait_for_tc(struct ibm_iic_private* dev){ | |||
424 | else { | 424 | else { |
425 | /* Polling mode */ | 425 | /* Polling mode */ |
426 | unsigned long x = jiffies + dev->adap.timeout * HZ; | 426 | unsigned long x = jiffies + dev->adap.timeout * HZ; |
427 | 427 | ||
428 | while (in_8(&iic->sts) & STS_PT){ | 428 | while (in_8(&iic->sts) & STS_PT){ |
429 | if (unlikely(time_after(jiffies, x))){ | 429 | if (unlikely(time_after(jiffies, x))){ |
430 | DBG("%d: poll timeout\n", dev->idx); | 430 | DBG("%d: poll timeout\n", dev->idx); |
431 | ret = -ETIMEDOUT; | 431 | ret = -ETIMEDOUT; |
432 | break; | 432 | break; |
433 | } | 433 | } |
434 | 434 | ||
435 | if (unlikely(signal_pending(current))){ | 435 | if (unlikely(signal_pending(current))){ |
436 | DBG("%d: poll interrupted\n", dev->idx); | 436 | DBG("%d: poll interrupted\n", dev->idx); |
437 | ret = -ERESTARTSYS; | 437 | ret = -ERESTARTSYS; |
438 | break; | 438 | break; |
439 | } | 439 | } |
440 | schedule(); | 440 | schedule(); |
441 | } | 441 | } |
442 | } | 442 | } |
443 | 443 | ||
444 | if (unlikely(ret < 0)) | 444 | if (unlikely(ret < 0)) |
445 | iic_abort_xfer(dev); | 445 | iic_abort_xfer(dev); |
446 | else | 446 | else |
447 | ret = iic_xfer_result(dev); | 447 | ret = iic_xfer_result(dev); |
448 | 448 | ||
449 | DBG2("%d: iic_wait_for_tc -> %d\n", dev->idx, ret); | 449 | DBG2("%d: iic_wait_for_tc -> %d\n", dev->idx, ret); |
450 | 450 | ||
451 | return ret; | 451 | return ret; |
452 | } | 452 | } |
453 | 453 | ||
454 | /* | 454 | /* |
455 | * Low level master transfer routine | 455 | * Low level master transfer routine |
456 | */ | 456 | */ |
457 | static int iic_xfer_bytes(struct ibm_iic_private* dev, struct i2c_msg* pm, | 457 | static int iic_xfer_bytes(struct ibm_iic_private* dev, struct i2c_msg* pm, |
458 | int combined_xfer) | 458 | int combined_xfer) |
459 | { | 459 | { |
460 | volatile struct iic_regs __iomem *iic = dev->vaddr; | 460 | volatile struct iic_regs __iomem *iic = dev->vaddr; |
@@ -465,48 +465,48 @@ static int iic_xfer_bytes(struct ibm_iic_private* dev, struct i2c_msg* pm, | |||
465 | u8 cntl = (in_8(&iic->cntl) & CNTL_AMD) | CNTL_PT; | 465 | u8 cntl = (in_8(&iic->cntl) & CNTL_AMD) | CNTL_PT; |
466 | if (pm->flags & I2C_M_RD) | 466 | if (pm->flags & I2C_M_RD) |
467 | cntl |= CNTL_RW; | 467 | cntl |= CNTL_RW; |
468 | 468 | ||
469 | loops = (len + 3) / 4; | 469 | loops = (len + 3) / 4; |
470 | for (i = 0; i < loops; ++i, len -= 4){ | 470 | for (i = 0; i < loops; ++i, len -= 4){ |
471 | int count = len > 4 ? 4 : len; | 471 | int count = len > 4 ? 4 : len; |
472 | u8 cmd = cntl | ((count - 1) << CNTL_TCT_SHIFT); | 472 | u8 cmd = cntl | ((count - 1) << CNTL_TCT_SHIFT); |
473 | 473 | ||
474 | if (!(cntl & CNTL_RW)) | 474 | if (!(cntl & CNTL_RW)) |
475 | for (j = 0; j < count; ++j) | 475 | for (j = 0; j < count; ++j) |
476 | out_8((void __iomem *)&iic->mdbuf, *buf++); | 476 | out_8((void __iomem *)&iic->mdbuf, *buf++); |
477 | 477 | ||
478 | if (i < loops - 1) | 478 | if (i < loops - 1) |
479 | cmd |= CNTL_CHT; | 479 | cmd |= CNTL_CHT; |
480 | else if (combined_xfer) | 480 | else if (combined_xfer) |
481 | cmd |= CNTL_RPST; | 481 | cmd |= CNTL_RPST; |
482 | 482 | ||
483 | DBG2("%d: xfer_bytes, %d, CNTL = 0x%02x\n", dev->idx, count, cmd); | 483 | DBG2("%d: xfer_bytes, %d, CNTL = 0x%02x\n", dev->idx, count, cmd); |
484 | 484 | ||
485 | /* Start transfer */ | 485 | /* Start transfer */ |
486 | out_8(&iic->cntl, cmd); | 486 | out_8(&iic->cntl, cmd); |
487 | 487 | ||
488 | /* Wait for completion */ | 488 | /* Wait for completion */ |
489 | ret = iic_wait_for_tc(dev); | 489 | ret = iic_wait_for_tc(dev); |
490 | 490 | ||
491 | if (unlikely(ret < 0)) | 491 | if (unlikely(ret < 0)) |
492 | break; | 492 | break; |
493 | else if (unlikely(ret != count)){ | 493 | else if (unlikely(ret != count)){ |
494 | DBG("%d: xfer_bytes, requested %d, transfered %d\n", | 494 | DBG("%d: xfer_bytes, requested %d, transfered %d\n", |
495 | dev->idx, count, ret); | 495 | dev->idx, count, ret); |
496 | 496 | ||
497 | /* If it's not a last part of xfer, abort it */ | 497 | /* If it's not a last part of xfer, abort it */ |
498 | if (combined_xfer || (i < loops - 1)) | 498 | if (combined_xfer || (i < loops - 1)) |
499 | iic_abort_xfer(dev); | 499 | iic_abort_xfer(dev); |
500 | 500 | ||
501 | ret = -EREMOTEIO; | 501 | ret = -EREMOTEIO; |
502 | break; | 502 | break; |
503 | } | 503 | } |
504 | 504 | ||
505 | if (cntl & CNTL_RW) | 505 | if (cntl & CNTL_RW) |
506 | for (j = 0; j < count; ++j) | 506 | for (j = 0; j < count; ++j) |
507 | *buf++ = in_8((void __iomem *)&iic->mdbuf); | 507 | *buf++ = in_8((void __iomem *)&iic->mdbuf); |
508 | } | 508 | } |
509 | 509 | ||
510 | return ret > 0 ? 0 : ret; | 510 | return ret > 0 ? 0 : ret; |
511 | } | 511 | } |
512 | 512 | ||
@@ -517,10 +517,10 @@ static inline void iic_address(struct ibm_iic_private* dev, struct i2c_msg* msg) | |||
517 | { | 517 | { |
518 | volatile struct iic_regs __iomem *iic = dev->vaddr; | 518 | volatile struct iic_regs __iomem *iic = dev->vaddr; |
519 | u16 addr = msg->addr; | 519 | u16 addr = msg->addr; |
520 | 520 | ||
521 | DBG2("%d: iic_address, 0x%03x (%d-bit)\n", dev->idx, | 521 | DBG2("%d: iic_address, 0x%03x (%d-bit)\n", dev->idx, |
522 | addr, msg->flags & I2C_M_TEN ? 10 : 7); | 522 | addr, msg->flags & I2C_M_TEN ? 10 : 7); |
523 | 523 | ||
524 | if (msg->flags & I2C_M_TEN){ | 524 | if (msg->flags & I2C_M_TEN){ |
525 | out_8(&iic->cntl, CNTL_AMD); | 525 | out_8(&iic->cntl, CNTL_AMD); |
526 | out_8(&iic->lmadr, addr); | 526 | out_8(&iic->lmadr, addr); |
@@ -537,15 +537,15 @@ static inline int iic_invalid_address(const struct i2c_msg* p) | |||
537 | return (p->addr > 0x3ff) || (!(p->flags & I2C_M_TEN) && (p->addr > 0x7f)); | 537 | return (p->addr > 0x3ff) || (!(p->flags & I2C_M_TEN) && (p->addr > 0x7f)); |
538 | } | 538 | } |
539 | 539 | ||
540 | static inline int iic_address_neq(const struct i2c_msg* p1, | 540 | static inline int iic_address_neq(const struct i2c_msg* p1, |
541 | const struct i2c_msg* p2) | 541 | const struct i2c_msg* p2) |
542 | { | 542 | { |
543 | return (p1->addr != p2->addr) | 543 | return (p1->addr != p2->addr) |
544 | || ((p1->flags & I2C_M_TEN) != (p2->flags & I2C_M_TEN)); | 544 | || ((p1->flags & I2C_M_TEN) != (p2->flags & I2C_M_TEN)); |
545 | } | 545 | } |
546 | 546 | ||
547 | /* | 547 | /* |
548 | * Generic master transfer entrypoint. | 548 | * Generic master transfer entrypoint. |
549 | * Returns the number of processed messages or error (<0) | 549 | * Returns the number of processed messages or error (<0) |
550 | */ | 550 | */ |
551 | static int iic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) | 551 | static int iic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) |
@@ -553,20 +553,20 @@ static int iic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) | |||
553 | struct ibm_iic_private* dev = (struct ibm_iic_private*)(i2c_get_adapdata(adap)); | 553 | struct ibm_iic_private* dev = (struct ibm_iic_private*)(i2c_get_adapdata(adap)); |
554 | volatile struct iic_regs __iomem *iic = dev->vaddr; | 554 | volatile struct iic_regs __iomem *iic = dev->vaddr; |
555 | int i, ret = 0; | 555 | int i, ret = 0; |
556 | 556 | ||
557 | DBG2("%d: iic_xfer, %d msg(s)\n", dev->idx, num); | 557 | DBG2("%d: iic_xfer, %d msg(s)\n", dev->idx, num); |
558 | 558 | ||
559 | if (!num) | 559 | if (!num) |
560 | return 0; | 560 | return 0; |
561 | 561 | ||
562 | /* Check the sanity of the passed messages. | 562 | /* Check the sanity of the passed messages. |
563 | * Uhh, generic i2c layer is more suitable place for such code... | 563 | * Uhh, generic i2c layer is more suitable place for such code... |
564 | */ | 564 | */ |
565 | if (unlikely(iic_invalid_address(&msgs[0]))){ | 565 | if (unlikely(iic_invalid_address(&msgs[0]))){ |
566 | DBG("%d: invalid address 0x%03x (%d-bit)\n", dev->idx, | 566 | DBG("%d: invalid address 0x%03x (%d-bit)\n", dev->idx, |
567 | msgs[0].addr, msgs[0].flags & I2C_M_TEN ? 10 : 7); | 567 | msgs[0].addr, msgs[0].flags & I2C_M_TEN ? 10 : 7); |
568 | return -EINVAL; | 568 | return -EINVAL; |
569 | } | 569 | } |
570 | for (i = 0; i < num; ++i){ | 570 | for (i = 0; i < num; ++i){ |
571 | if (unlikely(msgs[i].len <= 0)){ | 571 | if (unlikely(msgs[i].len <= 0)){ |
572 | if (num == 1 && !msgs[0].len){ | 572 | if (num == 1 && !msgs[0].len){ |
@@ -576,7 +576,7 @@ static int iic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) | |||
576 | */ | 576 | */ |
577 | return iic_smbus_quick(dev, &msgs[0]); | 577 | return iic_smbus_quick(dev, &msgs[0]); |
578 | } | 578 | } |
579 | DBG("%d: invalid len %d in msg[%d]\n", dev->idx, | 579 | DBG("%d: invalid len %d in msg[%d]\n", dev->idx, |
580 | msgs[i].len, i); | 580 | msgs[i].len, i); |
581 | return -EINVAL; | 581 | return -EINVAL; |
582 | } | 582 | } |
@@ -585,34 +585,34 @@ static int iic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) | |||
585 | return -EINVAL; | 585 | return -EINVAL; |
586 | } | 586 | } |
587 | } | 587 | } |
588 | 588 | ||
589 | /* Check bus state */ | 589 | /* Check bus state */ |
590 | if (unlikely((in_8(&iic->extsts) & EXTSTS_BCS_MASK) != EXTSTS_BCS_FREE)){ | 590 | if (unlikely((in_8(&iic->extsts) & EXTSTS_BCS_MASK) != EXTSTS_BCS_FREE)){ |
591 | DBG("%d: iic_xfer, bus is not free\n", dev->idx); | 591 | DBG("%d: iic_xfer, bus is not free\n", dev->idx); |
592 | 592 | ||
593 | /* Usually it means something serious has happend. | 593 | /* Usually it means something serious has happend. |
594 | * We *cannot* have unfinished previous transfer | 594 | * We *cannot* have unfinished previous transfer |
595 | * so it doesn't make any sense to try to stop it. | 595 | * so it doesn't make any sense to try to stop it. |
596 | * Probably we were not able to recover from the | 596 | * Probably we were not able to recover from the |
597 | * previous error. | 597 | * previous error. |
598 | * The only *reasonable* thing I can think of here | 598 | * The only *reasonable* thing I can think of here |
599 | * is soft reset. --ebs | 599 | * is soft reset. --ebs |
600 | */ | 600 | */ |
601 | iic_dev_reset(dev); | 601 | iic_dev_reset(dev); |
602 | 602 | ||
603 | if ((in_8(&iic->extsts) & EXTSTS_BCS_MASK) != EXTSTS_BCS_FREE){ | 603 | if ((in_8(&iic->extsts) & EXTSTS_BCS_MASK) != EXTSTS_BCS_FREE){ |
604 | DBG("%d: iic_xfer, bus is still not free\n", dev->idx); | 604 | DBG("%d: iic_xfer, bus is still not free\n", dev->idx); |
605 | return -EREMOTEIO; | 605 | return -EREMOTEIO; |
606 | } | 606 | } |
607 | } | 607 | } |
608 | else { | 608 | else { |
609 | /* Flush master data buffer (just in case) */ | 609 | /* Flush master data buffer (just in case) */ |
610 | out_8(&iic->mdcntl, in_8(&iic->mdcntl) | MDCNTL_FMDB); | 610 | out_8(&iic->mdcntl, in_8(&iic->mdcntl) | MDCNTL_FMDB); |
611 | } | 611 | } |
612 | 612 | ||
613 | /* Load slave address */ | 613 | /* Load slave address */ |
614 | iic_address(dev, &msgs[0]); | 614 | iic_address(dev, &msgs[0]); |
615 | 615 | ||
616 | /* Do real transfer */ | 616 | /* Do real transfer */ |
617 | for (i = 0; i < num && !ret; ++i) | 617 | for (i = 0; i < num && !ret; ++i) |
618 | ret = iic_xfer_bytes(dev, &msgs[i], i < num - 1); | 618 | ret = iic_xfer_bytes(dev, &msgs[i], i < num - 1); |
@@ -648,7 +648,7 @@ static inline u8 iic_clckdiv(unsigned int opb) | |||
648 | 648 | ||
649 | /* Convert to MHz */ | 649 | /* Convert to MHz */ |
650 | opb /= 1000000; | 650 | opb /= 1000000; |
651 | 651 | ||
652 | if (opb < 20 || opb > 150){ | 652 | if (opb < 20 || opb > 150){ |
653 | printk(KERN_CRIT "ibm-iic: invalid OPB clock frequency %u MHz\n", | 653 | printk(KERN_CRIT "ibm-iic: invalid OPB clock frequency %u MHz\n", |
654 | opb); | 654 | opb); |
@@ -666,7 +666,7 @@ static int __devinit iic_probe(struct ocp_device *ocp){ | |||
666 | struct i2c_adapter* adap; | 666 | struct i2c_adapter* adap; |
667 | struct ocp_func_iic_data* iic_data = ocp->def->additions; | 667 | struct ocp_func_iic_data* iic_data = ocp->def->additions; |
668 | int ret; | 668 | int ret; |
669 | 669 | ||
670 | if (!iic_data) | 670 | if (!iic_data) |
671 | printk(KERN_WARNING"ibm-iic%d: missing additional data!\n", | 671 | printk(KERN_WARNING"ibm-iic%d: missing additional data!\n", |
672 | ocp->def->index); | 672 | ocp->def->index); |
@@ -679,7 +679,7 @@ static int __devinit iic_probe(struct ocp_device *ocp){ | |||
679 | 679 | ||
680 | dev->idx = ocp->def->index; | 680 | dev->idx = ocp->def->index; |
681 | ocp_set_drvdata(ocp, dev); | 681 | ocp_set_drvdata(ocp, dev); |
682 | 682 | ||
683 | if (!request_mem_region(ocp->def->paddr, sizeof(struct iic_regs), | 683 | if (!request_mem_region(ocp->def->paddr, sizeof(struct iic_regs), |
684 | "ibm_iic")) { | 684 | "ibm_iic")) { |
685 | ret = -EBUSY; | 685 | ret = -EBUSY; |
@@ -692,7 +692,7 @@ static int __devinit iic_probe(struct ocp_device *ocp){ | |||
692 | ret = -ENXIO; | 692 | ret = -ENXIO; |
693 | goto fail2; | 693 | goto fail2; |
694 | } | 694 | } |
695 | 695 | ||
696 | init_waitqueue_head(&dev->wq); | 696 | init_waitqueue_head(&dev->wq); |
697 | 697 | ||
698 | dev->irq = iic_force_poll ? -1 : ocp->def->irq; | 698 | dev->irq = iic_force_poll ? -1 : ocp->def->irq; |
@@ -702,29 +702,29 @@ static int __devinit iic_probe(struct ocp_device *ocp){ | |||
702 | */ | 702 | */ |
703 | iic_interrupt_mode(dev, 0); | 703 | iic_interrupt_mode(dev, 0); |
704 | if (request_irq(dev->irq, iic_handler, 0, "IBM IIC", dev)){ | 704 | if (request_irq(dev->irq, iic_handler, 0, "IBM IIC", dev)){ |
705 | printk(KERN_ERR "ibm-iic%d: request_irq %d failed\n", | 705 | printk(KERN_ERR "ibm-iic%d: request_irq %d failed\n", |
706 | dev->idx, dev->irq); | 706 | dev->idx, dev->irq); |
707 | /* Fallback to the polling mode */ | 707 | /* Fallback to the polling mode */ |
708 | dev->irq = -1; | 708 | dev->irq = -1; |
709 | } | 709 | } |
710 | } | 710 | } |
711 | 711 | ||
712 | if (dev->irq < 0) | 712 | if (dev->irq < 0) |
713 | printk(KERN_WARNING "ibm-iic%d: using polling mode\n", | 713 | printk(KERN_WARNING "ibm-iic%d: using polling mode\n", |
714 | dev->idx); | 714 | dev->idx); |
715 | 715 | ||
716 | /* Board specific settings */ | 716 | /* Board specific settings */ |
717 | dev->fast_mode = iic_force_fast ? 1 : (iic_data ? iic_data->fast_mode : 0); | 717 | dev->fast_mode = iic_force_fast ? 1 : (iic_data ? iic_data->fast_mode : 0); |
718 | 718 | ||
719 | /* clckdiv is the same for *all* IIC interfaces, | 719 | /* clckdiv is the same for *all* IIC interfaces, |
720 | * but I'd rather make a copy than introduce another global. --ebs | 720 | * but I'd rather make a copy than introduce another global. --ebs |
721 | */ | 721 | */ |
722 | dev->clckdiv = iic_clckdiv(ocp_sys_info.opb_bus_freq); | 722 | dev->clckdiv = iic_clckdiv(ocp_sys_info.opb_bus_freq); |
723 | DBG("%d: clckdiv = %d\n", dev->idx, dev->clckdiv); | 723 | DBG("%d: clckdiv = %d\n", dev->idx, dev->clckdiv); |
724 | 724 | ||
725 | /* Initialize IIC interface */ | 725 | /* Initialize IIC interface */ |
726 | iic_dev_init(dev); | 726 | iic_dev_init(dev); |
727 | 727 | ||
728 | /* Register it with i2c layer */ | 728 | /* Register it with i2c layer */ |
729 | adap = &dev->adap; | 729 | adap = &dev->adap; |
730 | adap->dev.parent = &ocp->dev; | 730 | adap->dev.parent = &ocp->dev; |
@@ -736,7 +736,6 @@ static int __devinit iic_probe(struct ocp_device *ocp){ | |||
736 | adap->client_register = NULL; | 736 | adap->client_register = NULL; |
737 | adap->client_unregister = NULL; | 737 | adap->client_unregister = NULL; |
738 | adap->timeout = 1; | 738 | adap->timeout = 1; |
739 | adap->retries = 1; | ||
740 | 739 | ||
741 | /* | 740 | /* |
742 | * If "dev->idx" is negative we consider it as zero. | 741 | * If "dev->idx" is negative we consider it as zero. |
@@ -750,24 +749,24 @@ static int __devinit iic_probe(struct ocp_device *ocp){ | |||
750 | dev->idx); | 749 | dev->idx); |
751 | goto fail; | 750 | goto fail; |
752 | } | 751 | } |
753 | 752 | ||
754 | printk(KERN_INFO "ibm-iic%d: using %s mode\n", dev->idx, | 753 | printk(KERN_INFO "ibm-iic%d: using %s mode\n", dev->idx, |
755 | dev->fast_mode ? "fast (400 kHz)" : "standard (100 kHz)"); | 754 | dev->fast_mode ? "fast (400 kHz)" : "standard (100 kHz)"); |
756 | 755 | ||
757 | return 0; | 756 | return 0; |
758 | 757 | ||
759 | fail: | 758 | fail: |
760 | if (dev->irq >= 0){ | 759 | if (dev->irq >= 0){ |
761 | iic_interrupt_mode(dev, 0); | 760 | iic_interrupt_mode(dev, 0); |
762 | free_irq(dev->irq, dev); | 761 | free_irq(dev->irq, dev); |
763 | } | 762 | } |
764 | 763 | ||
765 | iounmap(dev->vaddr); | 764 | iounmap(dev->vaddr); |
766 | fail2: | 765 | fail2: |
767 | release_mem_region(ocp->def->paddr, sizeof(struct iic_regs)); | 766 | release_mem_region(ocp->def->paddr, sizeof(struct iic_regs)); |
768 | fail1: | 767 | fail1: |
769 | ocp_set_drvdata(ocp, NULL); | 768 | ocp_set_drvdata(ocp, NULL); |
770 | kfree(dev); | 769 | kfree(dev); |
771 | return ret; | 770 | return ret; |
772 | } | 771 | } |
773 | 772 | ||
@@ -783,13 +782,13 @@ static void __devexit iic_remove(struct ocp_device *ocp) | |||
783 | dev->idx); | 782 | dev->idx); |
784 | /* That's *very* bad, just shutdown IRQ ... */ | 783 | /* That's *very* bad, just shutdown IRQ ... */ |
785 | if (dev->irq >= 0){ | 784 | if (dev->irq >= 0){ |
786 | iic_interrupt_mode(dev, 0); | 785 | iic_interrupt_mode(dev, 0); |
787 | free_irq(dev->irq, dev); | 786 | free_irq(dev->irq, dev); |
788 | dev->irq = -1; | 787 | dev->irq = -1; |
789 | } | 788 | } |
790 | } else { | 789 | } else { |
791 | if (dev->irq >= 0){ | 790 | if (dev->irq >= 0){ |
792 | iic_interrupt_mode(dev, 0); | 791 | iic_interrupt_mode(dev, 0); |
793 | free_irq(dev->irq, dev); | 792 | free_irq(dev->irq, dev); |
794 | } | 793 | } |
795 | iounmap(dev->vaddr); | 794 | iounmap(dev->vaddr); |
@@ -798,7 +797,7 @@ static void __devexit iic_remove(struct ocp_device *ocp) | |||
798 | } | 797 | } |
799 | } | 798 | } |
800 | 799 | ||
801 | static struct ocp_device_id ibm_iic_ids[] __devinitdata = | 800 | static struct ocp_device_id ibm_iic_ids[] __devinitdata = |
802 | { | 801 | { |
803 | { .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_IIC }, | 802 | { .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_IIC }, |
804 | { .vendor = OCP_VENDOR_INVALID } | 803 | { .vendor = OCP_VENDOR_INVALID } |
diff --git a/drivers/i2c/busses/i2c-ibm_iic.h b/drivers/i2c/busses/i2c-ibm_iic.h index 59d7b437f7ff..fdaa48292cb6 100644 --- a/drivers/i2c/busses/i2c-ibm_iic.h +++ b/drivers/i2c/busses/i2c-ibm_iic.h | |||
@@ -2,11 +2,11 @@ | |||
2 | * drivers/i2c/busses/i2c-ibm_iic.h | 2 | * drivers/i2c/busses/i2c-ibm_iic.h |
3 | * | 3 | * |
4 | * Support for the IIC peripheral on IBM PPC 4xx | 4 | * Support for the IIC peripheral on IBM PPC 4xx |
5 | * | 5 | * |
6 | * Copyright (c) 2003 Zultys Technologies. | 6 | * Copyright (c) 2003 Zultys Technologies. |
7 | * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> | 7 | * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> |
8 | * | 8 | * |
9 | * Based on original work by | 9 | * Based on original work by |
10 | * Ian DaSilva <idasilva@mvista.com> | 10 | * Ian DaSilva <idasilva@mvista.com> |
11 | * Armin Kuster <akuster@mvista.com> | 11 | * Armin Kuster <akuster@mvista.com> |
12 | * Matt Porter <mporter@mvista.com> | 12 | * Matt Porter <mporter@mvista.com> |
@@ -22,7 +22,7 @@ | |||
22 | #ifndef __I2C_IBM_IIC_H_ | 22 | #ifndef __I2C_IBM_IIC_H_ |
23 | #define __I2C_IBM_IIC_H_ | 23 | #define __I2C_IBM_IIC_H_ |
24 | 24 | ||
25 | #include <linux/i2c.h> | 25 | #include <linux/i2c.h> |
26 | 26 | ||
27 | struct iic_regs { | 27 | struct iic_regs { |
28 | u16 mdbuf; | 28 | u16 mdbuf; |
@@ -58,7 +58,7 @@ struct ibm_iic_private { | |||
58 | #define CNTL_TCT_MASK 0x30 | 58 | #define CNTL_TCT_MASK 0x30 |
59 | #define CNTL_TCT_SHIFT 4 | 59 | #define CNTL_TCT_SHIFT 4 |
60 | #define CNTL_RPST 0x08 | 60 | #define CNTL_RPST 0x08 |
61 | #define CNTL_CHT 0x04 | 61 | #define CNTL_CHT 0x04 |
62 | #define CNTL_RW 0x02 | 62 | #define CNTL_RW 0x02 |
63 | #define CNTL_PT 0x01 | 63 | #define CNTL_PT 0x01 |
64 | 64 | ||
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c index c70146e4c2c0..ab41400c883e 100644 --- a/drivers/i2c/busses/i2c-iop3xx.c +++ b/drivers/i2c/busses/i2c-iop3xx.c | |||
@@ -490,7 +490,6 @@ iop3xx_i2c_probe(struct platform_device *pdev) | |||
490 | * Default values...should these come in from board code? | 490 | * Default values...should these come in from board code? |
491 | */ | 491 | */ |
492 | new_adapter->timeout = 100; | 492 | new_adapter->timeout = 100; |
493 | new_adapter->retries = 3; | ||
494 | new_adapter->algo = &iop3xx_i2c_algo; | 493 | new_adapter->algo = &iop3xx_i2c_algo; |
495 | 494 | ||
496 | init_waitqueue_head(&adapter_data->waitq); | 495 | init_waitqueue_head(&adapter_data->waitq); |
diff --git a/drivers/i2c/busses/i2c-ixp4xx.c b/drivers/i2c/busses/i2c-ixp4xx.c deleted file mode 100644 index 069ed7f3b395..000000000000 --- a/drivers/i2c/busses/i2c-ixp4xx.c +++ /dev/null | |||
@@ -1,178 +0,0 @@ | |||
1 | /* | ||
2 | * drivers/i2c/busses/i2c-ixp4xx.c | ||
3 | * | ||
4 | * Intel's IXP4xx XScale NPU chipsets (IXP420, 421, 422, 425) do not have | ||
5 | * an on board I2C controller but provide 16 GPIO pins that are often | ||
6 | * used to create an I2C bus. This driver provides an i2c_adapter | ||
7 | * interface that plugs in under algo_bit and drives the GPIO pins | ||
8 | * as instructed by the alogorithm driver. | ||
9 | * | ||
10 | * Author: Deepak Saxena <dsaxena@plexity.net> | ||
11 | * | ||
12 | * Copyright (c) 2003-2004 MontaVista Software Inc. | ||
13 | * | ||
14 | * This file is licensed under the terms of the GNU General Public | ||
15 | * License version 2. This program is licensed "as is" without any | ||
16 | * warranty of any kind, whether express or implied. | ||
17 | * | ||
18 | * NOTE: Since different platforms will use different GPIO pins for | ||
19 | * I2C, this driver uses an IXP4xx-specific platform_data | ||
20 | * pointer to pass the GPIO numbers to the driver. This | ||
21 | * allows us to support all the different IXP4xx platforms | ||
22 | * w/o having to put #ifdefs in this driver. | ||
23 | * | ||
24 | * See arch/arm/mach-ixp4xx/ixdp425.c for an example of building a | ||
25 | * device list and filling in the ixp4xx_i2c_pins data structure | ||
26 | * that is passed as the platform_data to this driver. | ||
27 | */ | ||
28 | |||
29 | #include <linux/kernel.h> | ||
30 | #include <linux/init.h> | ||
31 | #include <linux/platform_device.h> | ||
32 | #include <linux/module.h> | ||
33 | #include <linux/i2c.h> | ||
34 | #include <linux/i2c-algo-bit.h> | ||
35 | |||
36 | #include <asm/hardware.h> /* Pick up IXP4xx-specific bits */ | ||
37 | |||
38 | static inline int ixp4xx_scl_pin(void *data) | ||
39 | { | ||
40 | return ((struct ixp4xx_i2c_pins*)data)->scl_pin; | ||
41 | } | ||
42 | |||
43 | static inline int ixp4xx_sda_pin(void *data) | ||
44 | { | ||
45 | return ((struct ixp4xx_i2c_pins*)data)->sda_pin; | ||
46 | } | ||
47 | |||
48 | static void ixp4xx_bit_setscl(void *data, int val) | ||
49 | { | ||
50 | gpio_line_set(ixp4xx_scl_pin(data), 0); | ||
51 | gpio_line_config(ixp4xx_scl_pin(data), | ||
52 | val ? IXP4XX_GPIO_IN : IXP4XX_GPIO_OUT ); | ||
53 | } | ||
54 | |||
55 | static void ixp4xx_bit_setsda(void *data, int val) | ||
56 | { | ||
57 | gpio_line_set(ixp4xx_sda_pin(data), 0); | ||
58 | gpio_line_config(ixp4xx_sda_pin(data), | ||
59 | val ? IXP4XX_GPIO_IN : IXP4XX_GPIO_OUT ); | ||
60 | } | ||
61 | |||
62 | static int ixp4xx_bit_getscl(void *data) | ||
63 | { | ||
64 | int scl; | ||
65 | |||
66 | gpio_line_config(ixp4xx_scl_pin(data), IXP4XX_GPIO_IN ); | ||
67 | gpio_line_get(ixp4xx_scl_pin(data), &scl); | ||
68 | |||
69 | return scl; | ||
70 | } | ||
71 | |||
72 | static int ixp4xx_bit_getsda(void *data) | ||
73 | { | ||
74 | int sda; | ||
75 | |||
76 | gpio_line_config(ixp4xx_sda_pin(data), IXP4XX_GPIO_IN ); | ||
77 | gpio_line_get(ixp4xx_sda_pin(data), &sda); | ||
78 | |||
79 | return sda; | ||
80 | } | ||
81 | |||
82 | struct ixp4xx_i2c_data { | ||
83 | struct ixp4xx_i2c_pins *gpio_pins; | ||
84 | struct i2c_adapter adapter; | ||
85 | struct i2c_algo_bit_data algo_data; | ||
86 | }; | ||
87 | |||
88 | static int ixp4xx_i2c_remove(struct platform_device *plat_dev) | ||
89 | { | ||
90 | struct ixp4xx_i2c_data *drv_data = platform_get_drvdata(plat_dev); | ||
91 | |||
92 | platform_set_drvdata(plat_dev, NULL); | ||
93 | |||
94 | i2c_del_adapter(&drv_data->adapter); | ||
95 | |||
96 | kfree(drv_data); | ||
97 | |||
98 | return 0; | ||
99 | } | ||
100 | |||
101 | static int ixp4xx_i2c_probe(struct platform_device *plat_dev) | ||
102 | { | ||
103 | int err; | ||
104 | struct ixp4xx_i2c_pins *gpio = plat_dev->dev.platform_data; | ||
105 | struct ixp4xx_i2c_data *drv_data = | ||
106 | kzalloc(sizeof(struct ixp4xx_i2c_data), GFP_KERNEL); | ||
107 | |||
108 | if(!drv_data) | ||
109 | return -ENOMEM; | ||
110 | |||
111 | drv_data->gpio_pins = gpio; | ||
112 | |||
113 | /* | ||
114 | * We could make a lot of these structures static, but | ||
115 | * certain platforms may have multiple GPIO-based I2C | ||
116 | * buses for various device domains, so we need per-device | ||
117 | * algo_data->data. | ||
118 | */ | ||
119 | drv_data->algo_data.data = gpio; | ||
120 | drv_data->algo_data.setsda = ixp4xx_bit_setsda; | ||
121 | drv_data->algo_data.setscl = ixp4xx_bit_setscl; | ||
122 | drv_data->algo_data.getsda = ixp4xx_bit_getsda; | ||
123 | drv_data->algo_data.getscl = ixp4xx_bit_getscl; | ||
124 | drv_data->algo_data.udelay = 10; | ||
125 | drv_data->algo_data.timeout = 100; | ||
126 | |||
127 | drv_data->adapter.id = I2C_HW_B_IXP4XX; | ||
128 | drv_data->adapter.class = I2C_CLASS_HWMON; | ||
129 | strlcpy(drv_data->adapter.name, plat_dev->dev.driver->name, | ||
130 | sizeof(drv_data->adapter.name)); | ||
131 | drv_data->adapter.algo_data = &drv_data->algo_data; | ||
132 | |||
133 | drv_data->adapter.dev.parent = &plat_dev->dev; | ||
134 | |||
135 | gpio_line_config(gpio->scl_pin, IXP4XX_GPIO_IN); | ||
136 | gpio_line_config(gpio->sda_pin, IXP4XX_GPIO_IN); | ||
137 | gpio_line_set(gpio->scl_pin, 0); | ||
138 | gpio_line_set(gpio->sda_pin, 0); | ||
139 | |||
140 | err = i2c_bit_add_bus(&drv_data->adapter); | ||
141 | if (err) { | ||
142 | printk(KERN_ERR "ERROR: Could not install %s\n", plat_dev->dev.bus_id); | ||
143 | |||
144 | kfree(drv_data); | ||
145 | return err; | ||
146 | } | ||
147 | |||
148 | platform_set_drvdata(plat_dev, drv_data); | ||
149 | |||
150 | return 0; | ||
151 | } | ||
152 | |||
153 | static struct platform_driver ixp4xx_i2c_driver = { | ||
154 | .probe = ixp4xx_i2c_probe, | ||
155 | .remove = ixp4xx_i2c_remove, | ||
156 | .driver = { | ||
157 | .name = "IXP4XX-I2C", | ||
158 | .owner = THIS_MODULE, | ||
159 | }, | ||
160 | }; | ||
161 | |||
162 | static int __init ixp4xx_i2c_init(void) | ||
163 | { | ||
164 | return platform_driver_register(&ixp4xx_i2c_driver); | ||
165 | } | ||
166 | |||
167 | static void __exit ixp4xx_i2c_exit(void) | ||
168 | { | ||
169 | platform_driver_unregister(&ixp4xx_i2c_driver); | ||
170 | } | ||
171 | |||
172 | module_init(ixp4xx_i2c_init); | ||
173 | module_exit(ixp4xx_i2c_exit); | ||
174 | |||
175 | MODULE_DESCRIPTION("GPIO-based I2C adapter for IXP4xx systems"); | ||
176 | MODULE_LICENSE("GPL"); | ||
177 | MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>"); | ||
178 | |||
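The file above is removed entirely; the GPIO bit-banged bus it provided on IXP4xx boards is presumably meant to come from the generic i2c-gpio driver instead. A rough board-code equivalent is sketched below under that assumption; the device name matches i2c-gpio's platform driver, but the GPIO numbers are invented for illustration.

#include <linux/i2c-gpio.h>
#include <linux/platform_device.h>

static struct i2c_gpio_platform_data example_i2c_gpio_data = {
	.sda_pin = 7,		/* hypothetical GPIO numbers */
	.scl_pin = 6,
	.udelay	 = 10,		/* same 10 us delay the old driver used */
};

static struct platform_device example_i2c_gpio_device = {
	.name	= "i2c-gpio",
	.id	= 0,
	.dev	= {
		.platform_data = &example_i2c_gpio_data,
	},
};

/* Registered from board setup code, e.g.
 *	platform_device_register(&example_i2c_gpio_device);
 */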
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index d8de4ac88b7d..bbe787b243b7 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c | |||
@@ -180,7 +180,7 @@ static void mpc_i2c_stop(struct mpc_i2c *i2c) | |||
180 | static int mpc_write(struct mpc_i2c *i2c, int target, | 180 | static int mpc_write(struct mpc_i2c *i2c, int target, |
181 | const u8 * data, int length, int restart) | 181 | const u8 * data, int length, int restart) |
182 | { | 182 | { |
183 | int i; | 183 | int i, result; |
184 | unsigned timeout = i2c->adap.timeout; | 184 | unsigned timeout = i2c->adap.timeout; |
185 | u32 flags = restart ? CCR_RSTA : 0; | 185 | u32 flags = restart ? CCR_RSTA : 0; |
186 | 186 | ||
@@ -192,15 +192,17 @@ static int mpc_write(struct mpc_i2c *i2c, int target, | |||
192 | /* Write target byte */ | 192 | /* Write target byte */ |
193 | writeb((target << 1), i2c->base + MPC_I2C_DR); | 193 | writeb((target << 1), i2c->base + MPC_I2C_DR); |
194 | 194 | ||
195 | if (i2c_wait(i2c, timeout, 1) < 0) | 195 | result = i2c_wait(i2c, timeout, 1); |
196 | return -1; | 196 | if (result < 0) |
197 | return result; | ||
197 | 198 | ||
198 | for (i = 0; i < length; i++) { | 199 | for (i = 0; i < length; i++) { |
199 | /* Write data byte */ | 200 | /* Write data byte */ |
200 | writeb(data[i], i2c->base + MPC_I2C_DR); | 201 | writeb(data[i], i2c->base + MPC_I2C_DR); |
201 | 202 | ||
202 | if (i2c_wait(i2c, timeout, 1) < 0) | 203 | result = i2c_wait(i2c, timeout, 1); |
203 | return -1; | 204 | if (result < 0) |
205 | return result; | ||
204 | } | 206 | } |
205 | 207 | ||
206 | return 0; | 208 | return 0; |
@@ -210,7 +212,7 @@ static int mpc_read(struct mpc_i2c *i2c, int target, | |||
210 | u8 * data, int length, int restart) | 212 | u8 * data, int length, int restart) |
211 | { | 213 | { |
212 | unsigned timeout = i2c->adap.timeout; | 214 | unsigned timeout = i2c->adap.timeout; |
213 | int i; | 215 | int i, result; |
214 | u32 flags = restart ? CCR_RSTA : 0; | 216 | u32 flags = restart ? CCR_RSTA : 0; |
215 | 217 | ||
216 | /* Start with MEN */ | 218 | /* Start with MEN */ |
@@ -221,8 +223,9 @@ static int mpc_read(struct mpc_i2c *i2c, int target, | |||
221 | /* Write target address byte - this time with the read flag set */ | 223 | /* Write target address byte - this time with the read flag set */ |
222 | writeb((target << 1) | 1, i2c->base + MPC_I2C_DR); | 224 | writeb((target << 1) | 1, i2c->base + MPC_I2C_DR); |
223 | 225 | ||
224 | if (i2c_wait(i2c, timeout, 1) < 0) | 226 | result = i2c_wait(i2c, timeout, 1); |
225 | return -1; | 227 | if (result < 0) |
228 | return result; | ||
226 | 229 | ||
227 | if (length) { | 230 | if (length) { |
228 | if (length == 1) | 231 | if (length == 1) |
@@ -234,8 +237,9 @@ static int mpc_read(struct mpc_i2c *i2c, int target, | |||
234 | } | 237 | } |
235 | 238 | ||
236 | for (i = 0; i < length; i++) { | 239 | for (i = 0; i < length; i++) { |
237 | if (i2c_wait(i2c, timeout, 0) < 0) | 240 | result = i2c_wait(i2c, timeout, 0); |
238 | return -1; | 241 | if (result < 0) |
242 | return result; | ||
239 | 243 | ||
240 | /* Generate txack on next to last byte */ | 244 | /* Generate txack on next to last byte */ |
241 | if (i == length - 2) | 245 | if (i == length - 2) |
@@ -309,7 +313,6 @@ static struct i2c_adapter mpc_ops = { | |||
309 | .algo = &mpc_algo, | 313 | .algo = &mpc_algo, |
310 | .class = I2C_CLASS_HWMON, | 314 | .class = I2C_CLASS_HWMON, |
311 | .timeout = 1, | 315 | .timeout = 1, |
312 | .retries = 1 | ||
313 | }; | 316 | }; |
314 | 317 | ||
315 | static int fsl_i2c_probe(struct platform_device *pdev) | 318 | static int fsl_i2c_probe(struct platform_device *pdev) |
@@ -321,9 +324,9 @@ static int fsl_i2c_probe(struct platform_device *pdev) | |||
321 | 324 | ||
322 | pdata = (struct fsl_i2c_platform_data *) pdev->dev.platform_data; | 325 | pdata = (struct fsl_i2c_platform_data *) pdev->dev.platform_data; |
323 | 326 | ||
324 | if (!(i2c = kzalloc(sizeof(*i2c), GFP_KERNEL))) { | 327 | i2c = kzalloc(sizeof(*i2c), GFP_KERNEL); |
328 | if (!i2c) | ||
325 | return -ENOMEM; | 329 | return -ENOMEM; |
326 | } | ||
327 | 330 | ||
328 | i2c->irq = platform_get_irq(pdev, 0); | 331 | i2c->irq = platform_get_irq(pdev, 0); |
329 | if (i2c->irq < 0) { | 332 | if (i2c->irq < 0) { |
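The i2c-mpc hunks above stop collapsing a failed i2c_wait() into a bare -1 and instead propagate the wait routine's own negative errno, so the caller can tell a timeout apart from other failures. A minimal standalone sketch of that pattern follows; wait_for_event() and its error values are stand-ins, not the driver's real i2c_wait().

    /*
     * Minimal standalone sketch (not the driver's code): propagate the real
     * negative errno from the wait helper instead of returning a bare -1.
     */
    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical stand-in for i2c_wait(): pretend the third byte times out. */
    static int wait_for_event(int i)
    {
        return (i == 2) ? -ETIMEDOUT : 0;
    }

    static int send_buffer(const unsigned char *data, int len)
    {
        int i, result;

        for (i = 0; i < len; i++) {
            /* ...the real driver writes data[i] to the data register here... */
            result = wait_for_event(i);
            if (result < 0)
                return result;      /* -ETIMEDOUT reaches the caller intact */
        }
        return 0;
    }

    int main(void)
    {
        const unsigned char msg[4] = { 0x10, 0x20, 0x30, 0x40 };

        printf("send_buffer() = %d\n", send_buffer(msg, 4));
        return 0;
    }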
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c index 1bf590c74166..3dac920e53ea 100644 --- a/drivers/i2c/busses/i2c-nforce2.c +++ b/drivers/i2c/busses/i2c-nforce2.c | |||
@@ -351,6 +351,7 @@ static int __devinit nforce2_probe(struct pci_dev *dev, const struct pci_device_ | |||
351 | pci_set_drvdata(dev, smbuses); | 351 | pci_set_drvdata(dev, smbuses); |
352 | 352 | ||
353 | switch(dev->device) { | 353 | switch(dev->device) { |
354 | case PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS: | ||
354 | case PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS: | 355 | case PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS: |
355 | case PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS: | 356 | case PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS: |
356 | smbuses[0].blockops = 1; | 357 | smbuses[0].blockops = 1; |
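The one-line i2c-nforce2 addition above extends the set of device IDs for which block operations are enabled on the first of the two SMBus channels. A small standalone sketch of that per-device capability switch; the numeric IDs and the two-channel layout here are illustrative, not taken from the driver.

    /*
     * Standalone sketch of a per-device capability switch; IDs and channel
     * layout are illustrative only.
     */
    #include <stdio.h>

    enum { ID_NFORCE2 = 0x0064, ID_MCP51 = 0x0264, ID_MCP55 = 0x0368 };

    struct smbus_chan { int blockops; };

    static void setup_channels(unsigned int device_id, struct smbus_chan chan[2])
    {
        chan[0].blockops = 0;
        chan[1].blockops = 0;

        switch (device_id) {
        case ID_NFORCE2:            /* the case added by the hunk above */
        case ID_MCP51:
        case ID_MCP55:
            chan[0].blockops = 1;   /* only the first channel gets block ops */
            break;
        }
    }

    int main(void)
    {
        struct smbus_chan chan[2];

        setup_channels(ID_NFORCE2, chan);
        printf("channel 0 blockops = %d\n", chan[0].blockops);
        return 0;
    }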
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index f2552b19ea60..da6639707ea3 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c | |||
@@ -362,8 +362,6 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) | |||
362 | 362 | ||
363 | omap_i2c_enable_clocks(dev); | 363 | omap_i2c_enable_clocks(dev); |
364 | 364 | ||
365 | /* REVISIT: initialize and use adap->retries. This is an optional | ||
366 | * feature */ | ||
367 | if ((r = omap_i2c_wait_for_bb(dev)) < 0) | 365 | if ((r = omap_i2c_wait_for_bb(dev)) < 0) |
368 | goto out; | 366 | goto out; |
369 | 367 | ||
diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c index ca18e0be4901..1603c81e39d4 100644 --- a/drivers/i2c/busses/i2c-pasemi.c +++ b/drivers/i2c/busses/i2c-pasemi.c | |||
@@ -368,6 +368,7 @@ static int __devinit pasemi_smb_probe(struct pci_dev *dev, | |||
368 | smbus->adapter.class = I2C_CLASS_HWMON; | 368 | smbus->adapter.class = I2C_CLASS_HWMON; |
369 | smbus->adapter.algo = &smbus_algorithm; | 369 | smbus->adapter.algo = &smbus_algorithm; |
370 | smbus->adapter.algo_data = smbus; | 370 | smbus->adapter.algo_data = smbus; |
371 | smbus->adapter.nr = PCI_FUNC(dev->devfn); | ||
371 | 372 | ||
372 | /* set up the sysfs linkage to our parent device */ | 373 | /* set up the sysfs linkage to our parent device */ |
373 | smbus->adapter.dev.parent = &dev->dev; | 374 | smbus->adapter.dev.parent = &dev->dev; |
@@ -375,7 +376,7 @@ static int __devinit pasemi_smb_probe(struct pci_dev *dev, | |||
375 | reg_write(smbus, REG_CTL, (CTL_MTR | CTL_MRR | | 376 | reg_write(smbus, REG_CTL, (CTL_MTR | CTL_MRR | |
376 | (CLK_100K_DIV & CTL_CLK_M))); | 377 | (CLK_100K_DIV & CTL_CLK_M))); |
377 | 378 | ||
378 | error = i2c_add_adapter(&smbus->adapter); | 379 | error = i2c_add_numbered_adapter(&smbus->adapter); |
379 | if (error) | 380 | if (error) |
380 | goto out_release_region; | 381 | goto out_release_region; |
381 | 382 | ||
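The i2c-pasemi hunks above pin the adapter number to the PCI function of the SMBus device and register it with i2c_add_numbered_adapter() rather than i2c_add_adapter(), so the i2c bus number no longer depends on probe order. A minimal sketch of that registration pattern; apart from the i2c/PCI calls, the struct and function names are hypothetical.

    /*
     * Sketch of the registration pattern only; not the full pasemi probe.
     * my_smbus and my_smb_probe_sketch() are hypothetical names.
     */
    #include <linux/i2c.h>
    #include <linux/pci.h>

    struct my_smbus {
        struct i2c_adapter adapter;
    };

    static int my_smb_probe_sketch(struct pci_dev *dev, struct my_smbus *smbus)
    {
        /*
         * Request a fixed adapter number derived from the PCI function so
         * the bus number stays stable across probe order.
         */
        smbus->adapter.nr = PCI_FUNC(dev->devfn);

        /*
         * i2c_add_numbered_adapter() honours adapter.nr, whereas
         * i2c_add_adapter() hands out the next free number.
         */
        return i2c_add_numbered_adapter(&smbus->adapter);
    }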
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index 167e4137ee21..9bbe96cef719 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c | |||
@@ -121,10 +121,6 @@ static int __devinit piix4_setup(struct pci_dev *PIIX4_dev, | |||
121 | { | 121 | { |
122 | unsigned char temp; | 122 | unsigned char temp; |
123 | 123 | ||
124 | /* match up the function */ | ||
125 | if (PCI_FUNC(PIIX4_dev->devfn) != id->driver_data) | ||
126 | return -ENODEV; | ||
127 | |||
128 | dev_info(&PIIX4_dev->dev, "Found %s device\n", pci_name(PIIX4_dev)); | 124 | dev_info(&PIIX4_dev->dev, "Found %s device\n", pci_name(PIIX4_dev)); |
129 | 125 | ||
130 | /* Don't access SMBus on IBM systems which get corrupted eeproms */ | 126 | /* Don't access SMBus on IBM systems which get corrupted eeproms */ |
@@ -389,28 +385,21 @@ static struct i2c_adapter piix4_adapter = { | |||
389 | }; | 385 | }; |
390 | 386 | ||
391 | static struct pci_device_id piix4_ids[] = { | 387 | static struct pci_device_id piix4_ids[] = { |
392 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3), | 388 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3) }, |
393 | .driver_data = 3 }, | 389 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3) }, |
394 | { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP200_SMBUS), | 390 | { PCI_DEVICE(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_3) }, |
395 | .driver_data = 0 }, | 391 | { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP200_SMBUS) }, |
396 | { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP300_SMBUS), | 392 | { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP300_SMBUS) }, |
397 | .driver_data = 0 }, | 393 | { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS) }, |
398 | { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS), | 394 | { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS) }, |
399 | .driver_data = 0 }, | 395 | { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, |
400 | { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS), | 396 | PCI_DEVICE_ID_SERVERWORKS_OSB4) }, |
401 | .driver_data = 0 }, | 397 | { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, |
402 | { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4), | 398 | PCI_DEVICE_ID_SERVERWORKS_CSB5) }, |
403 | .driver_data = 0 }, | 399 | { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, |
404 | { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5), | 400 | PCI_DEVICE_ID_SERVERWORKS_CSB6) }, |
405 | .driver_data = 0 }, | 401 | { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, |
406 | { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6), | 402 | PCI_DEVICE_ID_SERVERWORKS_HT1000SB) }, |
407 | .driver_data = 0 }, | ||
408 | { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB), | ||
409 | .driver_data = 0 }, | ||
410 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3), | ||
411 | .driver_data = 3 }, | ||
412 | { PCI_DEVICE(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_3), | ||
413 | .driver_data = 0 }, | ||
414 | { 0, } | 403 | { 0, } |
415 | }; | 404 | }; |
416 | 405 | ||
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index 6426a61f8d4d..2598d29fd7a4 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c | |||
@@ -65,6 +65,7 @@ struct pxa_i2c { | |||
65 | unsigned long iosize; | 65 | unsigned long iosize; |
66 | 66 | ||
67 | int irq; | 67 | int irq; |
68 | int use_pio; | ||
68 | }; | 69 | }; |
69 | 70 | ||
70 | #define _IBMR(i2c) ((i2c)->reg_base + 0) | 71 | #define _IBMR(i2c) ((i2c)->reg_base + 0) |
@@ -163,6 +164,7 @@ static void i2c_pxa_show_state(struct pxa_i2c *i2c, int lno, const char *fname) | |||
163 | #define eedbg(lvl, x...) do { if ((lvl) < 1) { printk(KERN_DEBUG "" x); } } while(0) | 164 | #define eedbg(lvl, x...) do { if ((lvl) < 1) { printk(KERN_DEBUG "" x); } } while(0) |
164 | 165 | ||
165 | static void i2c_pxa_master_complete(struct pxa_i2c *i2c, int ret); | 166 | static void i2c_pxa_master_complete(struct pxa_i2c *i2c, int ret); |
167 | static irqreturn_t i2c_pxa_handler(int this_irq, void *dev_id); | ||
166 | 168 | ||
167 | static void i2c_pxa_scream_blue_murder(struct pxa_i2c *i2c, const char *why) | 169 | static void i2c_pxa_scream_blue_murder(struct pxa_i2c *i2c, const char *why) |
168 | { | 170 | { |
@@ -554,6 +556,71 @@ static inline void i2c_pxa_stop_message(struct pxa_i2c *i2c) | |||
554 | writel(icr, _ICR(i2c)); | 556 | writel(icr, _ICR(i2c)); |
555 | } | 557 | } |
556 | 558 | ||
559 | static int i2c_pxa_pio_set_master(struct pxa_i2c *i2c) | ||
560 | { | ||
561 | /* make timeout the same as for interrupt based functions */ | ||
562 | long timeout = 2 * DEF_TIMEOUT; | ||
563 | |||
564 | /* | ||
565 | * Wait for the bus to become free. | ||
566 | */ | ||
567 | while (timeout-- && readl(_ISR(i2c)) & (ISR_IBB | ISR_UB)) { | ||
568 | udelay(1000); | ||
569 | show_state(i2c); | ||
570 | } | ||
571 | |||
572 | if (timeout <= 0) { | ||
573 | show_state(i2c); | ||
574 | dev_err(&i2c->adap.dev, | ||
575 | "i2c_pxa: timeout waiting for bus free\n"); | ||
576 | return I2C_RETRY; | ||
577 | } | ||
578 | |||
579 | /* | ||
580 | * Set master mode. | ||
581 | */ | ||
582 | writel(readl(_ICR(i2c)) | ICR_SCLE, _ICR(i2c)); | ||
583 | |||
584 | return 0; | ||
585 | } | ||
586 | |||
587 | static int i2c_pxa_do_pio_xfer(struct pxa_i2c *i2c, | ||
588 | struct i2c_msg *msg, int num) | ||
589 | { | ||
590 | unsigned long timeout = 500000; /* 5 seconds */ | ||
591 | int ret = 0; | ||
592 | |||
593 | ret = i2c_pxa_pio_set_master(i2c); | ||
594 | if (ret) | ||
595 | goto out; | ||
596 | |||
597 | i2c->msg = msg; | ||
598 | i2c->msg_num = num; | ||
599 | i2c->msg_idx = 0; | ||
600 | i2c->msg_ptr = 0; | ||
601 | i2c->irqlogidx = 0; | ||
602 | |||
603 | i2c_pxa_start_message(i2c); | ||
604 | |||
605 | while (timeout-- && i2c->msg_num > 0) { | ||
606 | i2c_pxa_handler(0, i2c); | ||
607 | udelay(10); | ||
608 | } | ||
609 | |||
610 | i2c_pxa_stop_message(i2c); | ||
611 | |||
612 | /* | ||
613 | * We place the return code in i2c->msg_idx. | ||
614 | */ | ||
615 | ret = i2c->msg_idx; | ||
616 | |||
617 | out: | ||
618 | if (timeout == 0) | ||
619 | i2c_pxa_scream_blue_murder(i2c, "timeout"); | ||
620 | |||
621 | return ret; | ||
622 | } | ||
623 | |||
557 | /* | 624 | /* |
558 | * We are protected by the adapter bus mutex. | 625 | * We are protected by the adapter bus mutex. |
559 | */ | 626 | */ |
@@ -610,6 +677,35 @@ static int i2c_pxa_do_xfer(struct pxa_i2c *i2c, struct i2c_msg *msg, int num) | |||
610 | return ret; | 677 | return ret; |
611 | } | 678 | } |
612 | 679 | ||
680 | static int i2c_pxa_pio_xfer(struct i2c_adapter *adap, | ||
681 | struct i2c_msg msgs[], int num) | ||
682 | { | ||
683 | struct pxa_i2c *i2c = adap->algo_data; | ||
684 | int ret, i; | ||
685 | |||
686 | /* If the I2C controller is disabled we need to reset it | ||
687 | (probably due to a suspend/resume destroying state). We do | ||
688 | this here as we can then avoid worrying about resuming the | ||
689 | controller before its users. */ | ||
690 | if (!(readl(_ICR(i2c)) & ICR_IUE)) | ||
691 | i2c_pxa_reset(i2c); | ||
692 | |||
693 | for (i = adap->retries; i >= 0; i--) { | ||
694 | ret = i2c_pxa_do_pio_xfer(i2c, msgs, num); | ||
695 | if (ret != I2C_RETRY) | ||
696 | goto out; | ||
697 | |||
698 | if (i2c_debug) | ||
699 | dev_dbg(&adap->dev, "Retrying transmission\n"); | ||
700 | udelay(100); | ||
701 | } | ||
702 | i2c_pxa_scream_blue_murder(i2c, "exhausted retries"); | ||
703 | ret = -EREMOTEIO; | ||
704 | out: | ||
705 | i2c_pxa_set_slave(i2c, ret); | ||
706 | return ret; | ||
707 | } | ||
708 | |||
613 | /* | 709 | /* |
614 | * i2c_pxa_master_complete - complete the message and wake up. | 710 | * i2c_pxa_master_complete - complete the message and wake up. |
615 | */ | 711 | */ |
@@ -621,7 +717,8 @@ static void i2c_pxa_master_complete(struct pxa_i2c *i2c, int ret) | |||
621 | i2c->msg_num = 0; | 717 | i2c->msg_num = 0; |
622 | if (ret) | 718 | if (ret) |
623 | i2c->msg_idx = ret; | 719 | i2c->msg_idx = ret; |
624 | wake_up(&i2c->wait); | 720 | if (!i2c->use_pio) |
721 | wake_up(&i2c->wait); | ||
625 | } | 722 | } |
626 | 723 | ||
627 | static void i2c_pxa_irq_txempty(struct pxa_i2c *i2c, u32 isr) | 724 | static void i2c_pxa_irq_txempty(struct pxa_i2c *i2c, u32 isr) |
@@ -840,6 +937,37 @@ static const struct i2c_algorithm i2c_pxa_algorithm = { | |||
840 | .functionality = i2c_pxa_functionality, | 937 | .functionality = i2c_pxa_functionality, |
841 | }; | 938 | }; |
842 | 939 | ||
940 | static const struct i2c_algorithm i2c_pxa_pio_algorithm = { | ||
941 | .master_xfer = i2c_pxa_pio_xfer, | ||
942 | .functionality = i2c_pxa_functionality, | ||
943 | }; | ||
944 | |||
945 | static void i2c_pxa_enable(struct platform_device *dev) | ||
946 | { | ||
947 | if (cpu_is_pxa27x()) { | ||
948 | switch (dev->id) { | ||
949 | case 0: | ||
950 | pxa_gpio_mode(GPIO117_I2CSCL_MD); | ||
951 | pxa_gpio_mode(GPIO118_I2CSDA_MD); | ||
952 | break; | ||
953 | case 1: | ||
954 | local_irq_disable(); | ||
955 | PCFR |= PCFR_PI2CEN; | ||
956 | local_irq_enable(); | ||
957 | break; | ||
958 | } | ||
959 | } | ||
960 | } | ||
961 | |||
962 | static void i2c_pxa_disable(struct platform_device *dev) | ||
963 | { | ||
964 | if (cpu_is_pxa27x() && dev->id == 1) { | ||
965 | local_irq_disable(); | ||
966 | PCFR &= ~PCFR_PI2CEN; | ||
967 | local_irq_enable(); | ||
968 | } | ||
969 | } | ||
970 | |||
843 | #define res_len(r) ((r)->end - (r)->start + 1) | 971 | #define res_len(r) ((r)->end - (r)->start + 1) |
844 | static int i2c_pxa_probe(struct platform_device *dev) | 972 | static int i2c_pxa_probe(struct platform_device *dev) |
845 | { | 973 | { |
@@ -864,7 +992,6 @@ static int i2c_pxa_probe(struct platform_device *dev) | |||
864 | } | 992 | } |
865 | 993 | ||
866 | i2c->adap.owner = THIS_MODULE; | 994 | i2c->adap.owner = THIS_MODULE; |
867 | i2c->adap.algo = &i2c_pxa_algorithm; | ||
868 | i2c->adap.retries = 5; | 995 | i2c->adap.retries = 5; |
869 | 996 | ||
870 | spin_lock_init(&i2c->lock); | 997 | spin_lock_init(&i2c->lock); |
@@ -899,34 +1026,28 @@ static int i2c_pxa_probe(struct platform_device *dev) | |||
899 | #endif | 1026 | #endif |
900 | 1027 | ||
901 | clk_enable(i2c->clk); | 1028 | clk_enable(i2c->clk); |
902 | #ifdef CONFIG_PXA27x | 1029 | i2c_pxa_enable(dev); |
903 | switch (dev->id) { | ||
904 | case 0: | ||
905 | pxa_gpio_mode(GPIO117_I2CSCL_MD); | ||
906 | pxa_gpio_mode(GPIO118_I2CSDA_MD); | ||
907 | break; | ||
908 | case 1: | ||
909 | local_irq_disable(); | ||
910 | PCFR |= PCFR_PI2CEN; | ||
911 | local_irq_enable(); | ||
912 | } | ||
913 | #endif | ||
914 | 1030 | ||
915 | ret = request_irq(irq, i2c_pxa_handler, IRQF_DISABLED, | 1031 | if (plat) { |
916 | i2c->adap.name, i2c); | 1032 | i2c->adap.class = plat->class; |
917 | if (ret) | 1033 | i2c->use_pio = plat->use_pio; |
918 | goto ereqirq; | 1034 | } |
919 | 1035 | ||
1036 | if (i2c->use_pio) { | ||
1037 | i2c->adap.algo = &i2c_pxa_pio_algorithm; | ||
1038 | } else { | ||
1039 | i2c->adap.algo = &i2c_pxa_algorithm; | ||
1040 | ret = request_irq(irq, i2c_pxa_handler, IRQF_DISABLED, | ||
1041 | i2c->adap.name, i2c); | ||
1042 | if (ret) | ||
1043 | goto ereqirq; | ||
1044 | } | ||
920 | 1045 | ||
921 | i2c_pxa_reset(i2c); | 1046 | i2c_pxa_reset(i2c); |
922 | 1047 | ||
923 | i2c->adap.algo_data = i2c; | 1048 | i2c->adap.algo_data = i2c; |
924 | i2c->adap.dev.parent = &dev->dev; | 1049 | i2c->adap.dev.parent = &dev->dev; |
925 | 1050 | ||
926 | if (plat) { | ||
927 | i2c->adap.class = plat->class; | ||
928 | } | ||
929 | |||
930 | /* | 1051 | /* |
931 | * If "dev->id" is negative we consider it as zero. | 1052 | * If "dev->id" is negative we consider it as zero. |
932 | * The reason to do so is to avoid sysfs names that only make | 1053 | * The reason to do so is to avoid sysfs names that only make |
@@ -952,17 +1073,11 @@ static int i2c_pxa_probe(struct platform_device *dev) | |||
952 | return 0; | 1073 | return 0; |
953 | 1074 | ||
954 | eadapt: | 1075 | eadapt: |
955 | free_irq(irq, i2c); | 1076 | if (!i2c->use_pio) |
1077 | free_irq(irq, i2c); | ||
956 | ereqirq: | 1078 | ereqirq: |
957 | clk_disable(i2c->clk); | 1079 | clk_disable(i2c->clk); |
958 | 1080 | i2c_pxa_disable(dev); | |
959 | #ifdef CONFIG_PXA27x | ||
960 | if (dev->id == 1) { | ||
961 | local_irq_disable(); | ||
962 | PCFR &= ~PCFR_PI2CEN; | ||
963 | local_irq_enable(); | ||
964 | } | ||
965 | #endif | ||
966 | eremap: | 1081 | eremap: |
967 | clk_put(i2c->clk); | 1082 | clk_put(i2c->clk); |
968 | eclk: | 1083 | eclk: |
@@ -979,18 +1094,12 @@ static int i2c_pxa_remove(struct platform_device *dev) | |||
979 | platform_set_drvdata(dev, NULL); | 1094 | platform_set_drvdata(dev, NULL); |
980 | 1095 | ||
981 | i2c_del_adapter(&i2c->adap); | 1096 | i2c_del_adapter(&i2c->adap); |
982 | free_irq(i2c->irq, i2c); | 1097 | if (!i2c->use_pio) |
1098 | free_irq(i2c->irq, i2c); | ||
983 | 1099 | ||
984 | clk_disable(i2c->clk); | 1100 | clk_disable(i2c->clk); |
985 | clk_put(i2c->clk); | 1101 | clk_put(i2c->clk); |
986 | 1102 | i2c_pxa_disable(dev); | |
987 | #ifdef CONFIG_PXA27x | ||
988 | if (dev->id == 1) { | ||
989 | local_irq_disable(); | ||
990 | PCFR &= ~PCFR_PI2CEN; | ||
991 | local_irq_enable(); | ||
992 | } | ||
993 | #endif | ||
994 | 1103 | ||
995 | release_mem_region(i2c->iobase, i2c->iosize); | 1104 | release_mem_region(i2c->iobase, i2c->iosize); |
996 | kfree(i2c); | 1105 | kfree(i2c); |
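The i2c-pxa changes above add a polled ("PIO") transfer path: when the platform sets use_pio, no interrupt is requested, the usual interrupt handler is simply called from a delay loop until every message completes, and wake_up() is skipped in the completion path. A standalone model of that polling structure follows; handler() and the pio_ctx fields are stand-ins for the driver's own handler and state.

    /*
     * Standalone model of the polled transfer loop; handler() and pio_ctx
     * are stand-ins for the driver's interrupt handler and state.
     */
    #include <stdio.h>

    #define I2C_RETRY   (-2000)         /* illustrative "try again" code */

    struct pio_ctx {
        int msgs_left;                  /* messages still to complete */
        int status;                     /* result set by the completion path */
    };

    /* Stand-in for the IRQ handler: pretend one message completes per call. */
    static void handler(struct pio_ctx *ctx)
    {
        if (ctx->msgs_left > 0 && --ctx->msgs_left == 0)
            ctx->status = 0;
    }

    static int pio_xfer(struct pio_ctx *ctx)
    {
        unsigned long timeout = 500000; /* ~5 s at the driver's 10 us per poll */

        /*
         * Poll by calling the same code the interrupt would run; the real
         * driver udelay(10)s between iterations.
         */
        while (timeout-- && ctx->msgs_left > 0)
            handler(ctx);

        return ctx->msgs_left ? I2C_RETRY : ctx->status;
    }

    int main(void)
    {
        struct pio_ctx ctx = { .msgs_left = 2, .status = -1 };

        printf("pio_xfer() = %d\n", pio_xfer(&ctx));
        return 0;
    }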
diff --git a/drivers/i2c/busses/i2c-sibyte.c b/drivers/i2c/busses/i2c-sibyte.c index 503a134ec803..8fbbdb4c2f35 100644 --- a/drivers/i2c/busses/i2c-sibyte.c +++ b/drivers/i2c/busses/i2c-sibyte.c | |||
@@ -36,14 +36,6 @@ struct i2c_algo_sibyte_data { | |||
36 | /* ----- global defines ----------------------------------------------- */ | 36 | /* ----- global defines ----------------------------------------------- */ |
37 | #define SMB_CSR(a,r) ((long)(a->reg_base + r)) | 37 | #define SMB_CSR(a,r) ((long)(a->reg_base + r)) |
38 | 38 | ||
39 | /* ----- global variables --------------------------------------------- */ | ||
40 | |||
41 | /* module parameters: | ||
42 | */ | ||
43 | static int bit_scan; /* have a look at what's hanging 'round */ | ||
44 | module_param(bit_scan, int, 0); | ||
45 | MODULE_PARM_DESC(bit_scan, "Scan for active chips on the bus"); | ||
46 | |||
47 | 39 | ||
48 | static int smbus_xfer(struct i2c_adapter *i2c_adap, u16 addr, | 40 | static int smbus_xfer(struct i2c_adapter *i2c_adap, u16 addr, |
49 | unsigned short flags, char read_write, | 41 | unsigned short flags, char read_write, |
@@ -140,9 +132,8 @@ static const struct i2c_algorithm i2c_sibyte_algo = { | |||
140 | /* | 132 | /* |
141 | * registering functions to load algorithms at runtime | 133 | * registering functions to load algorithms at runtime |
142 | */ | 134 | */ |
143 | int i2c_sibyte_add_bus(struct i2c_adapter *i2c_adap, int speed) | 135 | int __init i2c_sibyte_add_bus(struct i2c_adapter *i2c_adap, int speed) |
144 | { | 136 | { |
145 | int i; | ||
146 | struct i2c_algo_sibyte_data *adap = i2c_adap->algo_data; | 137 | struct i2c_algo_sibyte_data *adap = i2c_adap->algo_data; |
147 | 138 | ||
148 | /* register new adapter to i2c module... */ | 139 | /* register new adapter to i2c module... */ |
@@ -152,24 +143,6 @@ int i2c_sibyte_add_bus(struct i2c_adapter *i2c_adap, int speed) | |||
152 | csr_out32(speed, SMB_CSR(adap,R_SMB_FREQ)); | 143 | csr_out32(speed, SMB_CSR(adap,R_SMB_FREQ)); |
153 | csr_out32(0, SMB_CSR(adap,R_SMB_CONTROL)); | 144 | csr_out32(0, SMB_CSR(adap,R_SMB_CONTROL)); |
154 | 145 | ||
155 | /* scan bus */ | ||
156 | if (bit_scan) { | ||
157 | union i2c_smbus_data data; | ||
158 | int rc; | ||
159 | printk(KERN_INFO " i2c-algo-sibyte.o: scanning bus %s.\n", | ||
160 | i2c_adap->name); | ||
161 | for (i = 0x00; i < 0x7f; i++) { | ||
162 | /* XXXKW is this a realistic probe? */ | ||
163 | rc = smbus_xfer(i2c_adap, i, 0, I2C_SMBUS_READ, 0, | ||
164 | I2C_SMBUS_BYTE_DATA, &data); | ||
165 | if (!rc) { | ||
166 | printk("(%02x)",i); | ||
167 | } else | ||
168 | printk("."); | ||
169 | } | ||
170 | printk("\n"); | ||
171 | } | ||
172 | |||
173 | return i2c_add_adapter(i2c_adap); | 146 | return i2c_add_adapter(i2c_adap); |
174 | } | 147 | } |
175 | 148 | ||
diff --git a/drivers/i2c/busses/i2c-stub.c b/drivers/i2c/busses/i2c-stub.c index 84df29da1ddc..c2a9f8c94f5e 100644 --- a/drivers/i2c/busses/i2c-stub.c +++ b/drivers/i2c/busses/i2c-stub.c | |||
@@ -1,8 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | i2c-stub.c - Part of lm_sensors, Linux kernel modules for hardware | 2 | i2c-stub.c - I2C/SMBus chip emulator |
3 | monitoring | ||
4 | 3 | ||
5 | Copyright (c) 2004 Mark M. Hoffman <mhoffman@lightlink.com> | 4 | Copyright (c) 2004 Mark M. Hoffman <mhoffman@lightlink.com> |
5 | Copyright (C) 2007 Jean Delvare <khali@linux-fr.org> | ||
6 | 6 | ||
7 | This program is free software; you can redistribute it and/or modify | 7 | This program is free software; you can redistribute it and/or modify |
8 | it under the terms of the GNU General Public License as published by | 8 | it under the terms of the GNU General Public License as published by |
@@ -37,8 +37,8 @@ MODULE_PARM_DESC(chip_addr, | |||
37 | 37 | ||
38 | struct stub_chip { | 38 | struct stub_chip { |
39 | u8 pointer; | 39 | u8 pointer; |
40 | u8 bytes[256]; | 40 | u16 words[256]; /* Byte operations use the LSB as per SMBus |
41 | u16 words[256]; | 41 | specification */ |
42 | }; | 42 | }; |
43 | 43 | ||
44 | static struct stub_chip *stub_chips; | 44 | static struct stub_chip *stub_chips; |
@@ -75,7 +75,7 @@ static s32 stub_xfer(struct i2c_adapter * adap, u16 addr, unsigned short flags, | |||
75 | "wrote 0x%02x.\n", | 75 | "wrote 0x%02x.\n", |
76 | addr, command); | 76 | addr, command); |
77 | } else { | 77 | } else { |
78 | data->byte = chip->bytes[chip->pointer++]; | 78 | data->byte = chip->words[chip->pointer++] & 0xff; |
79 | dev_dbg(&adap->dev, "smbus byte - addr 0x%02x, " | 79 | dev_dbg(&adap->dev, "smbus byte - addr 0x%02x, " |
80 | "read 0x%02x.\n", | 80 | "read 0x%02x.\n", |
81 | addr, data->byte); | 81 | addr, data->byte); |
@@ -86,12 +86,13 @@ static s32 stub_xfer(struct i2c_adapter * adap, u16 addr, unsigned short flags, | |||
86 | 86 | ||
87 | case I2C_SMBUS_BYTE_DATA: | 87 | case I2C_SMBUS_BYTE_DATA: |
88 | if (read_write == I2C_SMBUS_WRITE) { | 88 | if (read_write == I2C_SMBUS_WRITE) { |
89 | chip->bytes[command] = data->byte; | 89 | chip->words[command] &= 0xff00; |
90 | chip->words[command] |= data->byte; | ||
90 | dev_dbg(&adap->dev, "smbus byte data - addr 0x%02x, " | 91 | dev_dbg(&adap->dev, "smbus byte data - addr 0x%02x, " |
91 | "wrote 0x%02x at 0x%02x.\n", | 92 | "wrote 0x%02x at 0x%02x.\n", |
92 | addr, data->byte, command); | 93 | addr, data->byte, command); |
93 | } else { | 94 | } else { |
94 | data->byte = chip->bytes[command]; | 95 | data->byte = chip->words[command] & 0xff; |
95 | dev_dbg(&adap->dev, "smbus byte data - addr 0x%02x, " | 96 | dev_dbg(&adap->dev, "smbus byte data - addr 0x%02x, " |
96 | "read 0x%02x at 0x%02x.\n", | 97 | "read 0x%02x at 0x%02x.\n", |
97 | addr, data->byte, command); | 98 | addr, data->byte, command); |
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c index c9ce77f13c0e..77b13d027f86 100644 --- a/drivers/i2c/busses/i2c-viapro.c +++ b/drivers/i2c/busses/i2c-viapro.c | |||
@@ -4,7 +4,7 @@ | |||
4 | Copyright (c) 1998 - 2002 Frodo Looijaard <frodol@dds.nl>, | 4 | Copyright (c) 1998 - 2002 Frodo Looijaard <frodol@dds.nl>, |
5 | Philip Edelbrock <phil@netroedge.com>, Kyösti Mälkki <kmalkki@cc.hut.fi>, | 5 | Philip Edelbrock <phil@netroedge.com>, Kyösti Mälkki <kmalkki@cc.hut.fi>, |
6 | Mark D. Studebaker <mdsxyz123@yahoo.com> | 6 | Mark D. Studebaker <mdsxyz123@yahoo.com> |
7 | Copyright (C) 2005 - 2007 Jean Delvare <khali@linux-fr.org> | 7 | Copyright (C) 2005 - 2008 Jean Delvare <khali@linux-fr.org> |
8 | 8 | ||
9 | This program is free software; you can redistribute it and/or modify | 9 | This program is free software; you can redistribute it and/or modify |
10 | it under the terms of the GNU General Public License as published by | 10 | it under the terms of the GNU General Public License as published by |
@@ -35,6 +35,7 @@ | |||
35 | VT8235 0x3177 yes | 35 | VT8235 0x3177 yes |
36 | VT8237R 0x3227 yes | 36 | VT8237R 0x3227 yes |
37 | VT8237A 0x3337 yes | 37 | VT8237A 0x3337 yes |
38 | VT8237S 0x3372 yes | ||
38 | VT8251 0x3287 yes | 39 | VT8251 0x3287 yes |
39 | CX700 0x8324 yes | 40 | CX700 0x8324 yes |
40 | 41 | ||
@@ -318,6 +319,10 @@ static int __devinit vt596_probe(struct pci_dev *pdev, | |||
318 | unsigned char temp; | 319 | unsigned char temp; |
319 | int error = -ENODEV; | 320 | int error = -ENODEV; |
320 | 321 | ||
322 | /* driver_data might come from user-space, so check it */ | ||
323 | if (id->driver_data & 1 || id->driver_data > 0xff) | ||
324 | return -EINVAL; | ||
325 | |||
321 | /* Determine the address of the SMBus areas */ | 326 | /* Determine the address of the SMBus areas */ |
322 | if (force_addr) { | 327 | if (force_addr) { |
323 | vt596_smba = force_addr & 0xfff0; | 328 | vt596_smba = force_addr & 0xfff0; |
@@ -389,6 +394,7 @@ found: | |||
389 | case PCI_DEVICE_ID_VIA_8251: | 394 | case PCI_DEVICE_ID_VIA_8251: |
390 | case PCI_DEVICE_ID_VIA_8237: | 395 | case PCI_DEVICE_ID_VIA_8237: |
391 | case PCI_DEVICE_ID_VIA_8237A: | 396 | case PCI_DEVICE_ID_VIA_8237A: |
397 | case PCI_DEVICE_ID_VIA_8237S: | ||
392 | case PCI_DEVICE_ID_VIA_8235: | 398 | case PCI_DEVICE_ID_VIA_8235: |
393 | case PCI_DEVICE_ID_VIA_8233A: | 399 | case PCI_DEVICE_ID_VIA_8233A: |
394 | case PCI_DEVICE_ID_VIA_8233_0: | 400 | case PCI_DEVICE_ID_VIA_8233_0: |
@@ -440,6 +446,8 @@ static struct pci_device_id vt596_ids[] = { | |||
440 | .driver_data = SMBBA3 }, | 446 | .driver_data = SMBBA3 }, |
441 | { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237A), | 447 | { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237A), |
442 | .driver_data = SMBBA3 }, | 448 | .driver_data = SMBBA3 }, |
449 | { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237S), | ||
450 | .driver_data = SMBBA3 }, | ||
443 | { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4), | 451 | { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4), |
444 | .driver_data = SMBBA1 }, | 452 | .driver_data = SMBBA1 }, |
445 | { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8251), | 453 | { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8251), |
@@ -455,6 +463,7 @@ static struct pci_driver vt596_driver = { | |||
455 | .name = "vt596_smbus", | 463 | .name = "vt596_smbus", |
456 | .id_table = vt596_ids, | 464 | .id_table = vt596_ids, |
457 | .probe = vt596_probe, | 465 | .probe = vt596_probe, |
466 | .dynids.use_driver_data = 1, | ||
458 | }; | 467 | }; |
459 | 468 | ||
460 | static int __init i2c_vt596_init(void) | 469 | static int __init i2c_vt596_init(void) |
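The i2c-viapro hunks above enable dynamic device IDs (dynids.use_driver_data) and therefore treat id->driver_data as untrusted in probe, rejecting values that are odd or larger than 0xff before they are used to locate the SMBus base-address register in PCI configuration space. A standalone sketch of that sanity check; only the bounds come from the hunk, the rest is illustrative.

    /*
     * Standalone sketch of validating a possibly user-supplied driver_data
     * value before using it as a PCI config-space register offset.
     */
    #include <errno.h>
    #include <stdio.h>

    /* Accept only an even offset within configuration space (0..0xff). */
    static int check_driver_data(unsigned long driver_data)
    {
        if ((driver_data & 1) || driver_data > 0xff)
            return -EINVAL;
        return 0;
    }

    int main(void)
    {
        printf("0x90  -> %d\n", check_driver_data(0x90));   /* accepted */
        printf("0x91  -> %d\n", check_driver_data(0x91));   /* odd: rejected */
        printf("0x190 -> %d\n", check_driver_data(0x190));  /* too big: rejected */
        return 0;
    }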
diff --git a/drivers/i2c/chips/Kconfig b/drivers/i2c/chips/Kconfig index 2e1c24f671cf..bd7082c2443d 100644 --- a/drivers/i2c/chips/Kconfig +++ b/drivers/i2c/chips/Kconfig | |||
@@ -4,32 +4,6 @@ | |||
4 | 4 | ||
5 | menu "Miscellaneous I2C Chip support" | 5 | menu "Miscellaneous I2C Chip support" |
6 | 6 | ||
7 | config SENSORS_DS1337 | ||
8 | tristate "Dallas DS1337 and DS1339 Real Time Clock (DEPRECATED)" | ||
9 | depends on EXPERIMENTAL | ||
10 | help | ||
11 | If you say yes here you get support for Dallas Semiconductor | ||
12 | DS1337 and DS1339 real-time clock chips. | ||
13 | |||
14 | This driver can also be built as a module. If so, the module | ||
15 | will be called ds1337. | ||
16 | |||
17 | This driver is deprecated and will be dropped soon. Use | ||
18 | rtc-ds1307 instead. | ||
19 | |||
20 | config SENSORS_DS1374 | ||
21 | tristate "Dallas DS1374 Real Time Clock (DEPRECATED)" | ||
22 | depends on EXPERIMENTAL | ||
23 | help | ||
24 | If you say yes here you get support for Dallas Semiconductor | ||
25 | DS1374 real-time clock chips. | ||
26 | |||
27 | This driver can also be built as a module. If so, the module | ||
28 | will be called ds1374. | ||
29 | |||
30 | This driver is deprecated and will be dropped soon. Use | ||
31 | rtc-ds1374 instead. | ||
32 | |||
33 | config DS1682 | 7 | config DS1682 |
34 | tristate "Dallas DS1682 Total Elapsed Time Recorder with Alarm" | 8 | tristate "Dallas DS1682 Total Elapsed Time Recorder with Alarm" |
35 | depends on EXPERIMENTAL | 9 | depends on EXPERIMENTAL |
@@ -57,7 +31,7 @@ config SENSORS_PCF8574 | |||
57 | default n | 31 | default n |
58 | help | 32 | help |
59 | If you say yes here you get support for Philips PCF8574 and | 33 | If you say yes here you get support for Philips PCF8574 and |
60 | PCF8574A chips. | 34 | PCF8574A chips. These chips are 8-bit I/O expanders for the I2C bus. |
61 | 35 | ||
62 | This driver can also be built as a module. If so, the module | 36 | This driver can also be built as a module. If so, the module |
63 | will be called pcf8574. | 37 | will be called pcf8574. |
@@ -65,6 +39,20 @@ config SENSORS_PCF8574 | |||
65 | These devices are hard to detect and rarely found on mainstream | 39 | These devices are hard to detect and rarely found on mainstream |
66 | hardware. If unsure, say N. | 40 | hardware. If unsure, say N. |
67 | 41 | ||
42 | config PCF8575 | ||
43 | tristate "Philips PCF8575" | ||
44 | default n | ||
45 | help | ||
46 | If you say yes here you get support for the Philips PCF8575 chip. | ||
47 | This chip is a 16-bit I/O expander for the I2C bus. Several other | ||
48 | chip manufacturers sell equivalent chips, e.g. Texas Instruments. | ||
49 | |||
50 | This driver can also be built as a module. If so, the module | ||
51 | will be called pcf8575. | ||
52 | |||
53 | This device is hard to detect and is rarely found on mainstream | ||
54 | hardware. If unsure, say N. | ||
55 | |||
68 | config SENSORS_PCA9539 | 56 | config SENSORS_PCA9539 |
69 | tristate "Philips PCA9539 16-bit I/O port" | 57 | tristate "Philips PCA9539 16-bit I/O port" |
70 | depends on EXPERIMENTAL | 58 | depends on EXPERIMENTAL |
@@ -100,12 +88,8 @@ config ISP1301_OMAP | |||
100 | This driver can also be built as a module. If so, the module | 88 | This driver can also be built as a module. If so, the module |
101 | will be called isp1301_omap. | 89 | will be called isp1301_omap. |
102 | 90 | ||
103 | # NOTE: This isn't really OMAP-specific, except for the current | ||
104 | # interface location in <include/asm-arm/arch-omap/tps65010.h> | ||
105 | # and having mostly OMAP-specific board support | ||
106 | config TPS65010 | 91 | config TPS65010 |
107 | tristate "TPS6501x Power Management chips" | 92 | tristate "TPS6501x Power Management chips" |
108 | depends on ARCH_OMAP | ||
109 | default y if MACH_OMAP_H2 || MACH_OMAP_H3 || MACH_OMAP_OSK | 93 | default y if MACH_OMAP_H2 || MACH_OMAP_H3 || MACH_OMAP_OSK |
110 | help | 94 | help |
111 | If you say yes here you get support for the TPS6501x series of | 95 | If you say yes here you get support for the TPS6501x series of |
@@ -116,18 +100,6 @@ config TPS65010 | |||
116 | This driver can also be built as a module. If so, the module | 100 | This driver can also be built as a module. If so, the module |
117 | will be called tps65010. | 101 | will be called tps65010. |
118 | 102 | ||
119 | config SENSORS_M41T00 | ||
120 | tristate "ST M41T00 RTC chip (DEPRECATED)" | ||
121 | depends on PPC32 | ||
122 | help | ||
123 | If you say yes here you get support for the ST M41T00 RTC chip. | ||
124 | |||
125 | This driver can also be built as a module. If so, the module | ||
126 | will be called m41t00. | ||
127 | |||
128 | This driver is deprecated and will be dropped soon. Use | ||
129 | rtc-ds1307 or rtc-m41t80 instead. | ||
130 | |||
131 | config SENSORS_MAX6875 | 103 | config SENSORS_MAX6875 |
132 | tristate "Maxim MAX6875 Power supply supervisor" | 104 | tristate "Maxim MAX6875 Power supply supervisor" |
133 | depends on EXPERIMENTAL | 105 | depends on EXPERIMENTAL |
diff --git a/drivers/i2c/chips/Makefile b/drivers/i2c/chips/Makefile index ca924e105959..501f00cea782 100644 --- a/drivers/i2c/chips/Makefile +++ b/drivers/i2c/chips/Makefile | |||
@@ -2,14 +2,12 @@ | |||
2 | # Makefile for miscellaneous I2C chip drivers. | 2 | # Makefile for miscellaneous I2C chip drivers. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_SENSORS_DS1337) += ds1337.o | ||
6 | obj-$(CONFIG_SENSORS_DS1374) += ds1374.o | ||
7 | obj-$(CONFIG_DS1682) += ds1682.o | 5 | obj-$(CONFIG_DS1682) += ds1682.o |
8 | obj-$(CONFIG_SENSORS_EEPROM) += eeprom.o | 6 | obj-$(CONFIG_SENSORS_EEPROM) += eeprom.o |
9 | obj-$(CONFIG_SENSORS_MAX6875) += max6875.o | 7 | obj-$(CONFIG_SENSORS_MAX6875) += max6875.o |
10 | obj-$(CONFIG_SENSORS_M41T00) += m41t00.o | ||
11 | obj-$(CONFIG_SENSORS_PCA9539) += pca9539.o | 8 | obj-$(CONFIG_SENSORS_PCA9539) += pca9539.o |
12 | obj-$(CONFIG_SENSORS_PCF8574) += pcf8574.o | 9 | obj-$(CONFIG_SENSORS_PCF8574) += pcf8574.o |
10 | obj-$(CONFIG_PCF8575) += pcf8575.o | ||
13 | obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o | 11 | obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o |
14 | obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o | 12 | obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o |
15 | obj-$(CONFIG_TPS65010) += tps65010.o | 13 | obj-$(CONFIG_TPS65010) += tps65010.o |
diff --git a/drivers/i2c/chips/ds1337.c b/drivers/i2c/chips/ds1337.c deleted file mode 100644 index ec17d6b684a2..000000000000 --- a/drivers/i2c/chips/ds1337.c +++ /dev/null | |||
@@ -1,410 +0,0 @@ | |||
1 | /* | ||
2 | * linux/drivers/i2c/chips/ds1337.c | ||
3 | * | ||
4 | * Copyright (C) 2005 James Chapman <jchapman@katalix.com> | ||
5 | * | ||
6 | * based on linux/drivers/acorn/char/pcf8583.c | ||
7 | * Copyright (C) 2000 Russell King | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | * | ||
13 | * Driver for Dallas Semiconductor DS1337 and DS1339 real time clock chip | ||
14 | */ | ||
15 | |||
16 | #include <linux/module.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <linux/i2c.h> | ||
20 | #include <linux/string.h> | ||
21 | #include <linux/rtc.h> /* get the user-level API */ | ||
22 | #include <linux/bcd.h> | ||
23 | #include <linux/list.h> | ||
24 | |||
25 | /* Device registers */ | ||
26 | #define DS1337_REG_HOUR 2 | ||
27 | #define DS1337_REG_DAY 3 | ||
28 | #define DS1337_REG_DATE 4 | ||
29 | #define DS1337_REG_MONTH 5 | ||
30 | #define DS1337_REG_CONTROL 14 | ||
31 | #define DS1337_REG_STATUS 15 | ||
32 | |||
33 | /* FIXME - how do we export these interface constants? */ | ||
34 | #define DS1337_GET_DATE 0 | ||
35 | #define DS1337_SET_DATE 1 | ||
36 | |||
37 | /* | ||
38 | * Functions declaration | ||
39 | */ | ||
40 | static unsigned short normal_i2c[] = { 0x68, I2C_CLIENT_END }; | ||
41 | |||
42 | I2C_CLIENT_INSMOD_1(ds1337); | ||
43 | |||
44 | static int ds1337_attach_adapter(struct i2c_adapter *adapter); | ||
45 | static int ds1337_detect(struct i2c_adapter *adapter, int address, int kind); | ||
46 | static void ds1337_init_client(struct i2c_client *client); | ||
47 | static int ds1337_detach_client(struct i2c_client *client); | ||
48 | static int ds1337_command(struct i2c_client *client, unsigned int cmd, | ||
49 | void *arg); | ||
50 | |||
51 | /* | ||
52 | * Driver data (common to all clients) | ||
53 | */ | ||
54 | static struct i2c_driver ds1337_driver = { | ||
55 | .driver = { | ||
56 | .name = "ds1337", | ||
57 | }, | ||
58 | .attach_adapter = ds1337_attach_adapter, | ||
59 | .detach_client = ds1337_detach_client, | ||
60 | .command = ds1337_command, | ||
61 | }; | ||
62 | |||
63 | /* | ||
64 | * Client data (each client gets its own) | ||
65 | */ | ||
66 | struct ds1337_data { | ||
67 | struct i2c_client client; | ||
68 | struct list_head list; | ||
69 | }; | ||
70 | |||
71 | /* | ||
72 | * Internal variables | ||
73 | */ | ||
74 | static LIST_HEAD(ds1337_clients); | ||
75 | |||
76 | static inline int ds1337_read(struct i2c_client *client, u8 reg, u8 *value) | ||
77 | { | ||
78 | s32 tmp = i2c_smbus_read_byte_data(client, reg); | ||
79 | |||
80 | if (tmp < 0) | ||
81 | return -EIO; | ||
82 | |||
83 | *value = tmp; | ||
84 | |||
85 | return 0; | ||
86 | } | ||
87 | |||
88 | /* | ||
89 | * Chip access functions | ||
90 | */ | ||
91 | static int ds1337_get_datetime(struct i2c_client *client, struct rtc_time *dt) | ||
92 | { | ||
93 | int result; | ||
94 | u8 buf[7]; | ||
95 | u8 val; | ||
96 | struct i2c_msg msg[2]; | ||
97 | u8 offs = 0; | ||
98 | |||
99 | if (!dt) { | ||
100 | dev_dbg(&client->dev, "%s: EINVAL: dt=NULL\n", __FUNCTION__); | ||
101 | return -EINVAL; | ||
102 | } | ||
103 | |||
104 | msg[0].addr = client->addr; | ||
105 | msg[0].flags = 0; | ||
106 | msg[0].len = 1; | ||
107 | msg[0].buf = &offs; | ||
108 | |||
109 | msg[1].addr = client->addr; | ||
110 | msg[1].flags = I2C_M_RD; | ||
111 | msg[1].len = sizeof(buf); | ||
112 | msg[1].buf = &buf[0]; | ||
113 | |||
114 | result = i2c_transfer(client->adapter, msg, 2); | ||
115 | |||
116 | dev_dbg(&client->dev, "%s: [%d] %02x %02x %02x %02x %02x %02x %02x\n", | ||
117 | __FUNCTION__, result, buf[0], buf[1], buf[2], buf[3], | ||
118 | buf[4], buf[5], buf[6]); | ||
119 | |||
120 | if (result == 2) { | ||
121 | dt->tm_sec = BCD2BIN(buf[0]); | ||
122 | dt->tm_min = BCD2BIN(buf[1]); | ||
123 | val = buf[2] & 0x3f; | ||
124 | dt->tm_hour = BCD2BIN(val); | ||
125 | dt->tm_wday = BCD2BIN(buf[3]) - 1; | ||
126 | dt->tm_mday = BCD2BIN(buf[4]); | ||
127 | val = buf[5] & 0x7f; | ||
128 | dt->tm_mon = BCD2BIN(val) - 1; | ||
129 | dt->tm_year = BCD2BIN(buf[6]); | ||
130 | if (buf[5] & 0x80) | ||
131 | dt->tm_year += 100; | ||
132 | |||
133 | dev_dbg(&client->dev, "%s: secs=%d, mins=%d, " | ||
134 | "hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n", | ||
135 | __FUNCTION__, dt->tm_sec, dt->tm_min, | ||
136 | dt->tm_hour, dt->tm_mday, | ||
137 | dt->tm_mon, dt->tm_year, dt->tm_wday); | ||
138 | |||
139 | return 0; | ||
140 | } | ||
141 | |||
142 | dev_err(&client->dev, "error reading data! %d\n", result); | ||
143 | return -EIO; | ||
144 | } | ||
145 | |||
146 | static int ds1337_set_datetime(struct i2c_client *client, struct rtc_time *dt) | ||
147 | { | ||
148 | int result; | ||
149 | u8 buf[8]; | ||
150 | u8 val; | ||
151 | struct i2c_msg msg[1]; | ||
152 | |||
153 | if (!dt) { | ||
154 | dev_dbg(&client->dev, "%s: EINVAL: dt=NULL\n", __FUNCTION__); | ||
155 | return -EINVAL; | ||
156 | } | ||
157 | |||
158 | dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, " | ||
159 | "mday=%d, mon=%d, year=%d, wday=%d\n", __FUNCTION__, | ||
160 | dt->tm_sec, dt->tm_min, dt->tm_hour, | ||
161 | dt->tm_mday, dt->tm_mon, dt->tm_year, dt->tm_wday); | ||
162 | |||
163 | buf[0] = 0; /* reg offset */ | ||
164 | buf[1] = BIN2BCD(dt->tm_sec); | ||
165 | buf[2] = BIN2BCD(dt->tm_min); | ||
166 | buf[3] = BIN2BCD(dt->tm_hour); | ||
167 | buf[4] = BIN2BCD(dt->tm_wday + 1); | ||
168 | buf[5] = BIN2BCD(dt->tm_mday); | ||
169 | buf[6] = BIN2BCD(dt->tm_mon + 1); | ||
170 | val = dt->tm_year; | ||
171 | if (val >= 100) { | ||
172 | val -= 100; | ||
173 | buf[6] |= (1 << 7); | ||
174 | } | ||
175 | buf[7] = BIN2BCD(val); | ||
176 | |||
177 | msg[0].addr = client->addr; | ||
178 | msg[0].flags = 0; | ||
179 | msg[0].len = sizeof(buf); | ||
180 | msg[0].buf = &buf[0]; | ||
181 | |||
182 | result = i2c_transfer(client->adapter, msg, 1); | ||
183 | if (result == 1) | ||
184 | return 0; | ||
185 | |||
186 | dev_err(&client->dev, "error writing data! %d\n", result); | ||
187 | return -EIO; | ||
188 | } | ||
189 | |||
190 | static int ds1337_command(struct i2c_client *client, unsigned int cmd, | ||
191 | void *arg) | ||
192 | { | ||
193 | dev_dbg(&client->dev, "%s: cmd=%d\n", __FUNCTION__, cmd); | ||
194 | |||
195 | switch (cmd) { | ||
196 | case DS1337_GET_DATE: | ||
197 | return ds1337_get_datetime(client, arg); | ||
198 | |||
199 | case DS1337_SET_DATE: | ||
200 | return ds1337_set_datetime(client, arg); | ||
201 | |||
202 | default: | ||
203 | return -EINVAL; | ||
204 | } | ||
205 | } | ||
206 | |||
207 | /* | ||
208 | * Public API for access to specific device. Useful for low-level | ||
209 | * RTC access from kernel code. | ||
210 | */ | ||
211 | int ds1337_do_command(int bus, int cmd, void *arg) | ||
212 | { | ||
213 | struct list_head *walk; | ||
214 | struct list_head *tmp; | ||
215 | struct ds1337_data *data; | ||
216 | |||
217 | list_for_each_safe(walk, tmp, &ds1337_clients) { | ||
218 | data = list_entry(walk, struct ds1337_data, list); | ||
219 | if (data->client.adapter->nr == bus) | ||
220 | return ds1337_command(&data->client, cmd, arg); | ||
221 | } | ||
222 | |||
223 | return -ENODEV; | ||
224 | } | ||
225 | |||
226 | static int ds1337_attach_adapter(struct i2c_adapter *adapter) | ||
227 | { | ||
228 | return i2c_probe(adapter, &addr_data, ds1337_detect); | ||
229 | } | ||
230 | |||
231 | /* | ||
232 | * The following function does more than just detection. If detection | ||
233 | * succeeds, it also registers the new chip. | ||
234 | */ | ||
235 | static int ds1337_detect(struct i2c_adapter *adapter, int address, int kind) | ||
236 | { | ||
237 | struct i2c_client *new_client; | ||
238 | struct ds1337_data *data; | ||
239 | int err = 0; | ||
240 | const char *name = ""; | ||
241 | |||
242 | if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | | ||
243 | I2C_FUNC_I2C)) | ||
244 | goto exit; | ||
245 | |||
246 | if (!(data = kzalloc(sizeof(struct ds1337_data), GFP_KERNEL))) { | ||
247 | err = -ENOMEM; | ||
248 | goto exit; | ||
249 | } | ||
250 | INIT_LIST_HEAD(&data->list); | ||
251 | |||
252 | /* The common I2C client data is placed right before the | ||
253 | * DS1337-specific data. | ||
254 | */ | ||
255 | new_client = &data->client; | ||
256 | i2c_set_clientdata(new_client, data); | ||
257 | new_client->addr = address; | ||
258 | new_client->adapter = adapter; | ||
259 | new_client->driver = &ds1337_driver; | ||
260 | new_client->flags = 0; | ||
261 | |||
262 | /* | ||
263 | * Now we do the remaining detection. A negative kind means that | ||
264 | * the driver was loaded with no force parameter (default), so we | ||
265 | * must both detect and identify the chip. A zero kind means that | ||
266 | * the driver was loaded with the force parameter, the detection | ||
267 | * step shall be skipped. A positive kind means that the driver | ||
268 | * was loaded with the force parameter and a given kind of chip is | ||
269 | * requested, so both the detection and the identification steps | ||
270 | * are skipped. | ||
271 | * | ||
272 | * For detection, we read registers that are most likely to cause | ||
273 | * detection failure, i.e. those that have more bits with fixed | ||
274 | * or reserved values. | ||
275 | */ | ||
276 | |||
277 | /* Default to an DS1337 if forced */ | ||
278 | if (kind == 0) | ||
279 | kind = ds1337; | ||
280 | |||
281 | if (kind < 0) { /* detection and identification */ | ||
282 | u8 data; | ||
283 | |||
284 | /* Check that status register bits 6-2 are zero */ | ||
285 | if ((ds1337_read(new_client, DS1337_REG_STATUS, &data) < 0) || | ||
286 | (data & 0x7c)) | ||
287 | goto exit_free; | ||
288 | |||
289 | /* Check for a valid day register value */ | ||
290 | if ((ds1337_read(new_client, DS1337_REG_DAY, &data) < 0) || | ||
291 | (data == 0) || (data & 0xf8)) | ||
292 | goto exit_free; | ||
293 | |||
294 | /* Check for a valid date register value */ | ||
295 | if ((ds1337_read(new_client, DS1337_REG_DATE, &data) < 0) || | ||
296 | (data == 0) || (data & 0xc0) || ((data & 0x0f) > 9) || | ||
297 | (data >= 0x32)) | ||
298 | goto exit_free; | ||
299 | |||
300 | /* Check for a valid month register value */ | ||
301 | if ((ds1337_read(new_client, DS1337_REG_MONTH, &data) < 0) || | ||
302 | (data == 0) || (data & 0x60) || ((data & 0x0f) > 9) || | ||
303 | ((data >= 0x13) && (data <= 0x19))) | ||
304 | goto exit_free; | ||
305 | |||
306 | /* Check that control register bits 6-5 are zero */ | ||
307 | if ((ds1337_read(new_client, DS1337_REG_CONTROL, &data) < 0) || | ||
308 | (data & 0x60)) | ||
309 | goto exit_free; | ||
310 | |||
311 | kind = ds1337; | ||
312 | } | ||
313 | |||
314 | if (kind == ds1337) | ||
315 | name = "ds1337"; | ||
316 | |||
317 | /* We can fill in the remaining client fields */ | ||
318 | strlcpy(new_client->name, name, I2C_NAME_SIZE); | ||
319 | |||
320 | /* Tell the I2C layer a new client has arrived */ | ||
321 | if ((err = i2c_attach_client(new_client))) | ||
322 | goto exit_free; | ||
323 | |||
324 | /* Initialize the DS1337 chip */ | ||
325 | ds1337_init_client(new_client); | ||
326 | |||
327 | /* Add client to local list */ | ||
328 | list_add(&data->list, &ds1337_clients); | ||
329 | |||
330 | return 0; | ||
331 | |||
332 | exit_free: | ||
333 | kfree(data); | ||
334 | exit: | ||
335 | return err; | ||
336 | } | ||
337 | |||
338 | static void ds1337_init_client(struct i2c_client *client) | ||
339 | { | ||
340 | u8 status, control; | ||
341 | |||
342 | /* On some boards, the RTC isn't configured by boot firmware. | ||
343 | * Handle that case by starting/configuring the RTC now. | ||
344 | */ | ||
345 | status = i2c_smbus_read_byte_data(client, DS1337_REG_STATUS); | ||
346 | control = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL); | ||
347 | |||
348 | if ((status & 0x80) || (control & 0x80)) { | ||
349 | /* RTC not running */ | ||
350 | u8 buf[1+16]; /* First byte is interpreted as address */ | ||
351 | struct i2c_msg msg[1]; | ||
352 | |||
353 | dev_dbg(&client->dev, "%s: RTC not running!\n", __FUNCTION__); | ||
354 | |||
355 | /* Initialize all, including STATUS and CONTROL to zero */ | ||
356 | memset(buf, 0, sizeof(buf)); | ||
357 | |||
358 | /* Write valid values in the date/time registers */ | ||
359 | buf[1+DS1337_REG_DAY] = 1; | ||
360 | buf[1+DS1337_REG_DATE] = 1; | ||
361 | buf[1+DS1337_REG_MONTH] = 1; | ||
362 | |||
363 | msg[0].addr = client->addr; | ||
364 | msg[0].flags = 0; | ||
365 | msg[0].len = sizeof(buf); | ||
366 | msg[0].buf = &buf[0]; | ||
367 | |||
368 | i2c_transfer(client->adapter, msg, 1); | ||
369 | } else { | ||
370 | /* Running: ensure that device is set in 24-hour mode */ | ||
371 | s32 val; | ||
372 | |||
373 | val = i2c_smbus_read_byte_data(client, DS1337_REG_HOUR); | ||
374 | if ((val >= 0) && (val & (1 << 6))) | ||
375 | i2c_smbus_write_byte_data(client, DS1337_REG_HOUR, | ||
376 | val & 0x3f); | ||
377 | } | ||
378 | } | ||
379 | |||
380 | static int ds1337_detach_client(struct i2c_client *client) | ||
381 | { | ||
382 | int err; | ||
383 | struct ds1337_data *data = i2c_get_clientdata(client); | ||
384 | |||
385 | if ((err = i2c_detach_client(client))) | ||
386 | return err; | ||
387 | |||
388 | list_del(&data->list); | ||
389 | kfree(data); | ||
390 | return 0; | ||
391 | } | ||
392 | |||
393 | static int __init ds1337_init(void) | ||
394 | { | ||
395 | return i2c_add_driver(&ds1337_driver); | ||
396 | } | ||
397 | |||
398 | static void __exit ds1337_exit(void) | ||
399 | { | ||
400 | i2c_del_driver(&ds1337_driver); | ||
401 | } | ||
402 | |||
403 | MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); | ||
404 | MODULE_DESCRIPTION("DS1337 RTC driver"); | ||
405 | MODULE_LICENSE("GPL"); | ||
406 | |||
407 | EXPORT_SYMBOL_GPL(ds1337_do_command); | ||
408 | |||
409 | module_init(ds1337_init); | ||
410 | module_exit(ds1337_exit); | ||
diff --git a/drivers/i2c/chips/ds1374.c b/drivers/i2c/chips/ds1374.c deleted file mode 100644 index 8a2ff0c114d9..000000000000 --- a/drivers/i2c/chips/ds1374.c +++ /dev/null | |||
@@ -1,267 +0,0 @@ | |||
1 | /* | ||
2 | * drivers/i2c/chips/ds1374.c | ||
3 | * | ||
4 | * I2C client/driver for the Maxim/Dallas DS1374 Real-Time Clock | ||
5 | * | ||
6 | * Author: Randy Vinson <rvinson@mvista.com> | ||
7 | * | ||
8 | * Based on the m41t00.c by Mark Greer <mgreer@mvista.com> | ||
9 | * | ||
10 | * 2005 (c) MontaVista Software, Inc. This file is licensed under | ||
11 | * the terms of the GNU General Public License version 2. This program | ||
12 | * is licensed "as is" without any warranty of any kind, whether express | ||
13 | * or implied. | ||
14 | */ | ||
15 | /* | ||
16 | * This i2c client/driver wedges between the drivers/char/genrtc.c RTC | ||
17 | * interface and the SMBus interface of the i2c subsystem. | ||
18 | * It would be more efficient to use i2c msgs/i2c_transfer directly but, as | ||
19 | * recommended in .../Documentation/i2c/writing-clients section | ||
20 | * "Sending and receiving", using SMBus level communication is preferred. | ||
21 | */ | ||
22 | |||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/i2c.h> | ||
27 | #include <linux/rtc.h> | ||
28 | #include <linux/bcd.h> | ||
29 | #include <linux/mutex.h> | ||
30 | #include <linux/workqueue.h> | ||
31 | |||
32 | #define DS1374_REG_TOD0 0x00 | ||
33 | #define DS1374_REG_TOD1 0x01 | ||
34 | #define DS1374_REG_TOD2 0x02 | ||
35 | #define DS1374_REG_TOD3 0x03 | ||
36 | #define DS1374_REG_WDALM0 0x04 | ||
37 | #define DS1374_REG_WDALM1 0x05 | ||
38 | #define DS1374_REG_WDALM2 0x06 | ||
39 | #define DS1374_REG_CR 0x07 | ||
40 | #define DS1374_REG_SR 0x08 | ||
41 | #define DS1374_REG_SR_OSF 0x80 | ||
42 | #define DS1374_REG_TCR 0x09 | ||
43 | |||
44 | #define DS1374_DRV_NAME "ds1374" | ||
45 | |||
46 | static DEFINE_MUTEX(ds1374_mutex); | ||
47 | |||
48 | static struct i2c_driver ds1374_driver; | ||
49 | static struct i2c_client *save_client; | ||
50 | |||
51 | static unsigned short ignore[] = { I2C_CLIENT_END }; | ||
52 | static unsigned short normal_addr[] = { 0x68, I2C_CLIENT_END }; | ||
53 | |||
54 | static struct i2c_client_address_data addr_data = { | ||
55 | .normal_i2c = normal_addr, | ||
56 | .probe = ignore, | ||
57 | .ignore = ignore, | ||
58 | }; | ||
59 | |||
60 | static ulong ds1374_read_rtc(void) | ||
61 | { | ||
62 | ulong time = 0; | ||
63 | int reg = DS1374_REG_WDALM0; | ||
64 | |||
65 | while (reg--) { | ||
66 | s32 tmp; | ||
67 | if ((tmp = i2c_smbus_read_byte_data(save_client, reg)) < 0) { | ||
68 | dev_warn(&save_client->dev, | ||
69 | "can't read from rtc chip\n"); | ||
70 | return 0; | ||
71 | } | ||
72 | time = (time << 8) | (tmp & 0xff); | ||
73 | } | ||
74 | return time; | ||
75 | } | ||
76 | |||
77 | static void ds1374_write_rtc(ulong time) | ||
78 | { | ||
79 | int reg; | ||
80 | |||
81 | for (reg = DS1374_REG_TOD0; reg < DS1374_REG_WDALM0; reg++) { | ||
82 | if (i2c_smbus_write_byte_data(save_client, reg, time & 0xff) | ||
83 | < 0) { | ||
84 | dev_warn(&save_client->dev, | ||
85 | "can't write to rtc chip\n"); | ||
86 | break; | ||
87 | } | ||
88 | time = time >> 8; | ||
89 | } | ||
90 | } | ||
91 | |||
92 | static void ds1374_check_rtc_status(void) | ||
93 | { | ||
94 | s32 tmp; | ||
95 | |||
96 | tmp = i2c_smbus_read_byte_data(save_client, DS1374_REG_SR); | ||
97 | if (tmp < 0) { | ||
98 | dev_warn(&save_client->dev, | ||
99 | "can't read status from rtc chip\n"); | ||
100 | return; | ||
101 | } | ||
102 | if (tmp & DS1374_REG_SR_OSF) { | ||
103 | dev_warn(&save_client->dev, | ||
104 | "oscillator discontinuity flagged, time unreliable\n"); | ||
105 | tmp &= ~DS1374_REG_SR_OSF; | ||
106 | tmp = i2c_smbus_write_byte_data(save_client, DS1374_REG_SR, | ||
107 | tmp & 0xff); | ||
108 | if (tmp < 0) | ||
109 | dev_warn(&save_client->dev, | ||
110 | "can't clear discontinuity notification\n"); | ||
111 | } | ||
112 | } | ||
113 | |||
114 | ulong ds1374_get_rtc_time(void) | ||
115 | { | ||
116 | ulong t1, t2; | ||
117 | int limit = 10; /* arbitrary retry limit */ | ||
118 | |||
119 | mutex_lock(&ds1374_mutex); | ||
120 | |||
121 | /* | ||
122 | * Since the reads are being performed one byte at a time using | ||
123 | * the SMBus vs a 4-byte i2c transfer, there is a chance that a | ||
124 | * carry will occur during the read. To detect this, 2 reads are | ||
125 | * performed and compared. | ||
126 | */ | ||
127 | do { | ||
128 | t1 = ds1374_read_rtc(); | ||
129 | t2 = ds1374_read_rtc(); | ||
130 | } while (t1 != t2 && limit--); | ||
131 | |||
132 | mutex_unlock(&ds1374_mutex); | ||
133 | |||
134 | if (t1 != t2) { | ||
135 | dev_warn(&save_client->dev, | ||
136 | "can't get consistent time from rtc chip\n"); | ||
137 | t1 = 0; | ||
138 | } | ||
139 | |||
140 | return t1; | ||
141 | } | ||
142 | |||
143 | static ulong new_time; | ||
144 | |||
145 | static void ds1374_set_work(struct work_struct *work) | ||
146 | { | ||
147 | ulong t1, t2; | ||
148 | int limit = 10; /* arbitrary retry limit */ | ||
149 | |||
150 | t1 = new_time; | ||
151 | |||
152 | mutex_lock(&ds1374_mutex); | ||
153 | |||
154 | /* | ||
155 | * Since the writes are being performed one byte at a time using | ||
156 | * the SMBus vs a 4-byte i2c transfer, there is a chance that a | ||
157 | * carry will occur during the write. To detect this, the write | ||
158 | * value is read back and compared. | ||
159 | */ | ||
160 | do { | ||
161 | ds1374_write_rtc(t1); | ||
162 | t2 = ds1374_read_rtc(); | ||
163 | } while (t1 != t2 && limit--); | ||
164 | |||
165 | mutex_unlock(&ds1374_mutex); | ||
166 | |||
167 | if (t1 != t2) | ||
168 | dev_warn(&save_client->dev, | ||
169 | "can't confirm time set from rtc chip\n"); | ||
170 | } | ||
171 | |||
172 | static struct workqueue_struct *ds1374_workqueue; | ||
173 | |||
174 | static DECLARE_WORK(ds1374_work, ds1374_set_work); | ||
175 | |||
176 | int ds1374_set_rtc_time(ulong nowtime) | ||
177 | { | ||
178 | new_time = nowtime; | ||
179 | |||
180 | if (in_interrupt()) | ||
181 | queue_work(ds1374_workqueue, &ds1374_work); | ||
182 | else | ||
183 | ds1374_set_work(NULL); | ||
184 | |||
185 | return 0; | ||
186 | } | ||
187 | |||
188 | /* | ||
189 | ***************************************************************************** | ||
190 | * | ||
191 | * Driver Interface | ||
192 | * | ||
193 | ***************************************************************************** | ||
194 | */ | ||
195 | static int ds1374_probe(struct i2c_adapter *adap, int addr, int kind) | ||
196 | { | ||
197 | struct i2c_client *client; | ||
198 | int rc; | ||
199 | |||
200 | client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL); | ||
201 | if (!client) | ||
202 | return -ENOMEM; | ||
203 | |||
204 | strncpy(client->name, DS1374_DRV_NAME, I2C_NAME_SIZE); | ||
205 | client->addr = addr; | ||
206 | client->adapter = adap; | ||
207 | client->driver = &ds1374_driver; | ||
208 | |||
209 | ds1374_workqueue = create_singlethread_workqueue("ds1374"); | ||
210 | if (!ds1374_workqueue) { | ||
211 | kfree(client); | ||
212 | return -ENOMEM; /* most expected reason */ | ||
213 | } | ||
214 | |||
215 | if ((rc = i2c_attach_client(client)) != 0) { | ||
216 | kfree(client); | ||
217 | return rc; | ||
218 | } | ||
219 | |||
220 | save_client = client; | ||
221 | |||
222 | ds1374_check_rtc_status(); | ||
223 | |||
224 | return 0; | ||
225 | } | ||
226 | |||
227 | static int ds1374_attach(struct i2c_adapter *adap) | ||
228 | { | ||
229 | return i2c_probe(adap, &addr_data, ds1374_probe); | ||
230 | } | ||
231 | |||
232 | static int ds1374_detach(struct i2c_client *client) | ||
233 | { | ||
234 | int rc; | ||
235 | |||
236 | if ((rc = i2c_detach_client(client)) == 0) { | ||
237 | kfree(i2c_get_clientdata(client)); | ||
238 | destroy_workqueue(ds1374_workqueue); | ||
239 | } | ||
240 | return rc; | ||
241 | } | ||
242 | |||
243 | static struct i2c_driver ds1374_driver = { | ||
244 | .driver = { | ||
245 | .name = DS1374_DRV_NAME, | ||
246 | }, | ||
247 | .id = I2C_DRIVERID_DS1374, | ||
248 | .attach_adapter = ds1374_attach, | ||
249 | .detach_client = ds1374_detach, | ||
250 | }; | ||
251 | |||
252 | static int __init ds1374_init(void) | ||
253 | { | ||
254 | return i2c_add_driver(&ds1374_driver); | ||
255 | } | ||
256 | |||
257 | static void __exit ds1374_exit(void) | ||
258 | { | ||
259 | i2c_del_driver(&ds1374_driver); | ||
260 | } | ||
261 | |||
262 | module_init(ds1374_init); | ||
263 | module_exit(ds1374_exit); | ||
264 | |||
265 | MODULE_AUTHOR("Randy Vinson <rvinson@mvista.com>"); | ||
266 | MODULE_DESCRIPTION("Maxim/Dallas DS1374 RTC I2C Client Driver"); | ||
267 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/i2c/chips/eeprom.c b/drivers/i2c/chips/eeprom.c index 1a7eeebac506..fde297b21ad7 100644 --- a/drivers/i2c/chips/eeprom.c +++ b/drivers/i2c/chips/eeprom.c | |||
@@ -35,7 +35,7 @@ | |||
35 | #include <linux/mutex.h> | 35 | #include <linux/mutex.h> |
36 | 36 | ||
37 | /* Addresses to scan */ | 37 | /* Addresses to scan */ |
38 | static unsigned short normal_i2c[] = { 0x50, 0x51, 0x52, 0x53, 0x54, | 38 | static const unsigned short normal_i2c[] = { 0x50, 0x51, 0x52, 0x53, 0x54, |
39 | 0x55, 0x56, 0x57, I2C_CLIENT_END }; | 39 | 0x55, 0x56, 0x57, I2C_CLIENT_END }; |
40 | 40 | ||
41 | /* Insmod parameters */ | 41 | /* Insmod parameters */ |
diff --git a/drivers/i2c/chips/isp1301_omap.c b/drivers/i2c/chips/isp1301_omap.c index ebfbb2947ae6..2a3160153f54 100644 --- a/drivers/i2c/chips/isp1301_omap.c +++ b/drivers/i2c/chips/isp1301_omap.c | |||
@@ -100,7 +100,7 @@ struct isp1301 { | |||
100 | 100 | ||
101 | #if defined(CONFIG_TPS65010) || defined(CONFIG_TPS65010_MODULE) | 101 | #if defined(CONFIG_TPS65010) || defined(CONFIG_TPS65010_MODULE) |
102 | 102 | ||
103 | #include <asm/arch/tps65010.h> | 103 | #include <linux/i2c/tps65010.h> |
104 | 104 | ||
105 | #else | 105 | #else |
106 | 106 | ||
diff --git a/drivers/i2c/chips/m41t00.c b/drivers/i2c/chips/m41t00.c deleted file mode 100644 index 3fcb646e2073..000000000000 --- a/drivers/i2c/chips/m41t00.c +++ /dev/null | |||
@@ -1,413 +0,0 @@ | |||
1 | /* | ||
2 | * I2C client/driver for the ST M41T00 family of i2c rtc chips. | ||
3 | * | ||
4 | * Author: Mark A. Greer <mgreer@mvista.com> | ||
5 | * | ||
6 | * 2005, 2006 (c) MontaVista Software, Inc. This file is licensed under | ||
7 | * the terms of the GNU General Public License version 2. This program | ||
8 | * is licensed "as is" without any warranty of any kind, whether express | ||
9 | * or implied. | ||
10 | */ | ||
11 | /* | ||
12 | * This i2c client/driver wedges between the drivers/char/genrtc.c RTC | ||
13 | * interface and the SMBus interface of the i2c subsystem. | ||
14 | */ | ||
15 | |||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <linux/i2c.h> | ||
20 | #include <linux/rtc.h> | ||
21 | #include <linux/bcd.h> | ||
22 | #include <linux/workqueue.h> | ||
23 | #include <linux/platform_device.h> | ||
24 | #include <linux/m41t00.h> | ||
25 | #include <asm/time.h> | ||
26 | #include <asm/rtc.h> | ||
27 | |||
28 | static struct i2c_driver m41t00_driver; | ||
29 | static struct i2c_client *save_client; | ||
30 | |||
31 | static unsigned short ignore[] = { I2C_CLIENT_END }; | ||
32 | static unsigned short normal_addr[] = { I2C_CLIENT_END, I2C_CLIENT_END }; | ||
33 | |||
34 | static struct i2c_client_address_data addr_data = { | ||
35 | .normal_i2c = normal_addr, | ||
36 | .probe = ignore, | ||
37 | .ignore = ignore, | ||
38 | }; | ||
39 | |||
40 | struct m41t00_chip_info { | ||
41 | u8 type; | ||
42 | char *name; | ||
43 | u8 read_limit; | ||
44 | u8 sec; /* Offsets for chip regs */ | ||
45 | u8 min; | ||
46 | u8 hour; | ||
47 | u8 day; | ||
48 | u8 mon; | ||
49 | u8 year; | ||
50 | u8 alarm_mon; | ||
51 | u8 alarm_hour; | ||
52 | u8 sqw; | ||
53 | u8 sqw_freq; | ||
54 | }; | ||
55 | |||
56 | static struct m41t00_chip_info m41t00_chip_info_tbl[] = { | ||
57 | { | ||
58 | .type = M41T00_TYPE_M41T00, | ||
59 | .name = "m41t00", | ||
60 | .read_limit = 5, | ||
61 | .sec = 0, | ||
62 | .min = 1, | ||
63 | .hour = 2, | ||
64 | .day = 4, | ||
65 | .mon = 5, | ||
66 | .year = 6, | ||
67 | }, | ||
68 | { | ||
69 | .type = M41T00_TYPE_M41T81, | ||
70 | .name = "m41t81", | ||
71 | .read_limit = 1, | ||
72 | .sec = 1, | ||
73 | .min = 2, | ||
74 | .hour = 3, | ||
75 | .day = 5, | ||
76 | .mon = 6, | ||
77 | .year = 7, | ||
78 | .alarm_mon = 0xa, | ||
79 | .alarm_hour = 0xc, | ||
80 | .sqw = 0x13, | ||
81 | }, | ||
82 | { | ||
83 | .type = M41T00_TYPE_M41T85, | ||
84 | .name = "m41t85", | ||
85 | .read_limit = 1, | ||
86 | .sec = 1, | ||
87 | .min = 2, | ||
88 | .hour = 3, | ||
89 | .day = 5, | ||
90 | .mon = 6, | ||
91 | .year = 7, | ||
92 | .alarm_mon = 0xa, | ||
93 | .alarm_hour = 0xc, | ||
94 | .sqw = 0x13, | ||
95 | }, | ||
96 | }; | ||
97 | static struct m41t00_chip_info *m41t00_chip; | ||
98 | |||
99 | ulong | ||
100 | m41t00_get_rtc_time(void) | ||
101 | { | ||
102 | s32 sec, min, hour, day, mon, year; | ||
103 | s32 sec1, min1, hour1, day1, mon1, year1; | ||
104 | u8 reads = 0; | ||
105 | u8 buf[8], msgbuf[1] = { 0 }; /* offset into rtc's regs */ | ||
106 | struct i2c_msg msgs[] = { | ||
107 | { | ||
108 | .addr = save_client->addr, | ||
109 | .flags = 0, | ||
110 | .len = 1, | ||
111 | .buf = msgbuf, | ||
112 | }, | ||
113 | { | ||
114 | .addr = save_client->addr, | ||
115 | .flags = I2C_M_RD, | ||
116 | .len = 8, | ||
117 | .buf = buf, | ||
118 | }, | ||
119 | }; | ||
120 | |||
121 | sec = min = hour = day = mon = year = 0; | ||
122 | |||
123 | do { | ||
124 | if (i2c_transfer(save_client->adapter, msgs, 2) < 0) | ||
125 | goto read_err; | ||
126 | |||
127 | sec1 = sec; | ||
128 | min1 = min; | ||
129 | hour1 = hour; | ||
130 | day1 = day; | ||
131 | mon1 = mon; | ||
132 | year1 = year; | ||
133 | |||
134 | sec = buf[m41t00_chip->sec] & 0x7f; | ||
135 | min = buf[m41t00_chip->min] & 0x7f; | ||
136 | hour = buf[m41t00_chip->hour] & 0x3f; | ||
137 | day = buf[m41t00_chip->day] & 0x3f; | ||
138 | mon = buf[m41t00_chip->mon] & 0x1f; | ||
139 | year = buf[m41t00_chip->year]; | ||
140 | } while ((++reads < m41t00_chip->read_limit) && ((sec != sec1) | ||
141 | || (min != min1) || (hour != hour1) || (day != day1) | ||
142 | || (mon != mon1) || (year != year1))); | ||
143 | |||
144 | if ((m41t00_chip->read_limit > 1) && ((sec != sec1) || (min != min1) | ||
145 | || (hour != hour1) || (day != day1) || (mon != mon1) | ||
146 | || (year != year1))) | ||
147 | goto read_err; | ||
148 | |||
149 | sec = BCD2BIN(sec); | ||
150 | min = BCD2BIN(min); | ||
151 | hour = BCD2BIN(hour); | ||
152 | day = BCD2BIN(day); | ||
153 | mon = BCD2BIN(mon); | ||
154 | year = BCD2BIN(year); | ||
155 | |||
156 | year += 1900; | ||
157 | if (year < 1970) | ||
158 | year += 100; | ||
159 | |||
160 | return mktime(year, mon, day, hour, min, sec); | ||
161 | |||
162 | read_err: | ||
163 | dev_err(&save_client->dev, "m41t00_get_rtc_time: Read error\n"); | ||
164 | return 0; | ||
165 | } | ||
166 | EXPORT_SYMBOL_GPL(m41t00_get_rtc_time); | ||
167 | |||
168 | static void | ||
169 | m41t00_set(void *arg) | ||
170 | { | ||
171 | struct rtc_time tm; | ||
172 | int nowtime = *(int *)arg; | ||
173 | s32 sec, min, hour, day, mon, year; | ||
174 | u8 wbuf[9], *buf = &wbuf[1], msgbuf[1] = { 0 }; | ||
175 | struct i2c_msg msgs[] = { | ||
176 | { | ||
177 | .addr = save_client->addr, | ||
178 | .flags = 0, | ||
179 | .len = 1, | ||
180 | .buf = msgbuf, | ||
181 | }, | ||
182 | { | ||
183 | .addr = save_client->addr, | ||
184 | .flags = I2C_M_RD, | ||
185 | .len = 8, | ||
186 | .buf = buf, | ||
187 | }, | ||
188 | }; | ||
189 | |||
190 | to_tm(nowtime, &tm); | ||
191 | tm.tm_year = (tm.tm_year - 1900) % 100; | ||
192 | |||
193 | sec = BIN2BCD(tm.tm_sec); | ||
194 | min = BIN2BCD(tm.tm_min); | ||
195 | hour = BIN2BCD(tm.tm_hour); | ||
196 | day = BIN2BCD(tm.tm_mday); | ||
197 | mon = BIN2BCD(tm.tm_mon); | ||
198 | year = BIN2BCD(tm.tm_year); | ||
199 | |||
200 | /* Read reg values into buf[0..7]/wbuf[1..8] */ | ||
201 | if (i2c_transfer(save_client->adapter, msgs, 2) < 0) { | ||
202 | dev_err(&save_client->dev, "m41t00_set: Read error\n"); | ||
203 | return; | ||
204 | } | ||
205 | |||
206 | wbuf[0] = 0; /* offset into rtc's regs */ | ||
207 | buf[m41t00_chip->sec] = (buf[m41t00_chip->sec] & ~0x7f) | (sec & 0x7f); | ||
208 | buf[m41t00_chip->min] = (buf[m41t00_chip->min] & ~0x7f) | (min & 0x7f); | ||
209 | buf[m41t00_chip->hour] = (buf[m41t00_chip->hour] & ~0x3f) | (hour& 0x3f); | ||
210 | buf[m41t00_chip->day] = (buf[m41t00_chip->day] & ~0x3f) | (day & 0x3f); | ||
211 | buf[m41t00_chip->mon] = (buf[m41t00_chip->mon] & ~0x1f) | (mon & 0x1f); | ||
212 | buf[m41t00_chip->year] = year; | ||
213 | |||
214 | if (i2c_master_send(save_client, wbuf, 9) < 0) | ||
215 | dev_err(&save_client->dev, "m41t00_set: Write error\n"); | ||
216 | } | ||
217 | |||
218 | static ulong new_time; | ||
219 | /* well, isn't this API just _lovely_? */ | ||
220 | static void | ||
221 | m41t00_barf(struct work_struct *unusable) | ||
222 | { | ||
223 | m41t00_set(&new_time); | ||
224 | } | ||
225 | |||
226 | static struct workqueue_struct *m41t00_wq; | ||
227 | static DECLARE_WORK(m41t00_work, m41t00_barf); | ||
228 | |||
229 | int | ||
230 | m41t00_set_rtc_time(ulong nowtime) | ||
231 | { | ||
232 | new_time = nowtime; | ||
233 | |||
234 | if (in_interrupt()) | ||
235 | queue_work(m41t00_wq, &m41t00_work); | ||
236 | else | ||
237 | m41t00_set(&new_time); | ||
238 | |||
239 | return 0; | ||
240 | } | ||
241 | EXPORT_SYMBOL_GPL(m41t00_set_rtc_time); | ||
242 | |||
243 | /* | ||
244 | ***************************************************************************** | ||
245 | * | ||
246 | * platform_data Driver Interface | ||
247 | * | ||
248 | ***************************************************************************** | ||
249 | */ | ||
250 | static int __init | ||
251 | m41t00_platform_probe(struct platform_device *pdev) | ||
252 | { | ||
253 | struct m41t00_platform_data *pdata; | ||
254 | int i; | ||
255 | |||
256 | if (pdev && (pdata = pdev->dev.platform_data)) { | ||
257 | normal_addr[0] = pdata->i2c_addr; | ||
258 | |||
259 | for (i=0; i<ARRAY_SIZE(m41t00_chip_info_tbl); i++) | ||
260 | if (m41t00_chip_info_tbl[i].type == pdata->type) { | ||
261 | m41t00_chip = &m41t00_chip_info_tbl[i]; | ||
262 | m41t00_chip->sqw_freq = pdata->sqw_freq; | ||
263 | return 0; | ||
264 | } | ||
265 | } | ||
266 | return -ENODEV; | ||
267 | } | ||
268 | |||
269 | static int __exit | ||
270 | m41t00_platform_remove(struct platform_device *pdev) | ||
271 | { | ||
272 | return 0; | ||
273 | } | ||
274 | |||
275 | static struct platform_driver m41t00_platform_driver = { | ||
276 | .probe = m41t00_platform_probe, | ||
277 | .remove = m41t00_platform_remove, | ||
278 | .driver = { | ||
279 | .owner = THIS_MODULE, | ||
280 | .name = M41T00_DRV_NAME, | ||
281 | }, | ||
282 | }; | ||
283 | |||
284 | /* | ||
285 | ***************************************************************************** | ||
286 | * | ||
287 | * Driver Interface | ||
288 | * | ||
289 | ***************************************************************************** | ||
290 | */ | ||
291 | static int | ||
292 | m41t00_probe(struct i2c_adapter *adap, int addr, int kind) | ||
293 | { | ||
294 | struct i2c_client *client; | ||
295 | int rc; | ||
296 | |||
297 | if (!i2c_check_functionality(adap, I2C_FUNC_I2C | ||
298 | | I2C_FUNC_SMBUS_BYTE_DATA)) | ||
299 | return 0; | ||
300 | |||
301 | client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL); | ||
302 | if (!client) | ||
303 | return -ENOMEM; | ||
304 | |||
305 | strlcpy(client->name, m41t00_chip->name, I2C_NAME_SIZE); | ||
306 | client->addr = addr; | ||
307 | client->adapter = adap; | ||
308 | client->driver = &m41t00_driver; | ||
309 | |||
310 | if ((rc = i2c_attach_client(client))) | ||
311 | goto attach_err; | ||
312 | |||
313 | if (m41t00_chip->type != M41T00_TYPE_M41T00) { | ||
314 | /* If asked, disable SQW, set SQW frequency & re-enable */ | ||
315 | if (m41t00_chip->sqw_freq) | ||
316 | if (((rc = i2c_smbus_read_byte_data(client, | ||
317 | m41t00_chip->alarm_mon)) < 0) | ||
318 | || ((rc = i2c_smbus_write_byte_data(client, | ||
319 | m41t00_chip->alarm_mon, rc & ~0x40)) <0) | ||
320 | || ((rc = i2c_smbus_write_byte_data(client, | ||
321 | m41t00_chip->sqw, | ||
322 | m41t00_chip->sqw_freq)) < 0) | ||
323 | || ((rc = i2c_smbus_write_byte_data(client, | ||
324 | m41t00_chip->alarm_mon, rc | 0x40)) <0)) | ||
325 | goto sqw_err; | ||
326 | |||
327 | /* Make sure HT (Halt Update) bit is cleared */ | ||
328 | if ((rc = i2c_smbus_read_byte_data(client, | ||
329 | m41t00_chip->alarm_hour)) < 0) | ||
330 | goto ht_err; | ||
331 | |||
332 | if (rc & 0x40) | ||
333 | if ((rc = i2c_smbus_write_byte_data(client, | ||
334 | m41t00_chip->alarm_hour, rc & ~0x40))<0) | ||
335 | goto ht_err; | ||
336 | } | ||
337 | |||
338 | /* Make sure ST (stop) bit is cleared */ | ||
339 | if ((rc = i2c_smbus_read_byte_data(client, m41t00_chip->sec)) < 0) | ||
340 | goto st_err; | ||
341 | |||
342 | if (rc & 0x80) | ||
343 | if ((rc = i2c_smbus_write_byte_data(client, m41t00_chip->sec, | ||
344 | rc & ~0x80)) < 0) | ||
345 | goto st_err; | ||
346 | |||
347 | m41t00_wq = create_singlethread_workqueue(m41t00_chip->name); | ||
348 | save_client = client; | ||
349 | return 0; | ||
350 | |||
351 | st_err: | ||
352 | dev_err(&client->dev, "m41t00_probe: Can't clear ST bit\n"); | ||
353 | goto attach_err; | ||
354 | ht_err: | ||
355 | dev_err(&client->dev, "m41t00_probe: Can't clear HT bit\n"); | ||
356 | goto attach_err; | ||
357 | sqw_err: | ||
358 | dev_err(&client->dev, "m41t00_probe: Can't set SQW Frequency\n"); | ||
359 | attach_err: | ||
360 | kfree(client); | ||
361 | return rc; | ||
362 | } | ||
363 | |||
364 | static int | ||
365 | m41t00_attach(struct i2c_adapter *adap) | ||
366 | { | ||
367 | return i2c_probe(adap, &addr_data, m41t00_probe); | ||
368 | } | ||
369 | |||
370 | static int | ||
371 | m41t00_detach(struct i2c_client *client) | ||
372 | { | ||
373 | int rc; | ||
374 | |||
375 | if ((rc = i2c_detach_client(client)) == 0) { | ||
376 | kfree(client); | ||
377 | destroy_workqueue(m41t00_wq); | ||
378 | } | ||
379 | return rc; | ||
380 | } | ||
381 | |||
382 | static struct i2c_driver m41t00_driver = { | ||
383 | .driver = { | ||
384 | .name = M41T00_DRV_NAME, | ||
385 | }, | ||
386 | .id = I2C_DRIVERID_STM41T00, | ||
387 | .attach_adapter = m41t00_attach, | ||
388 | .detach_client = m41t00_detach, | ||
389 | }; | ||
390 | |||
391 | static int __init | ||
392 | m41t00_init(void) | ||
393 | { | ||
394 | int rc; | ||
395 | |||
396 | if (!(rc = platform_driver_register(&m41t00_platform_driver))) | ||
397 | rc = i2c_add_driver(&m41t00_driver); | ||
398 | return rc; | ||
399 | } | ||
400 | |||
401 | static void __exit | ||
402 | m41t00_exit(void) | ||
403 | { | ||
404 | i2c_del_driver(&m41t00_driver); | ||
405 | platform_driver_unregister(&m41t00_platform_driver); | ||
406 | } | ||
407 | |||
408 | module_init(m41t00_init); | ||
409 | module_exit(m41t00_exit); | ||
410 | |||
411 | MODULE_AUTHOR("Mark A. Greer <mgreer@mvista.com>"); | ||
412 | MODULE_DESCRIPTION("ST Microelectronics M41T00 RTC I2C Client Driver"); | ||
413 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/i2c/chips/max6875.c b/drivers/i2c/chips/max6875.c index 64692f666372..fb7ea5637eca 100644 --- a/drivers/i2c/chips/max6875.c +++ b/drivers/i2c/chips/max6875.c | |||
@@ -34,7 +34,7 @@ | |||
34 | #include <linux/mutex.h> | 34 | #include <linux/mutex.h> |
35 | 35 | ||
36 | /* Do not scan - the MAX6875 access method will write to some EEPROM chips */ | 36 | /* Do not scan - the MAX6875 access method will write to some EEPROM chips */ |
37 | static unsigned short normal_i2c[] = {I2C_CLIENT_END}; | 37 | static const unsigned short normal_i2c[] = { I2C_CLIENT_END }; |
38 | 38 | ||
39 | /* Insmod parameters */ | 39 | /* Insmod parameters */ |
40 | I2C_CLIENT_INSMOD_1(max6875); | 40 | I2C_CLIENT_INSMOD_1(max6875); |
diff --git a/drivers/i2c/chips/pcf8574.c b/drivers/i2c/chips/pcf8574.c index 21c6dd69193c..b3b830ccf209 100644 --- a/drivers/i2c/chips/pcf8574.c +++ b/drivers/i2c/chips/pcf8574.c | |||
@@ -41,9 +41,11 @@ | |||
41 | #include <linux/i2c.h> | 41 | #include <linux/i2c.h> |
42 | 42 | ||
43 | /* Addresses to scan */ | 43 | /* Addresses to scan */ |
44 | static unsigned short normal_i2c[] = { 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, | 44 | static const unsigned short normal_i2c[] = { |
45 | 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, | 45 | 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, |
46 | I2C_CLIENT_END }; | 46 | 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, |
47 | I2C_CLIENT_END | ||
48 | }; | ||
47 | 49 | ||
48 | /* Insmod parameters */ | 50 | /* Insmod parameters */ |
49 | I2C_CLIENT_INSMOD_2(pcf8574, pcf8574a); | 51 | I2C_CLIENT_INSMOD_2(pcf8574, pcf8574a); |
diff --git a/drivers/i2c/chips/pcf8575.c b/drivers/i2c/chips/pcf8575.c new file mode 100644 index 000000000000..3ea08ac0bfa3 --- /dev/null +++ b/drivers/i2c/chips/pcf8575.c | |||
@@ -0,0 +1,214 @@ | |||
1 | /* | ||
2 | pcf8575.c | ||
3 | |||
4 | About the PCF8575 chip: the PCF8575 is a 16-bit I/O expander for the I2C bus | ||
5 | produced by, among others, Philips Semiconductors. | ||
6 | |||
7 | Copyright (C) 2006 Michael Hennerich, Analog Devices Inc. | ||
8 | <hennerich@blackfin.uclinux.org> | ||
9 | Based on pcf8574.c. | ||
10 | |||
11 | Copyright (c) 2007 Bart Van Assche <bart.vanassche@gmail.com>. | ||
12 | Ported this driver from ucLinux to the mainstream Linux kernel. | ||
13 | |||
14 | This program is free software; you can redistribute it and/or modify | ||
15 | it under the terms of the GNU General Public License as published by | ||
16 | the Free Software Foundation; either version 2 of the License, or | ||
17 | (at your option) any later version. | ||
18 | |||
19 | This program is distributed in the hope that it will be useful, | ||
20 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
22 | GNU General Public License for more details. | ||
23 | |||
24 | You should have received a copy of the GNU General Public License | ||
25 | along with this program; if not, write to the Free Software | ||
26 | Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
27 | */ | ||
28 | |||
29 | #include <linux/module.h> | ||
30 | #include <linux/init.h> | ||
31 | #include <linux/i2c.h> | ||
32 | #include <linux/slab.h> /* kzalloc() */ | ||
33 | #include <linux/sysfs.h> /* sysfs_create_group() */ | ||
34 | |||
35 | /* Addresses to scan */ | ||
36 | static const unsigned short normal_i2c[] = { | ||
37 | 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, | ||
38 | I2C_CLIENT_END | ||
39 | }; | ||
40 | |||
41 | /* Insmod parameters */ | ||
42 | I2C_CLIENT_INSMOD; | ||
43 | |||
44 | |||
45 | /* Each client has this additional data */ | ||
46 | struct pcf8575_data { | ||
47 | struct i2c_client client; | ||
48 | int write; /* last written value, or error code */ | ||
49 | }; | ||
50 | |||
51 | static int pcf8575_attach_adapter(struct i2c_adapter *adapter); | ||
52 | static int pcf8575_detect(struct i2c_adapter *adapter, int address, int kind); | ||
53 | static int pcf8575_detach_client(struct i2c_client *client); | ||
54 | |||
55 | /* This is the driver that will be inserted */ | ||
56 | static struct i2c_driver pcf8575_driver = { | ||
57 | .driver = { | ||
58 | .owner = THIS_MODULE, | ||
59 | .name = "pcf8575", | ||
60 | }, | ||
61 | .attach_adapter = pcf8575_attach_adapter, | ||
62 | .detach_client = pcf8575_detach_client, | ||
63 | }; | ||
64 | |||
65 | /* following are the sysfs callback functions */ | ||
66 | static ssize_t show_read(struct device *dev, struct device_attribute *attr, | ||
67 | char *buf) | ||
68 | { | ||
69 | struct i2c_client *client = to_i2c_client(dev); | ||
70 | u16 val; | ||
71 | u8 iopin_state[2]; | ||
72 | |||
73 | i2c_master_recv(client, iopin_state, 2); | ||
74 | |||
75 | val = iopin_state[0]; | ||
76 | val |= iopin_state[1] << 8; | ||
77 | |||
78 | return sprintf(buf, "%u\n", val); | ||
79 | } | ||
80 | |||
81 | static DEVICE_ATTR(read, S_IRUGO, show_read, NULL); | ||
82 | |||
83 | static ssize_t show_write(struct device *dev, struct device_attribute *attr, | ||
84 | char *buf) | ||
85 | { | ||
86 | struct pcf8575_data *data = dev_get_drvdata(dev); | ||
87 | if (data->write < 0) | ||
88 | return data->write; | ||
89 | return sprintf(buf, "%d\n", data->write); | ||
90 | } | ||
91 | |||
92 | static ssize_t set_write(struct device *dev, struct device_attribute *attr, | ||
93 | const char *buf, size_t count) | ||
94 | { | ||
95 | struct i2c_client *client = to_i2c_client(dev); | ||
96 | struct pcf8575_data *data = i2c_get_clientdata(client); | ||
97 | unsigned long val = simple_strtoul(buf, NULL, 10); | ||
98 | u8 iopin_state[2]; | ||
99 | |||
100 | if (val > 0xffff) | ||
101 | return -EINVAL; | ||
102 | |||
103 | data->write = val; | ||
104 | |||
105 | iopin_state[0] = val & 0xFF; | ||
106 | iopin_state[1] = val >> 8; | ||
107 | |||
108 | i2c_master_send(client, iopin_state, 2); | ||
109 | |||
110 | return count; | ||
111 | } | ||
112 | |||
113 | static DEVICE_ATTR(write, S_IWUSR | S_IRUGO, show_write, set_write); | ||
114 | |||
115 | static struct attribute *pcf8575_attributes[] = { | ||
116 | &dev_attr_read.attr, | ||
117 | &dev_attr_write.attr, | ||
118 | NULL | ||
119 | }; | ||
120 | |||
121 | static const struct attribute_group pcf8575_attr_group = { | ||
122 | .attrs = pcf8575_attributes, | ||
123 | }; | ||
124 | |||
125 | /* | ||
126 | * Real code | ||
127 | */ | ||
128 | |||
129 | static int pcf8575_attach_adapter(struct i2c_adapter *adapter) | ||
130 | { | ||
131 | return i2c_probe(adapter, &addr_data, pcf8575_detect); | ||
132 | } | ||
133 | |||
134 | /* This function is called by i2c_probe */ | ||
135 | static int pcf8575_detect(struct i2c_adapter *adapter, int address, int kind) | ||
136 | { | ||
137 | struct i2c_client *client; | ||
138 | struct pcf8575_data *data; | ||
139 | int err = 0; | ||
140 | |||
141 | if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) | ||
142 | goto exit; | ||
143 | |||
144 | /* OK. For now, we presume we have a valid client. We now create the | ||
145 | client structure, even though we cannot fill it completely yet. */ | ||
146 | data = kzalloc(sizeof(struct pcf8575_data), GFP_KERNEL); | ||
147 | if (!data) { | ||
148 | err = -ENOMEM; | ||
149 | goto exit; | ||
150 | } | ||
151 | |||
152 | client = &data->client; | ||
153 | i2c_set_clientdata(client, data); | ||
154 | client->addr = address; | ||
155 | client->adapter = adapter; | ||
156 | client->driver = &pcf8575_driver; | ||
157 | strlcpy(client->name, "pcf8575", I2C_NAME_SIZE); | ||
158 | data->write = -EAGAIN; | ||
159 | |||
160 | /* This is the place to detect whether the chip at the specified | ||
161 | address really is a PCF8575 chip. However, there is no method known | ||
162 | to detect whether an I2C chip is a PCF8575 or any other I2C chip. */ | ||
163 | |||
164 | /* Tell the I2C layer a new client has arrived */ | ||
165 | err = i2c_attach_client(client); | ||
166 | if (err) | ||
167 | goto exit_free; | ||
168 | |||
169 | /* Register sysfs hooks */ | ||
170 | err = sysfs_create_group(&client->dev.kobj, &pcf8575_attr_group); | ||
171 | if (err) | ||
172 | goto exit_detach; | ||
173 | |||
174 | return 0; | ||
175 | |||
176 | exit_detach: | ||
177 | i2c_detach_client(client); | ||
178 | exit_free: | ||
179 | kfree(data); | ||
180 | exit: | ||
181 | return err; | ||
182 | } | ||
183 | |||
184 | static int pcf8575_detach_client(struct i2c_client *client) | ||
185 | { | ||
186 | int err; | ||
187 | |||
188 | sysfs_remove_group(&client->dev.kobj, &pcf8575_attr_group); | ||
189 | |||
190 | err = i2c_detach_client(client); | ||
191 | if (err) | ||
192 | return err; | ||
193 | |||
194 | kfree(i2c_get_clientdata(client)); | ||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | static int __init pcf8575_init(void) | ||
199 | { | ||
200 | return i2c_add_driver(&pcf8575_driver); | ||
201 | } | ||
202 | |||
203 | static void __exit pcf8575_exit(void) | ||
204 | { | ||
205 | i2c_del_driver(&pcf8575_driver); | ||
206 | } | ||
207 | |||
208 | MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>, " | ||
209 | "Bart Van Assche <bart.vanassche@gmail.com>"); | ||
210 | MODULE_DESCRIPTION("pcf8575 driver"); | ||
211 | MODULE_LICENSE("GPL"); | ||
212 | |||
213 | module_init(pcf8575_init); | ||
214 | module_exit(pcf8575_exit); | ||
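The new driver above exposes the expander only through the two sysfs attributes it registers. A hedged userspace sketch of driving them follows; the /sys path is hypothetical and depends on the adapter number and chip address:

#include <stdio.h>

int main(void)
{
	unsigned int pins;
	FILE *w = fopen("/sys/bus/i2c/devices/0-0020/write", "w");
	FILE *r = fopen("/sys/bus/i2c/devices/0-0020/read", "r");

	if (!w || !r)
		return 1;
	fprintf(w, "%u\n", 0xff00u);	/* drive P17..P10 high, P07..P00 low */
	fflush(w);
	if (fscanf(r, "%u", &pins) == 1)
		printf("port state: 0x%04x\n", pins);
	fclose(w);
	fclose(r);
	return 0;
}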
diff --git a/drivers/i2c/chips/pcf8591.c b/drivers/i2c/chips/pcf8591.c index 4dc36376eb32..865f4409c06b 100644 --- a/drivers/i2c/chips/pcf8591.c +++ b/drivers/i2c/chips/pcf8591.c | |||
@@ -27,7 +27,7 @@ | |||
27 | #include <linux/mutex.h> | 27 | #include <linux/mutex.h> |
28 | 28 | ||
29 | /* Addresses to scan */ | 29 | /* Addresses to scan */ |
30 | static unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c, | 30 | static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c, |
31 | 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; | 31 | 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; |
32 | 32 | ||
33 | /* Insmod parameters */ | 33 | /* Insmod parameters */ |
diff --git a/drivers/i2c/chips/tps65010.c b/drivers/i2c/chips/tps65010.c index e320994b981c..4154a9108859 100644 --- a/drivers/i2c/chips/tps65010.c +++ b/drivers/i2c/chips/tps65010.c | |||
@@ -31,7 +31,7 @@ | |||
31 | #include <linux/seq_file.h> | 31 | #include <linux/seq_file.h> |
32 | #include <linux/mutex.h> | 32 | #include <linux/mutex.h> |
33 | 33 | ||
34 | #include <asm/arch/tps65010.h> | 34 | #include <linux/i2c/tps65010.h> |
35 | 35 | ||
36 | /*-------------------------------------------------------------------------*/ | 36 | /*-------------------------------------------------------------------------*/ |
37 | 37 | ||
diff --git a/drivers/i2c/chips/tsl2550.c b/drivers/i2c/chips/tsl2550.c index 3de4b19ba08f..a10fd2791a69 100644 --- a/drivers/i2c/chips/tsl2550.c +++ b/drivers/i2c/chips/tsl2550.c | |||
@@ -432,11 +432,32 @@ static int __devexit tsl2550_remove(struct i2c_client *client) | |||
432 | return 0; | 432 | return 0; |
433 | } | 433 | } |
434 | 434 | ||
435 | #ifdef CONFIG_PM | ||
436 | |||
437 | static int tsl2550_suspend(struct i2c_client *client, pm_message_t mesg) | ||
438 | { | ||
439 | return tsl2550_set_power_state(client, 0); | ||
440 | } | ||
441 | |||
442 | static int tsl2550_resume(struct i2c_client *client) | ||
443 | { | ||
444 | return tsl2550_set_power_state(client, 1); | ||
445 | } | ||
446 | |||
447 | #else | ||
448 | |||
449 | #define tsl2550_suspend NULL | ||
450 | #define tsl2550_resume NULL | ||
451 | |||
452 | #endif /* CONFIG_PM */ | ||
453 | |||
435 | static struct i2c_driver tsl2550_driver = { | 454 | static struct i2c_driver tsl2550_driver = { |
436 | .driver = { | 455 | .driver = { |
437 | .name = TSL2550_DRV_NAME, | 456 | .name = TSL2550_DRV_NAME, |
438 | .owner = THIS_MODULE, | 457 | .owner = THIS_MODULE, |
439 | }, | 458 | }, |
459 | .suspend = tsl2550_suspend, | ||
460 | .resume = tsl2550_resume, | ||
440 | .probe = tsl2550_probe, | 461 | .probe = tsl2550_probe, |
441 | .remove = __devexit_p(tsl2550_remove), | 462 | .remove = __devexit_p(tsl2550_remove), |
442 | }; | 463 | }; |
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index b5e13e405e72..96da22e9a5a4 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c | |||
@@ -33,14 +33,15 @@ | |||
33 | #include <linux/platform_device.h> | 33 | #include <linux/platform_device.h> |
34 | #include <linux/mutex.h> | 34 | #include <linux/mutex.h> |
35 | #include <linux/completion.h> | 35 | #include <linux/completion.h> |
36 | #include <linux/hardirq.h> | ||
37 | #include <linux/irqflags.h> | ||
36 | #include <asm/uaccess.h> | 38 | #include <asm/uaccess.h> |
39 | #include <asm/semaphore.h> | ||
37 | 40 | ||
38 | #include "i2c-core.h" | 41 | #include "i2c-core.h" |
39 | 42 | ||
40 | 43 | ||
41 | static LIST_HEAD(adapters); | 44 | static DEFINE_MUTEX(core_lock); |
42 | static LIST_HEAD(drivers); | ||
43 | static DEFINE_MUTEX(core_lists); | ||
44 | static DEFINE_IDR(i2c_adapter_idr); | 45 | static DEFINE_IDR(i2c_adapter_idr); |
45 | 46 | ||
46 | #define is_newstyle_driver(d) ((d)->probe || (d)->remove) | 47 | #define is_newstyle_driver(d) ((d)->probe || (d)->remove) |
@@ -198,6 +199,25 @@ static struct bus_type i2c_bus_type = { | |||
198 | .resume = i2c_device_resume, | 199 | .resume = i2c_device_resume, |
199 | }; | 200 | }; |
200 | 201 | ||
202 | |||
203 | /** | ||
204 | * i2c_verify_client - return parameter as i2c_client, or NULL | ||
205 | * @dev: device, probably from some driver model iterator | ||
206 | * | ||
207 | * When traversing the driver model tree, perhaps using driver model | ||
208 | * iterators like @device_for_each_child(), you can't assume very much | ||
209 | * about the nodes you find. Use this function to avoid oopses caused | ||
210 | * by wrongly treating some non-I2C device as an i2c_client. | ||
211 | */ | ||
212 | struct i2c_client *i2c_verify_client(struct device *dev) | ||
213 | { | ||
214 | return (dev->bus == &i2c_bus_type) | ||
215 | ? to_i2c_client(dev) | ||
216 | : NULL; | ||
217 | } | ||
218 | EXPORT_SYMBOL(i2c_verify_client); | ||
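A short sketch of the intended use, assuming a caller that walks an adapter's children and only cares about the I2C clients among them (the example_* names are made up for illustration):

#include <linux/device.h>
#include <linux/i2c.h>

static int example_count_clients(struct device *dev, void *countp)
{
	/* Children of an adapter are not guaranteed to be i2c_client
	 * devices; i2c_verify_client() returns NULL for anything else. */
	if (i2c_verify_client(dev))
		(*(int *)countp)++;
	return 0;	/* keep iterating */
}

static int example_client_count(struct i2c_adapter *adap)
{
	int n = 0;

	device_for_each_child(&adap->dev, &n, example_count_clients);
	return n;
}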
219 | |||
220 | |||
201 | /** | 221 | /** |
202 | * i2c_new_device - instantiate an i2c device for use with a new style driver | 222 | * i2c_new_device - instantiate an i2c device for use with a new style driver |
203 | * @adap: the adapter managing the device | 223 | * @adap: the adapter managing the device |
@@ -276,6 +296,50 @@ void i2c_unregister_device(struct i2c_client *client) | |||
276 | EXPORT_SYMBOL_GPL(i2c_unregister_device); | 296 | EXPORT_SYMBOL_GPL(i2c_unregister_device); |
277 | 297 | ||
278 | 298 | ||
299 | static int dummy_nop(struct i2c_client *client) | ||
300 | { | ||
301 | return 0; | ||
302 | } | ||
303 | |||
304 | static struct i2c_driver dummy_driver = { | ||
305 | .driver.name = "dummy", | ||
306 | .probe = dummy_nop, | ||
307 | .remove = dummy_nop, | ||
308 | }; | ||
309 | |||
310 | /** | ||
311 | * i2c_new_dummy - return a new i2c device bound to a dummy driver | ||
312 | * @adapter: the adapter managing the device | ||
313 | * @address: seven bit address to be used | ||
314 | * @type: optional label used for i2c_client.name | ||
315 | * Context: can sleep | ||
316 | * | ||
317 | * This returns an I2C client bound to the "dummy" driver, intended for use | ||
318 | * with devices that consume multiple addresses. Examples of such chips | ||
319 | * include various EEPROMs (like 24c04 and 24c08 models). | ||
320 | * | ||
321 | * These dummy devices have two main uses. First, most I2C and SMBus calls | ||
322 | * except i2c_transfer() need a client handle; the dummy will be that handle. | ||
323 | * And second, this prevents the specified address from being bound to a | ||
324 | * different driver. | ||
325 | * | ||
326 | * This returns the new i2c client, which should be saved for later use with | ||
327 | * i2c_unregister_device(); or NULL to indicate an error. | ||
328 | */ | ||
329 | struct i2c_client * | ||
330 | i2c_new_dummy(struct i2c_adapter *adapter, u16 address, const char *type) | ||
331 | { | ||
332 | struct i2c_board_info info = { | ||
333 | .driver_name = "dummy", | ||
334 | .addr = address, | ||
335 | }; | ||
336 | |||
337 | if (type) | ||
338 | strlcpy(info.type, type, sizeof info.type); | ||
339 | return i2c_new_device(adapter, &info); | ||
340 | } | ||
341 | EXPORT_SYMBOL_GPL(i2c_new_dummy); | ||
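As a usage sketch only (the 24c08-style chip and all example_* names are hypothetical, not part of this patch): a new-style chip driver whose hardware also answers at the next address could reserve it in probe and release it with i2c_unregister_device() on remove:

#include <linux/i2c.h>

static struct i2c_client *example_second;	/* second address of the chip */

static int example_probe(struct i2c_client *client)
{
	/* Bind addr+1 to the "dummy" driver: this gives us a handle for
	 * SMBus calls to that bank and keeps other drivers off the address. */
	example_second = i2c_new_dummy(client->adapter, client->addr + 1,
				       "example-bank1");
	return example_second ? 0 : -EBUSY;
}

static int example_remove(struct i2c_client *client)
{
	i2c_unregister_device(example_second);
	return 0;
}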
342 | |||
279 | /* ------------------------------------------------------------------------- */ | 343 | /* ------------------------------------------------------------------------- */ |
280 | 344 | ||
281 | /* I2C bus adapters -- one roots each I2C or SMBUS segment */ | 345 | /* I2C bus adapters -- one roots each I2C or SMBUS segment */ |
@@ -320,18 +384,27 @@ static void i2c_scan_static_board_info(struct i2c_adapter *adapter) | |||
320 | mutex_unlock(&__i2c_board_lock); | 384 | mutex_unlock(&__i2c_board_lock); |
321 | } | 385 | } |
322 | 386 | ||
387 | static int i2c_do_add_adapter(struct device_driver *d, void *data) | ||
388 | { | ||
389 | struct i2c_driver *driver = to_i2c_driver(d); | ||
390 | struct i2c_adapter *adap = data; | ||
391 | |||
392 | if (driver->attach_adapter) { | ||
393 | /* We ignore the return code; if it fails, too bad */ | ||
394 | driver->attach_adapter(adap); | ||
395 | } | ||
396 | return 0; | ||
397 | } | ||
398 | |||
323 | static int i2c_register_adapter(struct i2c_adapter *adap) | 399 | static int i2c_register_adapter(struct i2c_adapter *adap) |
324 | { | 400 | { |
325 | int res = 0; | 401 | int res = 0, dummy; |
326 | struct list_head *item; | ||
327 | struct i2c_driver *driver; | ||
328 | 402 | ||
329 | mutex_init(&adap->bus_lock); | 403 | mutex_init(&adap->bus_lock); |
330 | mutex_init(&adap->clist_lock); | 404 | mutex_init(&adap->clist_lock); |
331 | INIT_LIST_HEAD(&adap->clients); | 405 | INIT_LIST_HEAD(&adap->clients); |
332 | 406 | ||
333 | mutex_lock(&core_lists); | 407 | mutex_lock(&core_lock); |
334 | list_add_tail(&adap->list, &adapters); | ||
335 | 408 | ||
336 | /* Add the adapter to the driver core. | 409 | /* Add the adapter to the driver core. |
337 | * If the parent pointer is not set up, | 410 | * If the parent pointer is not set up, |
@@ -356,19 +429,14 @@ static int i2c_register_adapter(struct i2c_adapter *adap) | |||
356 | i2c_scan_static_board_info(adap); | 429 | i2c_scan_static_board_info(adap); |
357 | 430 | ||
358 | /* let legacy drivers scan this bus for matching devices */ | 431 | /* let legacy drivers scan this bus for matching devices */ |
359 | list_for_each(item,&drivers) { | 432 | dummy = bus_for_each_drv(&i2c_bus_type, NULL, adap, |
360 | driver = list_entry(item, struct i2c_driver, list); | 433 | i2c_do_add_adapter); |
361 | if (driver->attach_adapter) | ||
362 | /* We ignore the return code; if it fails, too bad */ | ||
363 | driver->attach_adapter(adap); | ||
364 | } | ||
365 | 434 | ||
366 | out_unlock: | 435 | out_unlock: |
367 | mutex_unlock(&core_lists); | 436 | mutex_unlock(&core_lock); |
368 | return res; | 437 | return res; |
369 | 438 | ||
370 | out_list: | 439 | out_list: |
371 | list_del(&adap->list); | ||
372 | idr_remove(&i2c_adapter_idr, adap->nr); | 440 | idr_remove(&i2c_adapter_idr, adap->nr); |
373 | goto out_unlock; | 441 | goto out_unlock; |
374 | } | 442 | } |
@@ -394,11 +462,11 @@ retry: | |||
394 | if (idr_pre_get(&i2c_adapter_idr, GFP_KERNEL) == 0) | 462 | if (idr_pre_get(&i2c_adapter_idr, GFP_KERNEL) == 0) |
395 | return -ENOMEM; | 463 | return -ENOMEM; |
396 | 464 | ||
397 | mutex_lock(&core_lists); | 465 | mutex_lock(&core_lock); |
398 | /* "above" here means "above or equal to", sigh */ | 466 | /* "above" here means "above or equal to", sigh */ |
399 | res = idr_get_new_above(&i2c_adapter_idr, adapter, | 467 | res = idr_get_new_above(&i2c_adapter_idr, adapter, |
400 | __i2c_first_dynamic_bus_num, &id); | 468 | __i2c_first_dynamic_bus_num, &id); |
401 | mutex_unlock(&core_lists); | 469 | mutex_unlock(&core_lock); |
402 | 470 | ||
403 | if (res < 0) { | 471 | if (res < 0) { |
404 | if (res == -EAGAIN) | 472 | if (res == -EAGAIN) |
@@ -443,7 +511,7 @@ retry: | |||
443 | if (idr_pre_get(&i2c_adapter_idr, GFP_KERNEL) == 0) | 511 | if (idr_pre_get(&i2c_adapter_idr, GFP_KERNEL) == 0) |
444 | return -ENOMEM; | 512 | return -ENOMEM; |
445 | 513 | ||
446 | mutex_lock(&core_lists); | 514 | mutex_lock(&core_lock); |
447 | /* "above" here means "above or equal to", sigh; | 515 | /* "above" here means "above or equal to", sigh; |
448 | * we need the "equal to" result to force the result | 516 | * we need the "equal to" result to force the result |
449 | */ | 517 | */ |
@@ -452,7 +520,7 @@ retry: | |||
452 | status = -EBUSY; | 520 | status = -EBUSY; |
453 | idr_remove(&i2c_adapter_idr, id); | 521 | idr_remove(&i2c_adapter_idr, id); |
454 | } | 522 | } |
455 | mutex_unlock(&core_lists); | 523 | mutex_unlock(&core_lock); |
456 | if (status == -EAGAIN) | 524 | if (status == -EAGAIN) |
457 | goto retry; | 525 | goto retry; |
458 | 526 | ||
@@ -462,6 +530,21 @@ retry: | |||
462 | } | 530 | } |
463 | EXPORT_SYMBOL_GPL(i2c_add_numbered_adapter); | 531 | EXPORT_SYMBOL_GPL(i2c_add_numbered_adapter); |
464 | 532 | ||
533 | static int i2c_do_del_adapter(struct device_driver *d, void *data) | ||
534 | { | ||
535 | struct i2c_driver *driver = to_i2c_driver(d); | ||
536 | struct i2c_adapter *adapter = data; | ||
537 | int res; | ||
538 | |||
539 | if (!driver->detach_adapter) | ||
540 | return 0; | ||
541 | res = driver->detach_adapter(adapter); | ||
542 | if (res) | ||
543 | dev_err(&adapter->dev, "detach_adapter failed (%d) " | ||
544 | "for driver [%s]\n", res, driver->driver.name); | ||
545 | return res; | ||
546 | } | ||
547 | |||
465 | /** | 548 | /** |
466 | * i2c_del_adapter - unregister I2C adapter | 549 | * i2c_del_adapter - unregister I2C adapter |
467 | * @adap: the adapter being unregistered | 550 | * @adap: the adapter being unregistered |
@@ -473,35 +556,24 @@ EXPORT_SYMBOL_GPL(i2c_add_numbered_adapter); | |||
473 | int i2c_del_adapter(struct i2c_adapter *adap) | 556 | int i2c_del_adapter(struct i2c_adapter *adap) |
474 | { | 557 | { |
475 | struct list_head *item, *_n; | 558 | struct list_head *item, *_n; |
476 | struct i2c_adapter *adap_from_list; | ||
477 | struct i2c_driver *driver; | ||
478 | struct i2c_client *client; | 559 | struct i2c_client *client; |
479 | int res = 0; | 560 | int res = 0; |
480 | 561 | ||
481 | mutex_lock(&core_lists); | 562 | mutex_lock(&core_lock); |
482 | 563 | ||
483 | /* First make sure that this adapter was ever added */ | 564 | /* First make sure that this adapter was ever added */ |
484 | list_for_each_entry(adap_from_list, &adapters, list) { | 565 | if (idr_find(&i2c_adapter_idr, adap->nr) != adap) { |
485 | if (adap_from_list == adap) | ||
486 | break; | ||
487 | } | ||
488 | if (adap_from_list != adap) { | ||
489 | pr_debug("i2c-core: attempting to delete unregistered " | 566 | pr_debug("i2c-core: attempting to delete unregistered " |
490 | "adapter [%s]\n", adap->name); | 567 | "adapter [%s]\n", adap->name); |
491 | res = -EINVAL; | 568 | res = -EINVAL; |
492 | goto out_unlock; | 569 | goto out_unlock; |
493 | } | 570 | } |
494 | 571 | ||
495 | list_for_each(item,&drivers) { | 572 | /* Tell drivers about this removal */ |
496 | driver = list_entry(item, struct i2c_driver, list); | 573 | res = bus_for_each_drv(&i2c_bus_type, NULL, adap, |
497 | if (driver->detach_adapter) | 574 | i2c_do_del_adapter); |
498 | if ((res = driver->detach_adapter(adap))) { | 575 | if (res) |
499 | dev_err(&adap->dev, "detach_adapter failed " | 576 | goto out_unlock; |
500 | "for driver [%s]\n", | ||
501 | driver->driver.name); | ||
502 | goto out_unlock; | ||
503 | } | ||
504 | } | ||
505 | 577 | ||
506 | /* detach any active clients. This must be done first, because | 578 | /* detach any active clients. This must be done first, because |
507 | * it can fail; in which case we give up. */ | 579 | * it can fail; in which case we give up. */ |
@@ -529,7 +601,6 @@ int i2c_del_adapter(struct i2c_adapter *adap) | |||
529 | /* clean up the sysfs representation */ | 601 | /* clean up the sysfs representation */ |
530 | init_completion(&adap->dev_released); | 602 | init_completion(&adap->dev_released); |
531 | device_unregister(&adap->dev); | 603 | device_unregister(&adap->dev); |
532 | list_del(&adap->list); | ||
533 | 604 | ||
534 | /* wait for sysfs to drop all references */ | 605 | /* wait for sysfs to drop all references */ |
535 | wait_for_completion(&adap->dev_released); | 606 | wait_for_completion(&adap->dev_released); |
@@ -540,7 +611,7 @@ int i2c_del_adapter(struct i2c_adapter *adap) | |||
540 | dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name); | 611 | dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name); |
541 | 612 | ||
542 | out_unlock: | 613 | out_unlock: |
543 | mutex_unlock(&core_lists); | 614 | mutex_unlock(&core_lock); |
544 | return res; | 615 | return res; |
545 | } | 616 | } |
546 | EXPORT_SYMBOL(i2c_del_adapter); | 617 | EXPORT_SYMBOL(i2c_del_adapter); |
@@ -583,21 +654,23 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver) | |||
583 | if (res) | 654 | if (res) |
584 | return res; | 655 | return res; |
585 | 656 | ||
586 | mutex_lock(&core_lists); | 657 | mutex_lock(&core_lock); |
587 | 658 | ||
588 | list_add_tail(&driver->list,&drivers); | ||
589 | pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name); | 659 | pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name); |
590 | 660 | ||
591 | /* legacy drivers scan i2c busses directly */ | 661 | /* legacy drivers scan i2c busses directly */ |
592 | if (driver->attach_adapter) { | 662 | if (driver->attach_adapter) { |
593 | struct i2c_adapter *adapter; | 663 | struct i2c_adapter *adapter; |
594 | 664 | ||
595 | list_for_each_entry(adapter, &adapters, list) { | 665 | down(&i2c_adapter_class.sem); |
666 | list_for_each_entry(adapter, &i2c_adapter_class.devices, | ||
667 | dev.node) { | ||
596 | driver->attach_adapter(adapter); | 668 | driver->attach_adapter(adapter); |
597 | } | 669 | } |
670 | up(&i2c_adapter_class.sem); | ||
598 | } | 671 | } |
599 | 672 | ||
600 | mutex_unlock(&core_lists); | 673 | mutex_unlock(&core_lock); |
601 | return 0; | 674 | return 0; |
602 | } | 675 | } |
603 | EXPORT_SYMBOL(i2c_register_driver); | 676 | EXPORT_SYMBOL(i2c_register_driver); |
@@ -609,11 +682,11 @@ EXPORT_SYMBOL(i2c_register_driver); | |||
609 | */ | 682 | */ |
610 | void i2c_del_driver(struct i2c_driver *driver) | 683 | void i2c_del_driver(struct i2c_driver *driver) |
611 | { | 684 | { |
612 | struct list_head *item1, *item2, *_n; | 685 | struct list_head *item2, *_n; |
613 | struct i2c_client *client; | 686 | struct i2c_client *client; |
614 | struct i2c_adapter *adap; | 687 | struct i2c_adapter *adap; |
615 | 688 | ||
616 | mutex_lock(&core_lists); | 689 | mutex_lock(&core_lock); |
617 | 690 | ||
618 | /* new-style driver? */ | 691 | /* new-style driver? */ |
619 | if (is_newstyle_driver(driver)) | 692 | if (is_newstyle_driver(driver)) |
@@ -623,8 +696,8 @@ void i2c_del_driver(struct i2c_driver *driver) | |||
623 | * attached. If so, detach them to be able to kill the driver | 696 | * attached. If so, detach them to be able to kill the driver |
624 | * afterwards. | 697 | * afterwards. |
625 | */ | 698 | */ |
626 | list_for_each(item1,&adapters) { | 699 | down(&i2c_adapter_class.sem); |
627 | adap = list_entry(item1, struct i2c_adapter, list); | 700 | list_for_each_entry(adap, &i2c_adapter_class.devices, dev.node) { |
628 | if (driver->detach_adapter) { | 701 | if (driver->detach_adapter) { |
629 | if (driver->detach_adapter(adap)) { | 702 | if (driver->detach_adapter(adap)) { |
630 | dev_err(&adap->dev, "detach_adapter failed " | 703 | dev_err(&adap->dev, "detach_adapter failed " |
@@ -648,40 +721,31 @@ void i2c_del_driver(struct i2c_driver *driver) | |||
648 | } | 721 | } |
649 | } | 722 | } |
650 | } | 723 | } |
724 | up(&i2c_adapter_class.sem); | ||
651 | 725 | ||
652 | unregister: | 726 | unregister: |
653 | driver_unregister(&driver->driver); | 727 | driver_unregister(&driver->driver); |
654 | list_del(&driver->list); | ||
655 | pr_debug("i2c-core: driver [%s] unregistered\n", driver->driver.name); | 728 | pr_debug("i2c-core: driver [%s] unregistered\n", driver->driver.name); |
656 | 729 | ||
657 | mutex_unlock(&core_lists); | 730 | mutex_unlock(&core_lock); |
658 | } | 731 | } |
659 | EXPORT_SYMBOL(i2c_del_driver); | 732 | EXPORT_SYMBOL(i2c_del_driver); |
660 | 733 | ||
661 | /* ------------------------------------------------------------------------- */ | 734 | /* ------------------------------------------------------------------------- */ |
662 | 735 | ||
663 | static int __i2c_check_addr(struct i2c_adapter *adapter, unsigned int addr) | 736 | static int __i2c_check_addr(struct device *dev, void *addrp) |
664 | { | 737 | { |
665 | struct list_head *item; | 738 | struct i2c_client *client = i2c_verify_client(dev); |
666 | struct i2c_client *client; | 739 | int addr = *(int *)addrp; |
667 | 740 | ||
668 | list_for_each(item,&adapter->clients) { | 741 | if (client && client->addr == addr) |
669 | client = list_entry(item, struct i2c_client, list); | 742 | return -EBUSY; |
670 | if (client->addr == addr) | ||
671 | return -EBUSY; | ||
672 | } | ||
673 | return 0; | 743 | return 0; |
674 | } | 744 | } |
675 | 745 | ||
676 | static int i2c_check_addr(struct i2c_adapter *adapter, int addr) | 746 | static int i2c_check_addr(struct i2c_adapter *adapter, int addr) |
677 | { | 747 | { |
678 | int rval; | 748 | return device_for_each_child(&adapter->dev, &addr, __i2c_check_addr); |
679 | |||
680 | mutex_lock(&adapter->clist_lock); | ||
681 | rval = __i2c_check_addr(adapter, addr); | ||
682 | mutex_unlock(&adapter->clist_lock); | ||
683 | |||
684 | return rval; | ||
685 | } | 749 | } |
686 | 750 | ||
687 | int i2c_attach_client(struct i2c_client *client) | 751 | int i2c_attach_client(struct i2c_client *client) |
@@ -689,15 +753,6 @@ int i2c_attach_client(struct i2c_client *client) | |||
689 | struct i2c_adapter *adapter = client->adapter; | 753 | struct i2c_adapter *adapter = client->adapter; |
690 | int res = 0; | 754 | int res = 0; |
691 | 755 | ||
692 | mutex_lock(&adapter->clist_lock); | ||
693 | if (__i2c_check_addr(client->adapter, client->addr)) { | ||
694 | res = -EBUSY; | ||
695 | goto out_unlock; | ||
696 | } | ||
697 | list_add_tail(&client->list,&adapter->clients); | ||
698 | |||
699 | client->usage_count = 0; | ||
700 | |||
701 | client->dev.parent = &client->adapter->dev; | 756 | client->dev.parent = &client->adapter->dev; |
702 | client->dev.bus = &i2c_bus_type; | 757 | client->dev.bus = &i2c_bus_type; |
703 | 758 | ||
@@ -712,13 +767,17 @@ int i2c_attach_client(struct i2c_client *client) | |||
712 | 767 | ||
713 | snprintf(&client->dev.bus_id[0], sizeof(client->dev.bus_id), | 768 | snprintf(&client->dev.bus_id[0], sizeof(client->dev.bus_id), |
714 | "%d-%04x", i2c_adapter_id(adapter), client->addr); | 769 | "%d-%04x", i2c_adapter_id(adapter), client->addr); |
715 | dev_dbg(&adapter->dev, "client [%s] registered with bus id %s\n", | ||
716 | client->name, client->dev.bus_id); | ||
717 | res = device_register(&client->dev); | 770 | res = device_register(&client->dev); |
718 | if (res) | 771 | if (res) |
719 | goto out_list; | 772 | goto out_err; |
773 | |||
774 | mutex_lock(&adapter->clist_lock); | ||
775 | list_add_tail(&client->list, &adapter->clients); | ||
720 | mutex_unlock(&adapter->clist_lock); | 776 | mutex_unlock(&adapter->clist_lock); |
721 | 777 | ||
778 | dev_dbg(&adapter->dev, "client [%s] registered with bus id %s\n", | ||
779 | client->name, client->dev.bus_id); | ||
780 | |||
722 | if (adapter->client_register) { | 781 | if (adapter->client_register) { |
723 | if (adapter->client_register(client)) { | 782 | if (adapter->client_register(client)) { |
724 | dev_dbg(&adapter->dev, "client_register " | 783 | dev_dbg(&adapter->dev, "client_register " |
@@ -729,12 +788,9 @@ int i2c_attach_client(struct i2c_client *client) | |||
729 | 788 | ||
730 | return 0; | 789 | return 0; |
731 | 790 | ||
732 | out_list: | 791 | out_err: |
733 | list_del(&client->list); | ||
734 | dev_err(&adapter->dev, "Failed to attach i2c client %s at 0x%02x " | 792 | dev_err(&adapter->dev, "Failed to attach i2c client %s at 0x%02x " |
735 | "(%d)\n", client->name, client->addr, res); | 793 | "(%d)\n", client->name, client->addr, res); |
736 | out_unlock: | ||
737 | mutex_unlock(&adapter->clist_lock); | ||
738 | return res; | 794 | return res; |
739 | } | 795 | } |
740 | EXPORT_SYMBOL(i2c_attach_client); | 796 | EXPORT_SYMBOL(i2c_attach_client); |
@@ -744,12 +800,6 @@ int i2c_detach_client(struct i2c_client *client) | |||
744 | struct i2c_adapter *adapter = client->adapter; | 800 | struct i2c_adapter *adapter = client->adapter; |
745 | int res = 0; | 801 | int res = 0; |
746 | 802 | ||
747 | if (client->usage_count > 0) { | ||
748 | dev_warn(&client->dev, "Client [%s] still busy, " | ||
749 | "can't detach\n", client->name); | ||
750 | return -EBUSY; | ||
751 | } | ||
752 | |||
753 | if (adapter->client_unregister) { | 803 | if (adapter->client_unregister) { |
754 | res = adapter->client_unregister(client); | 804 | res = adapter->client_unregister(client); |
755 | if (res) { | 805 | if (res) { |
@@ -762,9 +812,10 @@ int i2c_detach_client(struct i2c_client *client) | |||
762 | 812 | ||
763 | mutex_lock(&adapter->clist_lock); | 813 | mutex_lock(&adapter->clist_lock); |
764 | list_del(&client->list); | 814 | list_del(&client->list); |
815 | mutex_unlock(&adapter->clist_lock); | ||
816 | |||
765 | init_completion(&client->released); | 817 | init_completion(&client->released); |
766 | device_unregister(&client->dev); | 818 | device_unregister(&client->dev); |
767 | mutex_unlock(&adapter->clist_lock); | ||
768 | wait_for_completion(&client->released); | 819 | wait_for_completion(&client->released); |
769 | 820 | ||
770 | out: | 821 | out: |
@@ -772,72 +823,58 @@ int i2c_detach_client(struct i2c_client *client) | |||
772 | } | 823 | } |
773 | EXPORT_SYMBOL(i2c_detach_client); | 824 | EXPORT_SYMBOL(i2c_detach_client); |
774 | 825 | ||
775 | static int i2c_inc_use_client(struct i2c_client *client) | 826 | /** |
827 | * i2c_use_client - increments the reference count of the i2c client structure | ||
828 | * @client: the client being referenced | ||
829 | * | ||
830 | * Each live reference to a client should be refcounted. The driver model does | ||
831 | * that automatically as part of driver binding, so that most drivers don't | ||
832 | * need to do this explicitly: they hold a reference until they're unbound | ||
833 | * from the device. | ||
834 | * | ||
835 | * A pointer to the client with the incremented reference counter is returned. | ||
836 | */ | ||
837 | struct i2c_client *i2c_use_client(struct i2c_client *client) | ||
776 | { | 838 | { |
777 | 839 | get_device(&client->dev); | |
778 | if (!try_module_get(client->driver->driver.owner)) | 840 | return client; |
779 | return -ENODEV; | ||
780 | if (!try_module_get(client->adapter->owner)) { | ||
781 | module_put(client->driver->driver.owner); | ||
782 | return -ENODEV; | ||
783 | } | ||
784 | |||
785 | return 0; | ||
786 | } | 841 | } |
842 | EXPORT_SYMBOL(i2c_use_client); | ||
787 | 843 | ||
788 | static void i2c_dec_use_client(struct i2c_client *client) | 844 | /** |
845 | * i2c_release_client - release a use of the i2c client structure | ||
846 | * @client: the client that is no longer being referenced | ||
847 | * | ||
848 | * Must be called when a user of a client is finished with it. | ||
849 | */ | ||
850 | void i2c_release_client(struct i2c_client *client) | ||
789 | { | 851 | { |
790 | module_put(client->driver->driver.owner); | 852 | put_device(&client->dev); |
791 | module_put(client->adapter->owner); | ||
792 | } | 853 | } |
854 | EXPORT_SYMBOL(i2c_release_client); | ||
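A hedged sketch of the refcounting pair, for code that stashes a client pointer outside normal driver binding (all names illustrative):

#include <linux/i2c.h>

static struct i2c_client *example_saved;

static void example_save(struct i2c_client *client)
{
	/* Pin the underlying struct device so the pointer stays valid
	 * even if the client is unregistered before we are done with it. */
	example_saved = i2c_use_client(client);
}

static void example_drop(void)
{
	i2c_release_client(example_saved);	/* balances the get_device() */
	example_saved = NULL;
}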
793 | 855 | ||
794 | int i2c_use_client(struct i2c_client *client) | 856 | struct i2c_cmd_arg { |
795 | { | 857 | unsigned cmd; |
796 | int ret; | 858 | void *arg; |
797 | 859 | }; | |
798 | ret = i2c_inc_use_client(client); | ||
799 | if (ret) | ||
800 | return ret; | ||
801 | |||
802 | client->usage_count++; | ||
803 | |||
804 | return 0; | ||
805 | } | ||
806 | EXPORT_SYMBOL(i2c_use_client); | ||
807 | 860 | ||
808 | int i2c_release_client(struct i2c_client *client) | 861 | static int i2c_cmd(struct device *dev, void *_arg) |
809 | { | 862 | { |
810 | if (!client->usage_count) { | 863 | struct i2c_client *client = i2c_verify_client(dev); |
811 | pr_debug("i2c-core: %s used one too many times\n", | 864 | struct i2c_cmd_arg *arg = _arg; |
812 | __FUNCTION__); | ||
813 | return -EPERM; | ||
814 | } | ||
815 | |||
816 | client->usage_count--; | ||
817 | i2c_dec_use_client(client); | ||
818 | 865 | ||
866 | if (client && client->driver && client->driver->command) | ||
867 | client->driver->command(client, arg->cmd, arg->arg); | ||
819 | return 0; | 868 | return 0; |
820 | } | 869 | } |
821 | EXPORT_SYMBOL(i2c_release_client); | ||
822 | 870 | ||
823 | void i2c_clients_command(struct i2c_adapter *adap, unsigned int cmd, void *arg) | 871 | void i2c_clients_command(struct i2c_adapter *adap, unsigned int cmd, void *arg) |
824 | { | 872 | { |
825 | struct list_head *item; | 873 | struct i2c_cmd_arg cmd_arg; |
826 | struct i2c_client *client; | ||
827 | 874 | ||
828 | mutex_lock(&adap->clist_lock); | 875 | cmd_arg.cmd = cmd; |
829 | list_for_each(item,&adap->clients) { | 876 | cmd_arg.arg = arg; |
830 | client = list_entry(item, struct i2c_client, list); | 877 | device_for_each_child(&adap->dev, &cmd_arg, i2c_cmd); |
831 | if (!try_module_get(client->driver->driver.owner)) | ||
832 | continue; | ||
833 | if (NULL != client->driver->command) { | ||
834 | mutex_unlock(&adap->clist_lock); | ||
835 | client->driver->command(client,cmd,arg); | ||
836 | mutex_lock(&adap->clist_lock); | ||
837 | } | ||
838 | module_put(client->driver->driver.owner); | ||
839 | } | ||
840 | mutex_unlock(&adap->clist_lock); | ||
841 | } | 878 | } |
842 | EXPORT_SYMBOL(i2c_clients_command); | 879 | EXPORT_SYMBOL(i2c_clients_command); |
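Usage is unchanged for callers; a bridge driver broadcasting to every client on its bus would still look roughly like this (the command constant is hypothetical):

#include <linux/i2c.h>

#define EXAMPLE_CMD_SET_ROUTE	0x1000	/* hypothetical command id */

static void example_broadcast_route(struct i2c_adapter *adap)
{
	int route = 1;	/* payload interpreted by the client drivers */

	/* Invokes each client driver's ->command() hook, if it has one. */
	i2c_clients_command(adap, EXAMPLE_CMD_SET_ROUTE, &route);
}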
843 | 880 | ||
@@ -848,11 +885,24 @@ static int __init i2c_init(void) | |||
848 | retval = bus_register(&i2c_bus_type); | 885 | retval = bus_register(&i2c_bus_type); |
849 | if (retval) | 886 | if (retval) |
850 | return retval; | 887 | return retval; |
851 | return class_register(&i2c_adapter_class); | 888 | retval = class_register(&i2c_adapter_class); |
889 | if (retval) | ||
890 | goto bus_err; | ||
891 | retval = i2c_add_driver(&dummy_driver); | ||
892 | if (retval) | ||
893 | goto class_err; | ||
894 | return 0; | ||
895 | |||
896 | class_err: | ||
897 | class_unregister(&i2c_adapter_class); | ||
898 | bus_err: | ||
899 | bus_unregister(&i2c_bus_type); | ||
900 | return retval; | ||
852 | } | 901 | } |
853 | 902 | ||
854 | static void __exit i2c_exit(void) | 903 | static void __exit i2c_exit(void) |
855 | { | 904 | { |
905 | i2c_del_driver(&dummy_driver); | ||
856 | class_unregister(&i2c_adapter_class); | 906 | class_unregister(&i2c_adapter_class); |
857 | bus_unregister(&i2c_bus_type); | 907 | bus_unregister(&i2c_bus_type); |
858 | } | 908 | } |
@@ -879,7 +929,15 @@ int i2c_transfer(struct i2c_adapter * adap, struct i2c_msg *msgs, int num) | |||
879 | } | 929 | } |
880 | #endif | 930 | #endif |
881 | 931 | ||
882 | mutex_lock_nested(&adap->bus_lock, adap->level); | 932 | if (in_atomic() || irqs_disabled()) { |
933 | ret = mutex_trylock(&adap->bus_lock); | ||
934 | if (!ret) | ||
935 | /* I2C activity is ongoing. */ | ||
936 | return -EAGAIN; | ||
937 | } else { | ||
938 | mutex_lock_nested(&adap->bus_lock, adap->level); | ||
939 | } | ||
940 | |||
883 | ret = adap->algo->master_xfer(adap,msgs,num); | 941 | ret = adap->algo->master_xfer(adap,msgs,num); |
884 | mutex_unlock(&adap->bus_lock); | 942 | mutex_unlock(&adap->bus_lock); |
885 | 943 | ||
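With this change, callers that may run with interrupts off (RTC set-time paths are the typical case) have to expect -EAGAIN instead of a sleeping lock. A minimal sketch of handling it; the example_* names are not from the patch:

#include <linux/i2c.h>

static int example_write_regs(struct i2c_adapter *adap,
			      struct i2c_msg *msgs, int num)
{
	int ret = i2c_transfer(adap, msgs, num);

	if (ret == -EAGAIN)
		/* Bus mutex was contended while we were atomic: defer the
		 * update (e.g. via queue_work()) and retry from process
		 * context later. */
		return -EAGAIN;
	return ret < 0 ? ret : 0;	/* any non-negative count is success */
}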
@@ -978,7 +1036,7 @@ static int i2c_probe_address(struct i2c_adapter *adapter, int addr, int kind, | |||
978 | } | 1036 | } |
979 | 1037 | ||
980 | int i2c_probe(struct i2c_adapter *adapter, | 1038 | int i2c_probe(struct i2c_adapter *adapter, |
981 | struct i2c_client_address_data *address_data, | 1039 | const struct i2c_client_address_data *address_data, |
982 | int (*found_proc) (struct i2c_adapter *, int, int)) | 1040 | int (*found_proc) (struct i2c_adapter *, int, int)) |
983 | { | 1041 | { |
984 | int i, err; | 1042 | int i, err; |
@@ -987,7 +1045,7 @@ int i2c_probe(struct i2c_adapter *adapter, | |||
987 | /* Force entries are done first, and are not affected by ignore | 1045 | /* Force entries are done first, and are not affected by ignore |
988 | entries */ | 1046 | entries */ |
989 | if (address_data->forces) { | 1047 | if (address_data->forces) { |
990 | unsigned short **forces = address_data->forces; | 1048 | const unsigned short * const *forces = address_data->forces; |
991 | int kind; | 1049 | int kind; |
992 | 1050 | ||
993 | for (kind = 0; forces[kind]; kind++) { | 1051 | for (kind = 0; forces[kind]; kind++) { |
@@ -1085,7 +1143,6 @@ i2c_new_probed_device(struct i2c_adapter *adap, | |||
1085 | return NULL; | 1143 | return NULL; |
1086 | } | 1144 | } |
1087 | 1145 | ||
1088 | mutex_lock(&adap->clist_lock); | ||
1089 | for (i = 0; addr_list[i] != I2C_CLIENT_END; i++) { | 1146 | for (i = 0; addr_list[i] != I2C_CLIENT_END; i++) { |
1090 | /* Check address validity */ | 1147 | /* Check address validity */ |
1091 | if (addr_list[i] < 0x03 || addr_list[i] > 0x77) { | 1148 | if (addr_list[i] < 0x03 || addr_list[i] > 0x77) { |
@@ -1095,7 +1152,7 @@ i2c_new_probed_device(struct i2c_adapter *adap, | |||
1095 | } | 1152 | } |
1096 | 1153 | ||
1097 | /* Check address availability */ | 1154 | /* Check address availability */ |
1098 | if (__i2c_check_addr(adap, addr_list[i])) { | 1155 | if (i2c_check_addr(adap, addr_list[i])) { |
1099 | dev_dbg(&adap->dev, "Address 0x%02x already in " | 1156 | dev_dbg(&adap->dev, "Address 0x%02x already in " |
1100 | "use, not probing\n", addr_list[i]); | 1157 | "use, not probing\n", addr_list[i]); |
1101 | continue; | 1158 | continue; |
@@ -1123,7 +1180,6 @@ i2c_new_probed_device(struct i2c_adapter *adap, | |||
1123 | break; | 1180 | break; |
1124 | } | 1181 | } |
1125 | } | 1182 | } |
1126 | mutex_unlock(&adap->clist_lock); | ||
1127 | 1183 | ||
1128 | if (addr_list[i] == I2C_CLIENT_END) { | 1184 | if (addr_list[i] == I2C_CLIENT_END) { |
1129 | dev_dbg(&adap->dev, "Probing failed, no device found\n"); | 1185 | dev_dbg(&adap->dev, "Probing failed, no device found\n"); |
@@ -1139,12 +1195,12 @@ struct i2c_adapter* i2c_get_adapter(int id) | |||
1139 | { | 1195 | { |
1140 | struct i2c_adapter *adapter; | 1196 | struct i2c_adapter *adapter; |
1141 | 1197 | ||
1142 | mutex_lock(&core_lists); | 1198 | mutex_lock(&core_lock); |
1143 | adapter = (struct i2c_adapter *)idr_find(&i2c_adapter_idr, id); | 1199 | adapter = (struct i2c_adapter *)idr_find(&i2c_adapter_idr, id); |
1144 | if (adapter && !try_module_get(adapter->owner)) | 1200 | if (adapter && !try_module_get(adapter->owner)) |
1145 | adapter = NULL; | 1201 | adapter = NULL; |
1146 | 1202 | ||
1147 | mutex_unlock(&core_lists); | 1203 | mutex_unlock(&core_lock); |
1148 | return adapter; | 1204 | return adapter; |
1149 | } | 1205 | } |
1150 | EXPORT_SYMBOL(i2c_get_adapter); | 1206 | EXPORT_SYMBOL(i2c_get_adapter); |
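A note on the i2c-core hunk above: i2c_get_adapter() now serializes on core_lock and also pins the adapter's owning module via try_module_get(), so a successful lookup has to be balanced with i2c_put_adapter(). A minimal usage sketch, with the bus number and message details left illustrative:

#include <linux/errno.h>
#include <linux/i2c.h>

/* Sketch: look up a bus by number, run a transfer, drop the reference.
 * Bus number 0 and the msgs/num arguments are illustrative. */
static int example_xfer_on_bus0(struct i2c_msg *msgs, int num)
{
	struct i2c_adapter *adap = i2c_get_adapter(0);
	int ret;

	if (!adap)
		return -ENODEV;

	ret = i2c_transfer(adap, msgs, num);	/* serialized by adap->bus_lock */

	i2c_put_adapter(adap);			/* drops the module reference */
	return ret;
}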
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index df540d5dfaf4..393e679d9faa 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c | |||
@@ -182,27 +182,22 @@ static ssize_t i2cdev_write (struct file *file, const char __user *buf, size_t c | |||
182 | return ret; | 182 | return ret; |
183 | } | 183 | } |
184 | 184 | ||
185 | static int i2cdev_check(struct device *dev, void *addrp) | ||
186 | { | ||
187 | struct i2c_client *client = i2c_verify_client(dev); | ||
188 | |||
189 | if (!client || client->addr != *(unsigned int *)addrp) | ||
190 | return 0; | ||
191 | |||
192 | return dev->driver ? -EBUSY : 0; | ||
193 | } | ||
194 | |||
185 | /* This address checking function differs from the one in i2c-core | 195 | /* This address checking function differs from the one in i2c-core |
186 | in that it considers an address with a registered device, but no | 196 | in that it considers an address with a registered device, but no |
187 | bound driver, as NOT busy. */ | 197 | driver bound to it, as NOT busy. */ |
188 | static int i2cdev_check_addr(struct i2c_adapter *adapter, unsigned int addr) | 198 | static int i2cdev_check_addr(struct i2c_adapter *adapter, unsigned int addr) |
189 | { | 199 | { |
190 | struct list_head *item; | 200 | return device_for_each_child(&adapter->dev, &addr, i2cdev_check); |
191 | struct i2c_client *client; | ||
192 | int res = 0; | ||
193 | |||
194 | mutex_lock(&adapter->clist_lock); | ||
195 | list_for_each(item, &adapter->clients) { | ||
196 | client = list_entry(item, struct i2c_client, list); | ||
197 | if (client->addr == addr) { | ||
198 | if (client->driver) | ||
199 | res = -EBUSY; | ||
200 | break; | ||
201 | } | ||
202 | } | ||
203 | mutex_unlock(&adapter->clist_lock); | ||
204 | |||
205 | return res; | ||
206 | } | 201 | } |
207 | 202 | ||
208 | static int i2cdev_ioctl(struct inode *inode, struct file *file, | 203 | static int i2cdev_ioctl(struct inode *inode, struct file *file, |
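The rewritten i2cdev_check_addr() above leans on the driver-core contract of device_for_each_child(): the callback is invoked for each child of the adapter, and the first non-zero return value stops the walk and is handed back to the caller, which is how -EBUSY for a client with a bound driver propagates out. A sketch of the same pattern with illustrative example_* names:

#include <linux/device.h>
#include <linux/i2c.h>

/* Callback for device_for_each_child(): a non-zero return stops the
 * iteration and becomes the return value of the whole walk. */
static int example_addr_busy(struct device *dev, void *addrp)
{
	struct i2c_client *client = i2c_verify_client(dev);

	if (!client || client->addr != *(unsigned int *)addrp)
		return 0;			/* not this address, keep walking */

	return dev->driver ? -EBUSY : 0;	/* bound driver => busy */
}

static int example_check_addr(struct i2c_adapter *adapter, unsigned int addr)
{
	return device_for_each_child(&adapter->dev, &addr, example_addr_busy);
}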
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig index ee01e273a537..64df55e20ab5 100644 --- a/drivers/ide/Kconfig +++ b/drivers/ide/Kconfig | |||
@@ -325,7 +325,7 @@ config BLK_DEV_PLATFORM | |||
325 | If unsure, say N. | 325 | If unsure, say N. |
326 | 326 | ||
327 | config BLK_DEV_CMD640 | 327 | config BLK_DEV_CMD640 |
328 | bool "CMD640 chipset bugfix/support" | 328 | tristate "CMD640 chipset bugfix/support" |
329 | depends on X86 | 329 | depends on X86 |
330 | ---help--- | 330 | ---help--- |
331 | The CMD-Technologies CMD640 IDE chip is used on many common 486 and | 331 | The CMD-Technologies CMD640 IDE chip is used on many common 486 and |
@@ -359,9 +359,8 @@ config BLK_DEV_CMD640_ENHANCED | |||
359 | Otherwise say N. | 359 | Otherwise say N. |
360 | 360 | ||
361 | config BLK_DEV_IDEPNP | 361 | config BLK_DEV_IDEPNP |
362 | bool "PNP EIDE support" | 362 | tristate "PNP EIDE support" |
363 | depends on PNP | 363 | depends on PNP |
364 | select IDE_GENERIC | ||
365 | help | 364 | help |
366 | If you have a PnP (Plug and Play) compatible EIDE card and | 365 | If you have a PnP (Plug and Play) compatible EIDE card and |
367 | would like the kernel to automatically detect and activate | 366 | would like the kernel to automatically detect and activate |
@@ -375,7 +374,19 @@ config BLK_DEV_IDEPCI | |||
375 | bool | 374 | bool |
376 | 375 | ||
377 | config IDEPCI_PCIBUS_ORDER | 376 | config IDEPCI_PCIBUS_ORDER |
378 | def_bool BLK_DEV_IDE=y && BLK_DEV_IDEPCI | 377 | bool "Probe IDE PCI devices in the PCI bus order (DEPRECATED)" |
378 | depends on BLK_DEV_IDE=y && BLK_DEV_IDEPCI | ||
379 | default y | ||
380 | help | ||
381 | Probe IDE PCI devices in the order in which they appear on the | ||
382 | PCI bus (e.g. 00:1f.1 PCI device before 02:01.0 PCI device) | ||
383 | instead of the order in which IDE PCI host drivers are loaded. | ||
384 | |||
385 | Please note that this method of assuring stable naming of | ||
386 | IDE devices is unreliable; use other means (e.g. udev) to | ||
387 | achieve it. | ||
388 | |||
389 | If in doubt, say N. | ||
379 | 390 | ||
380 | # TODO: split it on per host driver config options (or module parameters) | 391 | # TODO: split it on per host driver config options (or module parameters) |
381 | config BLK_DEV_OFFBOARD | 392 | config BLK_DEV_OFFBOARD |
@@ -789,7 +800,7 @@ config BLK_DEV_CELLEB | |||
789 | endif | 800 | endif |
790 | 801 | ||
791 | config BLK_DEV_IDE_PMAC | 802 | config BLK_DEV_IDE_PMAC |
792 | bool "Builtin PowerMac IDE support" | 803 | tristate "Builtin PowerMac IDE support" |
793 | depends on PPC_PMAC && IDE=y && BLK_DEV_IDE=y | 804 | depends on PPC_PMAC && IDE=y && BLK_DEV_IDE=y |
794 | help | 805 | help |
795 | This driver provides support for the built-in IDE controller on | 806 | This driver provides support for the built-in IDE controller on |
@@ -843,8 +854,9 @@ config BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ | |||
843 | depends on BLK_DEV_IDE_AU1XXX | 854 | depends on BLK_DEV_IDE_AU1XXX |
844 | 855 | ||
845 | config IDE_ARM | 856 | config IDE_ARM |
846 | def_bool ARM && (ARCH_CLPS7500 || ARCH_RPC || ARCH_SHARK) | 857 | tristate "ARM IDE support" |
847 | select IDE_GENERIC | 858 | depends on ARM && (ARCH_CLPS7500 || ARCH_RPC || ARCH_SHARK) |
859 | default y | ||
848 | 860 | ||
849 | config BLK_DEV_IDE_ICSIDE | 861 | config BLK_DEV_IDE_ICSIDE |
850 | tristate "ICS IDE interface support" | 862 | tristate "ICS IDE interface support" |
@@ -876,10 +888,9 @@ config BLK_DEV_IDE_BAST | |||
876 | Simtec BAST or the Thorcom VR1000 | 888 | Simtec BAST or the Thorcom VR1000 |
877 | 889 | ||
878 | config ETRAX_IDE | 890 | config ETRAX_IDE |
879 | bool "ETRAX IDE support" | 891 | tristate "ETRAX IDE support" |
880 | depends on CRIS && BROKEN | 892 | depends on CRIS && BROKEN |
881 | select BLK_DEV_IDEDMA | 893 | select BLK_DEV_IDEDMA |
882 | select IDE_GENERIC | ||
883 | help | 894 | help |
884 | Enables the ETRAX IDE driver. | 895 | Enables the ETRAX IDE driver. |
885 | 896 | ||
@@ -911,17 +922,15 @@ config ETRAX_IDE_G27_RESET | |||
911 | endchoice | 922 | endchoice |
912 | 923 | ||
913 | config IDE_H8300 | 924 | config IDE_H8300 |
914 | bool "H8300 IDE support" | 925 | tristate "H8300 IDE support" |
915 | depends on H8300 | 926 | depends on H8300 |
916 | select IDE_GENERIC | ||
917 | default y | 927 | default y |
918 | help | 928 | help |
919 | Enables the H8300 IDE driver. | 929 | Enables the H8300 IDE driver. |
920 | 930 | ||
921 | config BLK_DEV_GAYLE | 931 | config BLK_DEV_GAYLE |
922 | bool "Amiga Gayle IDE interface support" | 932 | tristate "Amiga Gayle IDE interface support" |
923 | depends on AMIGA | 933 | depends on AMIGA |
924 | select IDE_GENERIC | ||
925 | help | 934 | help |
926 | This is the IDE driver for the Amiga Gayle IDE interface. It supports | 935 | This is the IDE driver for the Amiga Gayle IDE interface. It supports |
927 | both the `A1200 style' and `A4000 style' of the Gayle IDE interface, | 936 | both the `A1200 style' and `A4000 style' of the Gayle IDE interface, |
@@ -951,9 +960,8 @@ config BLK_DEV_IDEDOUBLER | |||
951 | runtime using the "ide=doubler" kernel boot parameter. | 960 | runtime using the "ide=doubler" kernel boot parameter. |
952 | 961 | ||
953 | config BLK_DEV_BUDDHA | 962 | config BLK_DEV_BUDDHA |
954 | bool "Buddha/Catweasel/X-Surf IDE interface support (EXPERIMENTAL)" | 963 | tristate "Buddha/Catweasel/X-Surf IDE interface support (EXPERIMENTAL)" |
955 | depends on ZORRO && EXPERIMENTAL | 964 | depends on ZORRO && EXPERIMENTAL |
956 | select IDE_GENERIC | ||
957 | help | 965 | help |
958 | This is the IDE driver for the IDE interfaces on the Buddha, | 966 | This is the IDE driver for the IDE interfaces on the Buddha, |
959 | Catweasel and X-Surf expansion boards. It supports up to two interfaces | 967 | Catweasel and X-Surf expansion boards. It supports up to two interfaces |
@@ -964,9 +972,8 @@ config BLK_DEV_BUDDHA | |||
964 | to one of its IDE interfaces. | 972 | to one of its IDE interfaces. |
965 | 973 | ||
966 | config BLK_DEV_FALCON_IDE | 974 | config BLK_DEV_FALCON_IDE |
967 | bool "Falcon IDE interface support" | 975 | tristate "Falcon IDE interface support" |
968 | depends on ATARI | 976 | depends on ATARI |
969 | select IDE_GENERIC | ||
970 | help | 977 | help |
971 | This is the IDE driver for the builtin IDE interface on the Atari | 978 | This is the IDE driver for the builtin IDE interface on the Atari |
972 | Falcon. Say Y if you have a Falcon and want to use IDE devices (hard | 979 | Falcon. Say Y if you have a Falcon and want to use IDE devices (hard |
@@ -974,9 +981,8 @@ config BLK_DEV_FALCON_IDE | |||
974 | interface. | 981 | interface. |
975 | 982 | ||
976 | config BLK_DEV_MAC_IDE | 983 | config BLK_DEV_MAC_IDE |
977 | bool "Macintosh Quadra/Powerbook IDE interface support" | 984 | tristate "Macintosh Quadra/Powerbook IDE interface support" |
978 | depends on MAC | 985 | depends on MAC |
979 | select IDE_GENERIC | ||
980 | help | 986 | help |
981 | This is the IDE driver for the builtin IDE interface on some m68k | 987 | This is the IDE driver for the builtin IDE interface on some m68k |
982 | Macintosh models. It supports both the `Quadra style' (used in | 988 | Macintosh models. It supports both the `Quadra style' (used in |
@@ -988,18 +994,16 @@ config BLK_DEV_MAC_IDE | |||
988 | builtin IDE interface. | 994 | builtin IDE interface. |
989 | 995 | ||
990 | config BLK_DEV_Q40IDE | 996 | config BLK_DEV_Q40IDE |
991 | bool "Q40/Q60 IDE interface support" | 997 | tristate "Q40/Q60 IDE interface support" |
992 | depends on Q40 | 998 | depends on Q40 |
993 | select IDE_GENERIC | ||
994 | help | 999 | help |
995 | Enable the on-board IDE controller in the Q40/Q60. This should | 1000 | Enable the on-board IDE controller in the Q40/Q60. This should |
996 | normally be on; disable it only if you are running a custom hard | 1001 | normally be on; disable it only if you are running a custom hard |
997 | drive subsystem through an expansion card. | 1002 | drive subsystem through an expansion card. |
998 | 1003 | ||
999 | config BLK_DEV_MPC8xx_IDE | 1004 | config BLK_DEV_MPC8xx_IDE |
1000 | bool "MPC8xx IDE support" | 1005 | tristate "MPC8xx IDE support" |
1001 | depends on 8xx && (LWMON || IVMS8 || IVML24 || TQM8xxL) && IDE=y && BLK_DEV_IDE=y && !PPC_MERGE | 1006 | depends on 8xx && (LWMON || IVMS8 || IVML24 || TQM8xxL) && IDE=y && BLK_DEV_IDE=y && !PPC_MERGE |
1002 | select IDE_GENERIC | ||
1003 | help | 1007 | help |
1004 | This option provides support for IDE on Motorola MPC8xx Systems. | 1008 | This option provides support for IDE on Motorola MPC8xx Systems. |
1005 | Please see 'Type of MPC8xx IDE interface' for details. | 1009 | Please see 'Type of MPC8xx IDE interface' for details. |
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile index b181fc672057..0d2da89d15cf 100644 --- a/drivers/ide/Makefile +++ b/drivers/ide/Makefile | |||
@@ -7,41 +7,37 @@ | |||
7 | # Note : at this point, these files are compiled on all systems. | 7 | # Note : at this point, these files are compiled on all systems. |
8 | # In the future, some of these should be built conditionally. | 8 | # In the future, some of these should be built conditionally. |
9 | # | 9 | # |
10 | # First come modules that register themselves with the core | 10 | # link order is important here |
11 | 11 | ||
12 | EXTRA_CFLAGS += -Idrivers/ide | 12 | EXTRA_CFLAGS += -Idrivers/ide |
13 | 13 | ||
14 | obj-$(CONFIG_BLK_DEV_IDE) += pci/ | ||
15 | |||
16 | ide-core-y += ide.o ide-io.o ide-iops.o ide-lib.o ide-probe.o ide-taskfile.o | 14 | ide-core-y += ide.o ide-io.o ide-iops.o ide-lib.o ide-probe.o ide-taskfile.o |
17 | 15 | ||
18 | ide-core-$(CONFIG_BLK_DEV_CMD640) += pci/cmd640.o | 16 | # core IDE code |
19 | |||
20 | # Core IDE code - must come before legacy | ||
21 | ide-core-$(CONFIG_BLK_DEV_IDEPCI) += setup-pci.o | 17 | ide-core-$(CONFIG_BLK_DEV_IDEPCI) += setup-pci.o |
22 | ide-core-$(CONFIG_BLK_DEV_IDEDMA) += ide-dma.o | 18 | ide-core-$(CONFIG_BLK_DEV_IDEDMA) += ide-dma.o |
23 | ide-core-$(CONFIG_IDE_PROC_FS) += ide-proc.o | 19 | ide-core-$(CONFIG_IDE_PROC_FS) += ide-proc.o |
24 | ide-core-$(CONFIG_BLK_DEV_IDEPNP) += ide-pnp.o | ||
25 | ide-core-$(CONFIG_BLK_DEV_IDEACPI) += ide-acpi.o | 20 | ide-core-$(CONFIG_BLK_DEV_IDEACPI) += ide-acpi.o |
26 | 21 | ||
27 | # built-in only drivers from arm/ | 22 | obj-$(CONFIG_BLK_DEV_IDE) += ide-core.o |
28 | ide-core-$(CONFIG_IDE_ARM) += arm/ide_arm.o | ||
29 | 23 | ||
30 | # built-in only drivers from legacy/ | 24 | ifeq ($(CONFIG_IDE_ARM), y) |
31 | ide-core-$(CONFIG_BLK_DEV_BUDDHA) += legacy/buddha.o | 25 | ide-arm-core-y += arm/ide_arm.o |
32 | ide-core-$(CONFIG_BLK_DEV_FALCON_IDE) += legacy/falconide.o | 26 | obj-y += ide-arm-core.o |
33 | ide-core-$(CONFIG_BLK_DEV_GAYLE) += legacy/gayle.o | 27 | endif |
34 | ide-core-$(CONFIG_BLK_DEV_MAC_IDE) += legacy/macide.o | ||
35 | ide-core-$(CONFIG_BLK_DEV_Q40IDE) += legacy/q40ide.o | ||
36 | 28 | ||
37 | # built-in only drivers from ppc/ | 29 | obj-$(CONFIG_BLK_DEV_IDE) += legacy/ pci/ |
38 | ide-core-$(CONFIG_BLK_DEV_MPC8xx_IDE) += ppc/mpc8xx.o | ||
39 | ide-core-$(CONFIG_BLK_DEV_IDE_PMAC) += ppc/pmac.o | ||
40 | 30 | ||
41 | # built-in only drivers from h8300/ | 31 | obj-$(CONFIG_IDEPCI_PCIBUS_ORDER) += ide-scan-pci.o |
42 | ide-core-$(CONFIG_IDE_H8300) += h8300/ide-h8300.o | ||
43 | 32 | ||
44 | obj-$(CONFIG_BLK_DEV_IDE) += ide-core.o | 33 | ifeq ($(CONFIG_BLK_DEV_CMD640), y) |
34 | cmd640-core-y += pci/cmd640.o | ||
35 | obj-y += cmd640-core.o | ||
36 | endif | ||
37 | |||
38 | obj-$(CONFIG_BLK_DEV_IDE) += cris/ ppc/ | ||
39 | obj-$(CONFIG_BLK_DEV_IDEPNP) += ide-pnp.o | ||
40 | obj-$(CONFIG_IDE_H8300) += h8300/ | ||
45 | obj-$(CONFIG_IDE_GENERIC) += ide-generic.o | 41 | obj-$(CONFIG_IDE_GENERIC) += ide-generic.o |
46 | 42 | ||
47 | obj-$(CONFIG_BLK_DEV_IDEDISK) += ide-disk.o | 43 | obj-$(CONFIG_BLK_DEV_IDEDISK) += ide-disk.o |
@@ -49,6 +45,20 @@ obj-$(CONFIG_BLK_DEV_IDECD) += ide-cd.o | |||
49 | obj-$(CONFIG_BLK_DEV_IDETAPE) += ide-tape.o | 45 | obj-$(CONFIG_BLK_DEV_IDETAPE) += ide-tape.o |
50 | obj-$(CONFIG_BLK_DEV_IDEFLOPPY) += ide-floppy.o | 46 | obj-$(CONFIG_BLK_DEV_IDEFLOPPY) += ide-floppy.o |
51 | 47 | ||
52 | obj-$(CONFIG_BLK_DEV_IDE) += legacy/ arm/ mips/ | 48 | ifeq ($(CONFIG_BLK_DEV_IDECS), y) |
53 | obj-$(CONFIG_BLK_DEV_HD) += legacy/ | 49 | ide-cs-core-y += legacy/ide-cs.o |
54 | obj-$(CONFIG_ETRAX_IDE) += cris/ | 50 | obj-y += ide-cs-core.o |
51 | endif | ||
52 | |||
53 | ifeq ($(CONFIG_BLK_DEV_PLATFORM), y) | ||
54 | ide-platform-core-y += legacy/ide_platform.o | ||
55 | obj-y += ide-platform-core.o | ||
56 | endif | ||
57 | |||
58 | obj-$(CONFIG_BLK_DEV_IDE) += arm/ mips/ | ||
59 | |||
60 | # old hd driver must be last | ||
61 | ifeq ($(CONFIG_BLK_DEV_HD), y) | ||
62 | hd-core-y += legacy/hd.o | ||
63 | obj-y += hd-core.o | ||
64 | endif | ||
diff --git a/drivers/ide/arm/Makefile b/drivers/ide/arm/Makefile index 6a78f0755f26..5f63ad216862 100644 --- a/drivers/ide/arm/Makefile +++ b/drivers/ide/arm/Makefile | |||
@@ -3,4 +3,8 @@ obj-$(CONFIG_BLK_DEV_IDE_ICSIDE) += icside.o | |||
3 | obj-$(CONFIG_BLK_DEV_IDE_RAPIDE) += rapide.o | 3 | obj-$(CONFIG_BLK_DEV_IDE_RAPIDE) += rapide.o |
4 | obj-$(CONFIG_BLK_DEV_IDE_BAST) += bast-ide.o | 4 | obj-$(CONFIG_BLK_DEV_IDE_BAST) += bast-ide.o |
5 | 5 | ||
6 | ifeq ($(CONFIG_IDE_ARM), m) | ||
7 | obj-m += ide_arm.o | ||
8 | endif | ||
9 | |||
6 | EXTRA_CFLAGS := -Idrivers/ide | 10 | EXTRA_CFLAGS := -Idrivers/ide |
diff --git a/drivers/ide/arm/bast-ide.c b/drivers/ide/arm/bast-ide.c index 48db6167bb90..45bf9c825f2b 100644 --- a/drivers/ide/arm/bast-ide.c +++ b/drivers/ide/arm/bast-ide.c | |||
@@ -45,7 +45,7 @@ bastide_register(unsigned int base, unsigned int aux, int irq, | |||
45 | hw.io_ports[IDE_CONTROL_OFFSET] = aux + (6 * 0x20); | 45 | hw.io_ports[IDE_CONTROL_OFFSET] = aux + (6 * 0x20); |
46 | hw.irq = irq; | 46 | hw.irq = irq; |
47 | 47 | ||
48 | ide_register_hw(&hw, NULL, 0, hwif); | 48 | ide_register_hw(&hw, NULL, hwif); |
49 | 49 | ||
50 | return 0; | 50 | return 0; |
51 | } | 51 | } |
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c index 673402f4a295..8a5c7205b77c 100644 --- a/drivers/ide/arm/icside.c +++ b/drivers/ide/arm/icside.c | |||
@@ -287,26 +287,10 @@ static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode) | |||
287 | ide_xfer_verbose(xfer_mode), 2000 / drive->drive_data); | 287 | ide_xfer_verbose(xfer_mode), 2000 / drive->drive_data); |
288 | } | 288 | } |
289 | 289 | ||
290 | static void icside_dma_host_off(ide_drive_t *drive) | 290 | static void icside_dma_host_set(ide_drive_t *drive, int on) |
291 | { | 291 | { |
292 | } | 292 | } |
293 | 293 | ||
294 | static void icside_dma_off_quietly(ide_drive_t *drive) | ||
295 | { | ||
296 | drive->using_dma = 0; | ||
297 | } | ||
298 | |||
299 | static void icside_dma_host_on(ide_drive_t *drive) | ||
300 | { | ||
301 | } | ||
302 | |||
303 | static int icside_dma_on(ide_drive_t *drive) | ||
304 | { | ||
305 | drive->using_dma = 1; | ||
306 | |||
307 | return 0; | ||
308 | } | ||
309 | |||
310 | static int icside_dma_end(ide_drive_t *drive) | 294 | static int icside_dma_end(ide_drive_t *drive) |
311 | { | 295 | { |
312 | ide_hwif_t *hwif = HWIF(drive); | 296 | ide_hwif_t *hwif = HWIF(drive); |
@@ -422,10 +406,7 @@ static void icside_dma_init(ide_hwif_t *hwif) | |||
422 | hwif->dmatable_dma = 0; | 406 | hwif->dmatable_dma = 0; |
423 | hwif->set_dma_mode = icside_set_dma_mode; | 407 | hwif->set_dma_mode = icside_set_dma_mode; |
424 | 408 | ||
425 | hwif->dma_host_off = icside_dma_host_off; | 409 | hwif->dma_host_set = icside_dma_host_set; |
426 | hwif->dma_off_quietly = icside_dma_off_quietly; | ||
427 | hwif->dma_host_on = icside_dma_host_on; | ||
428 | hwif->ide_dma_on = icside_dma_on; | ||
429 | hwif->dma_setup = icside_dma_setup; | 410 | hwif->dma_setup = icside_dma_setup; |
430 | hwif->dma_exec_cmd = icside_dma_exec_cmd; | 411 | hwif->dma_exec_cmd = icside_dma_exec_cmd; |
431 | hwif->dma_start = icside_dma_start; | 412 | hwif->dma_start = icside_dma_start; |
diff --git a/drivers/ide/arm/ide_arm.c b/drivers/ide/arm/ide_arm.c index 8957cbadf5c2..60f2497542c0 100644 --- a/drivers/ide/arm/ide_arm.c +++ b/drivers/ide/arm/ide_arm.c | |||
@@ -24,12 +24,25 @@ | |||
24 | # define IDE_ARM_IRQ IRQ_HARDDISK | 24 | # define IDE_ARM_IRQ IRQ_HARDDISK |
25 | #endif | 25 | #endif |
26 | 26 | ||
27 | void __init ide_arm_init(void) | 27 | static int __init ide_arm_init(void) |
28 | { | 28 | { |
29 | ide_hwif_t *hwif; | ||
29 | hw_regs_t hw; | 30 | hw_regs_t hw; |
31 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | ||
30 | 32 | ||
31 | memset(&hw, 0, sizeof(hw)); | 33 | memset(&hw, 0, sizeof(hw)); |
32 | ide_std_init_ports(&hw, IDE_ARM_IO, IDE_ARM_IO + 0x206); | 34 | ide_std_init_ports(&hw, IDE_ARM_IO, IDE_ARM_IO + 0x206); |
33 | hw.irq = IDE_ARM_IRQ; | 35 | hw.irq = IDE_ARM_IRQ; |
34 | ide_register_hw(&hw, NULL, 1, NULL); | 36 | |
37 | hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]); | ||
38 | if (hwif) { | ||
39 | ide_init_port_hw(hwif, &hw); | ||
40 | idx[0] = hwif->index; | ||
41 | |||
42 | ide_device_add(idx); | ||
43 | } | ||
44 | |||
45 | return 0; | ||
35 | } | 46 | } |
47 | |||
48 | module_init(ide_arm_init); | ||
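The ide_arm conversion above is the template repeated by the rapide, CRIS and H8/300 hunks that follow: ide_register_hw() is replaced by looking up a free hwif with ide_find_port(), loading the hw_regs_t via ide_init_port_hw(), recording the hwif index in a 4-entry idx[] array (0xff marking an unused slot) and registering through ide_device_add(). A condensed sketch of that shared boilerplate, with MY_IO_BASE and MY_IRQ standing in for the platform-specific values:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/ide.h>

static int __init example_legacy_ide_init(void)
{
	ide_hwif_t *hwif;
	hw_regs_t hw;
	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };	/* 0xff == slot unused */

	memset(&hw, 0, sizeof(hw));
	ide_std_init_ports(&hw, MY_IO_BASE, MY_IO_BASE + 0x206);
	hw.irq = MY_IRQ;

	hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
	if (hwif == NULL)
		return -ENOENT;

	ide_init_port_hw(hwif, &hw);
	idx[0] = hwif->index;

	ide_device_add(idx);

	return 0;
}
module_init(example_legacy_ide_init);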
diff --git a/drivers/ide/arm/rapide.c b/drivers/ide/arm/rapide.c index 0775a3afef48..e6b56d1d48f4 100644 --- a/drivers/ide/arm/rapide.c +++ b/drivers/ide/arm/rapide.c | |||
@@ -13,26 +13,18 @@ | |||
13 | 13 | ||
14 | #include <asm/ecard.h> | 14 | #include <asm/ecard.h> |
15 | 15 | ||
16 | static ide_hwif_t * | 16 | static void rapide_setup_ports(hw_regs_t *hw, void __iomem *base, |
17 | rapide_locate_hwif(void __iomem *base, void __iomem *ctrl, unsigned int sz, int irq) | 17 | void __iomem *ctrl, unsigned int sz, int irq) |
18 | { | 18 | { |
19 | unsigned long port = (unsigned long)base; | 19 | unsigned long port = (unsigned long)base; |
20 | ide_hwif_t *hwif = ide_find_port(port); | ||
21 | int i; | 20 | int i; |
22 | 21 | ||
23 | if (hwif == NULL) | ||
24 | goto out; | ||
25 | |||
26 | for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) { | 22 | for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) { |
27 | hwif->io_ports[i] = port; | 23 | hw->io_ports[i] = port; |
28 | port += sz; | 24 | port += sz; |
29 | } | 25 | } |
30 | hwif->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)ctrl; | 26 | hw->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)ctrl; |
31 | hwif->irq = irq; | 27 | hw->irq = irq; |
32 | hwif->mmio = 1; | ||
33 | default_hwif_mmiops(hwif); | ||
34 | out: | ||
35 | return hwif; | ||
36 | } | 28 | } |
37 | 29 | ||
38 | static int __devinit | 30 | static int __devinit |
@@ -42,6 +34,7 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id) | |||
42 | void __iomem *base; | 34 | void __iomem *base; |
43 | int ret; | 35 | int ret; |
44 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | 36 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; |
37 | hw_regs_t hw; | ||
45 | 38 | ||
46 | ret = ecard_request_resources(ec); | 39 | ret = ecard_request_resources(ec); |
47 | if (ret) | 40 | if (ret) |
@@ -53,11 +46,17 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id) | |||
53 | goto release; | 46 | goto release; |
54 | } | 47 | } |
55 | 48 | ||
56 | hwif = rapide_locate_hwif(base, base + 0x818, 1 << 6, ec->irq); | 49 | hwif = ide_find_port((unsigned long)base); |
57 | if (hwif) { | 50 | if (hwif) { |
58 | hwif->hwif_data = base; | 51 | memset(&hw, 0, sizeof(hw)); |
59 | hwif->gendev.parent = &ec->dev; | 52 | rapide_setup_ports(&hw, base, base + 0x818, 1 << 6, ec->irq); |
60 | hwif->noprobe = 0; | 53 | hw.chipset = ide_generic; |
54 | hw.dev = &ec->dev; | ||
55 | |||
56 | ide_init_port_hw(hwif, &hw); | ||
57 | |||
58 | hwif->mmio = 1; | ||
59 | default_hwif_mmiops(hwif); | ||
61 | 60 | ||
62 | idx[0] = hwif->index; | 61 | idx[0] = hwif->index; |
63 | 62 | ||
diff --git a/drivers/ide/cris/Makefile b/drivers/ide/cris/Makefile index 6176e8d6b2e6..20b95960531f 100644 --- a/drivers/ide/cris/Makefile +++ b/drivers/ide/cris/Makefile | |||
@@ -1,3 +1,3 @@ | |||
1 | EXTRA_CFLAGS += -Idrivers/ide | 1 | EXTRA_CFLAGS += -Idrivers/ide |
2 | 2 | ||
3 | obj-y += ide-cris.o | 3 | obj-$(CONFIG_ETRAX_IDE) += ide-cris.o |
diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c index 325e608d9e62..8c3294c4d23e 100644 --- a/drivers/ide/cris/ide-cris.c +++ b/drivers/ide/cris/ide-cris.c | |||
@@ -673,9 +673,8 @@ static void cris_ide_input_data (ide_drive_t *drive, void *, unsigned int); | |||
673 | static void cris_ide_output_data (ide_drive_t *drive, void *, unsigned int); | 673 | static void cris_ide_output_data (ide_drive_t *drive, void *, unsigned int); |
674 | static void cris_atapi_input_bytes(ide_drive_t *drive, void *, unsigned int); | 674 | static void cris_atapi_input_bytes(ide_drive_t *drive, void *, unsigned int); |
675 | static void cris_atapi_output_bytes(ide_drive_t *drive, void *, unsigned int); | 675 | static void cris_atapi_output_bytes(ide_drive_t *drive, void *, unsigned int); |
676 | static int cris_dma_on (ide_drive_t *drive); | ||
677 | 676 | ||
678 | static void cris_dma_off(ide_drive_t *drive) | 677 | static void cris_dma_host_set(ide_drive_t *drive, int on) |
679 | { | 678 | { |
680 | } | 679 | } |
681 | 680 | ||
@@ -755,13 +754,11 @@ static void cris_set_dma_mode(ide_drive_t *drive, const u8 speed) | |||
755 | cris_ide_set_speed(TYPE_DMA, 0, strobe, hold); | 754 | cris_ide_set_speed(TYPE_DMA, 0, strobe, hold); |
756 | } | 755 | } |
757 | 756 | ||
758 | void __init | 757 | static int __init init_e100_ide(void) |
759 | init_e100_ide (void) | ||
760 | { | 758 | { |
761 | hw_regs_t hw; | 759 | hw_regs_t hw; |
762 | int ide_offsets[IDE_NR_PORTS]; | 760 | int ide_offsets[IDE_NR_PORTS], h, i; |
763 | int h; | 761 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; |
764 | int i; | ||
765 | 762 | ||
766 | printk("ide: ETRAX FS built-in ATA DMA controller\n"); | 763 | printk("ide: ETRAX FS built-in ATA DMA controller\n"); |
767 | 764 | ||
@@ -778,9 +775,11 @@ init_e100_ide (void) | |||
778 | ide_offsets, | 775 | ide_offsets, |
779 | 0, 0, cris_ide_ack_intr, | 776 | 0, 0, cris_ide_ack_intr, |
780 | ide_default_irq(0)); | 777 | ide_default_irq(0)); |
781 | ide_register_hw(&hw, NULL, 1, &hwif); | 778 | hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]); |
782 | if (hwif == NULL) | 779 | if (hwif == NULL) |
783 | continue; | 780 | continue; |
781 | ide_init_port_data(hwif, hwif->index); | ||
782 | ide_init_port_hw(hwif, &hw); | ||
784 | hwif->mmio = 1; | 783 | hwif->mmio = 1; |
785 | hwif->chipset = ide_etrax100; | 784 | hwif->chipset = ide_etrax100; |
786 | hwif->set_pio_mode = &cris_set_pio_mode; | 785 | hwif->set_pio_mode = &cris_set_pio_mode; |
@@ -789,6 +788,7 @@ init_e100_ide (void) | |||
789 | hwif->ata_output_data = &cris_ide_output_data; | 788 | hwif->ata_output_data = &cris_ide_output_data; |
790 | hwif->atapi_input_bytes = &cris_atapi_input_bytes; | 789 | hwif->atapi_input_bytes = &cris_atapi_input_bytes; |
791 | hwif->atapi_output_bytes = &cris_atapi_output_bytes; | 790 | hwif->atapi_output_bytes = &cris_atapi_output_bytes; |
791 | hwif->dma_host_set = &cris_dma_host_set; | ||
792 | hwif->ide_dma_end = &cris_dma_end; | 792 | hwif->ide_dma_end = &cris_dma_end; |
793 | hwif->dma_setup = &cris_dma_setup; | 793 | hwif->dma_setup = &cris_dma_setup; |
794 | hwif->dma_exec_cmd = &cris_dma_exec_cmd; | 794 | hwif->dma_exec_cmd = &cris_dma_exec_cmd; |
@@ -799,9 +799,6 @@ init_e100_ide (void) | |||
799 | hwif->OUTBSYNC = &cris_ide_outbsync; | 799 | hwif->OUTBSYNC = &cris_ide_outbsync; |
800 | hwif->INB = &cris_ide_inb; | 800 | hwif->INB = &cris_ide_inb; |
801 | hwif->INW = &cris_ide_inw; | 801 | hwif->INW = &cris_ide_inw; |
802 | hwif->dma_host_off = &cris_dma_off; | ||
803 | hwif->dma_host_on = &cris_dma_on; | ||
804 | hwif->dma_off_quietly = &cris_dma_off; | ||
805 | hwif->cbl = ATA_CBL_PATA40; | 802 | hwif->cbl = ATA_CBL_PATA40; |
806 | hwif->host_flags |= IDE_HFLAG_NO_ATAPI_DMA; | 803 | hwif->host_flags |= IDE_HFLAG_NO_ATAPI_DMA; |
807 | hwif->pio_mask = ATA_PIO4, | 804 | hwif->pio_mask = ATA_PIO4, |
@@ -809,6 +806,8 @@ init_e100_ide (void) | |||
809 | hwif->drives[1].autotune = 1; | 806 | hwif->drives[1].autotune = 1; |
810 | hwif->ultra_mask = cris_ultra_mask; | 807 | hwif->ultra_mask = cris_ultra_mask; |
811 | hwif->mwdma_mask = 0x07; /* Multiword DMA 0-2 */ | 808 | hwif->mwdma_mask = 0x07; /* Multiword DMA 0-2 */ |
809 | |||
810 | idx[h] = hwif->index; | ||
812 | } | 811 | } |
813 | 812 | ||
814 | /* Reset pulse */ | 813 | /* Reset pulse */ |
@@ -821,14 +820,12 @@ init_e100_ide (void) | |||
821 | cris_ide_set_speed(TYPE_PIO, ATA_PIO4_SETUP, ATA_PIO4_STROBE, ATA_PIO4_HOLD); | 820 | cris_ide_set_speed(TYPE_PIO, ATA_PIO4_SETUP, ATA_PIO4_STROBE, ATA_PIO4_HOLD); |
822 | cris_ide_set_speed(TYPE_DMA, 0, ATA_DMA2_STROBE, ATA_DMA2_HOLD); | 821 | cris_ide_set_speed(TYPE_DMA, 0, ATA_DMA2_STROBE, ATA_DMA2_HOLD); |
823 | cris_ide_set_speed(TYPE_UDMA, ATA_UDMA2_CYC, ATA_UDMA2_DVS, 0); | 822 | cris_ide_set_speed(TYPE_UDMA, ATA_UDMA2_CYC, ATA_UDMA2_DVS, 0); |
824 | } | ||
825 | 823 | ||
826 | static int cris_dma_on (ide_drive_t *drive) | 824 | ide_device_add(idx); |
827 | { | 825 | |
828 | return 0; | 826 | return 0; |
829 | } | 827 | } |
830 | 828 | ||
831 | |||
832 | static cris_dma_descr_type mydescr __attribute__ ((__aligned__(16))); | 829 | static cris_dma_descr_type mydescr __attribute__ ((__aligned__(16))); |
833 | 830 | ||
834 | /* | 831 | /* |
@@ -1060,3 +1057,5 @@ static void cris_dma_start(ide_drive_t *drive) | |||
1060 | LED_DISK_READ(1); | 1057 | LED_DISK_READ(1); |
1061 | } | 1058 | } |
1062 | } | 1059 | } |
1060 | |||
1061 | module_init(init_e100_ide); | ||
diff --git a/drivers/ide/h8300/Makefile b/drivers/ide/h8300/Makefile new file mode 100644 index 000000000000..5eba16f423f4 --- /dev/null +++ b/drivers/ide/h8300/Makefile | |||
@@ -0,0 +1,2 @@ | |||
1 | |||
2 | obj-$(CONFIG_IDE_H8300) += ide-h8300.o | ||
diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/h8300/ide-h8300.c index 4a49b5c59acb..4f6d0191cf6c 100644 --- a/drivers/ide/h8300/ide-h8300.c +++ b/drivers/ide/h8300/ide-h8300.c | |||
@@ -84,11 +84,12 @@ static inline void hwif_setup(ide_hwif_t *hwif) | |||
84 | hwif->INSL = NULL; | 84 | hwif->INSL = NULL; |
85 | } | 85 | } |
86 | 86 | ||
87 | void __init h8300_ide_init(void) | 87 | static int __init h8300_ide_init(void) |
88 | { | 88 | { |
89 | hw_regs_t hw; | 89 | hw_regs_t hw; |
90 | ide_hwif_t *hwif; | 90 | ide_hwif_t *hwif; |
91 | int idx; | 91 | int index; |
92 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | ||
92 | 93 | ||
93 | if (!request_region(CONFIG_H8300_IDE_BASE, H8300_IDE_GAP*8, "ide-h8300")) | 94 | if (!request_region(CONFIG_H8300_IDE_BASE, H8300_IDE_GAP*8, "ide-h8300")) |
94 | goto out_busy; | 95 | goto out_busy; |
@@ -100,16 +101,28 @@ void __init h8300_ide_init(void) | |||
100 | hw_setup(&hw); | 101 | hw_setup(&hw); |
101 | 102 | ||
102 | /* register if */ | 103 | /* register if */ |
103 | idx = ide_register_hw(&hw, NULL, 1, &hwif); | 104 | hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]); |
104 | if (idx == -1) { | 105 | if (hwif == NULL) { |
105 | printk(KERN_ERR "ide-h8300: IDE I/F register failed\n"); | 106 | printk(KERN_ERR "ide-h8300: IDE I/F register failed\n"); |
106 | return; | 107 | return -ENOENT; |
107 | } | 108 | } |
108 | 109 | ||
110 | index = hwif->index; | ||
111 | ide_init_port_data(hwif, index); | ||
112 | ide_init_port_hw(hwif, &hw); | ||
109 | hwif_setup(hwif); | 113 | hwif_setup(hwif); |
110 | printk(KERN_INFO "ide%d: H8/300 generic IDE interface\n", idx); | 114 | printk(KERN_INFO "ide%d: H8/300 generic IDE interface\n", index); |
111 | return; | 115 | |
116 | idx[0] = index; | ||
117 | |||
118 | ide_device_add(idx); | ||
119 | |||
120 | return 0; | ||
112 | 121 | ||
113 | out_busy: | 122 | out_busy: |
114 | printk(KERN_ERR "ide-h8300: IDE I/F resource already used.\n"); | 123 | printk(KERN_ERR "ide-h8300: IDE I/F resource already used.\n"); |
124 | |||
125 | return -EBUSY; | ||
115 | } | 126 | } |
127 | |||
128 | module_init(h8300_ide_init); | ||
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c index e0bb0cfa7bdd..e888fc35b27c 100644 --- a/drivers/ide/ide-acpi.c +++ b/drivers/ide/ide-acpi.c | |||
@@ -386,7 +386,7 @@ static int taskfile_load_raw(ide_drive_t *drive, | |||
386 | 386 | ||
387 | /* convert gtf to IDE Taskfile */ | 387 | /* convert gtf to IDE Taskfile */ |
388 | memcpy(&args.tf_array[7], >f->tfa, 7); | 388 | memcpy(&args.tf_array[7], >f->tfa, 7); |
389 | args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE; | 389 | args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; |
390 | 390 | ||
391 | if (ide_noacpitfs) { | 391 | if (ide_noacpitfs) { |
392 | DEBPRINT("_GTF execution disabled\n"); | 392 | DEBPRINT("_GTF execution disabled\n"); |
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index d8fdd865dea9..717e114ced52 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c | |||
@@ -201,7 +201,7 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq, | |||
201 | 201 | ||
202 | memset(&task, 0, sizeof(task)); | 202 | memset(&task, 0, sizeof(task)); |
203 | task.tf_flags = IDE_TFLAG_NO_SELECT_MASK; /* FIXME? */ | 203 | task.tf_flags = IDE_TFLAG_NO_SELECT_MASK; /* FIXME? */ |
204 | task.tf_flags |= (IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE); | 204 | task.tf_flags |= (IDE_TFLAG_TF | IDE_TFLAG_DEVICE); |
205 | 205 | ||
206 | if (drive->select.b.lba) { | 206 | if (drive->select.b.lba) { |
207 | if (lba48) { | 207 | if (lba48) { |
@@ -219,13 +219,8 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq, | |||
219 | tf->lbal = (u8) block; | 219 | tf->lbal = (u8) block; |
220 | tf->lbam = (u8)(block >> 8); | 220 | tf->lbam = (u8)(block >> 8); |
221 | tf->lbah = (u8)(block >> 16); | 221 | tf->lbah = (u8)(block >> 16); |
222 | #ifdef DEBUG | 222 | |
223 | printk("%s: 0x%02x%02x 0x%02x%02x%02x%02x%02x%02x\n", | 223 | task.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB); |
224 | drive->name, tf->hob_nsect, tf->nsect, | ||
225 | tf->hob_lbah, tf->hob_lbam, tf->hob_lbal, | ||
226 | tf->lbah, tf->lbam, tf->lbal); | ||
227 | #endif | ||
228 | task.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_OUT_HOB); | ||
229 | } else { | 224 | } else { |
230 | tf->nsect = nsectors & 0xff; | 225 | tf->nsect = nsectors & 0xff; |
231 | tf->lbal = block; | 226 | tf->lbal = block; |
@@ -319,9 +314,9 @@ static u64 idedisk_read_native_max_address(ide_drive_t *drive, int lba48) | |||
319 | else | 314 | else |
320 | tf->command = WIN_READ_NATIVE_MAX; | 315 | tf->command = WIN_READ_NATIVE_MAX; |
321 | tf->device = ATA_LBA; | 316 | tf->device = ATA_LBA; |
322 | args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE; | 317 | args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; |
323 | if (lba48) | 318 | if (lba48) |
324 | args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_OUT_HOB); | 319 | args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB); |
325 | /* submit command request */ | 320 | /* submit command request */ |
326 | ide_no_data_taskfile(drive, &args); | 321 | ide_no_data_taskfile(drive, &args); |
327 | 322 | ||
@@ -358,9 +353,9 @@ static u64 idedisk_set_max_address(ide_drive_t *drive, u64 addr_req, int lba48) | |||
358 | tf->command = WIN_SET_MAX; | 353 | tf->command = WIN_SET_MAX; |
359 | } | 354 | } |
360 | tf->device |= ATA_LBA; | 355 | tf->device |= ATA_LBA; |
361 | args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE; | 356 | args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; |
362 | if (lba48) | 357 | if (lba48) |
363 | args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_OUT_HOB); | 358 | args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB); |
364 | /* submit command request */ | 359 | /* submit command request */ |
365 | ide_no_data_taskfile(drive, &args); | 360 | ide_no_data_taskfile(drive, &args); |
366 | /* if OK, compute maximum address value */ | 361 | /* if OK, compute maximum address value */ |
@@ -500,7 +495,7 @@ static int smart_enable(ide_drive_t *drive) | |||
500 | tf->lbam = SMART_LCYL_PASS; | 495 | tf->lbam = SMART_LCYL_PASS; |
501 | tf->lbah = SMART_HCYL_PASS; | 496 | tf->lbah = SMART_HCYL_PASS; |
502 | tf->command = WIN_SMART; | 497 | tf->command = WIN_SMART; |
503 | args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE; | 498 | args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; |
504 | return ide_no_data_taskfile(drive, &args); | 499 | return ide_no_data_taskfile(drive, &args); |
505 | } | 500 | } |
506 | 501 | ||
@@ -515,7 +510,7 @@ static int get_smart_data(ide_drive_t *drive, u8 *buf, u8 sub_cmd) | |||
515 | tf->lbam = SMART_LCYL_PASS; | 510 | tf->lbam = SMART_LCYL_PASS; |
516 | tf->lbah = SMART_HCYL_PASS; | 511 | tf->lbah = SMART_HCYL_PASS; |
517 | tf->command = WIN_SMART; | 512 | tf->command = WIN_SMART; |
518 | args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE; | 513 | args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; |
519 | args.data_phase = TASKFILE_IN; | 514 | args.data_phase = TASKFILE_IN; |
520 | (void) smart_enable(drive); | 515 | (void) smart_enable(drive); |
521 | return ide_raw_taskfile(drive, &args, buf, 1); | 516 | return ide_raw_taskfile(drive, &args, buf, 1); |
@@ -625,8 +620,10 @@ static int set_multcount(ide_drive_t *drive, int arg) | |||
625 | 620 | ||
626 | if (drive->special.b.set_multmode) | 621 | if (drive->special.b.set_multmode) |
627 | return -EBUSY; | 622 | return -EBUSY; |
623 | |||
628 | ide_init_drive_cmd (&rq); | 624 | ide_init_drive_cmd (&rq); |
629 | rq.cmd_type = REQ_TYPE_ATA_CMD; | 625 | rq.cmd_type = REQ_TYPE_ATA_TASKFILE; |
626 | |||
630 | drive->mult_req = arg; | 627 | drive->mult_req = arg; |
631 | drive->special.b.set_multmode = 1; | 628 | drive->special.b.set_multmode = 1; |
632 | (void) ide_do_drive_cmd (drive, &rq, ide_wait); | 629 | (void) ide_do_drive_cmd (drive, &rq, ide_wait); |
@@ -694,7 +691,7 @@ static int write_cache(ide_drive_t *drive, int arg) | |||
694 | args.tf.feature = arg ? | 691 | args.tf.feature = arg ? |
695 | SETFEATURES_EN_WCACHE : SETFEATURES_DIS_WCACHE; | 692 | SETFEATURES_EN_WCACHE : SETFEATURES_DIS_WCACHE; |
696 | args.tf.command = WIN_SETFEATURES; | 693 | args.tf.command = WIN_SETFEATURES; |
697 | args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE; | 694 | args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; |
698 | err = ide_no_data_taskfile(drive, &args); | 695 | err = ide_no_data_taskfile(drive, &args); |
699 | if (err == 0) | 696 | if (err == 0) |
700 | drive->wcache = arg; | 697 | drive->wcache = arg; |
@@ -714,7 +711,7 @@ static int do_idedisk_flushcache (ide_drive_t *drive) | |||
714 | args.tf.command = WIN_FLUSH_CACHE_EXT; | 711 | args.tf.command = WIN_FLUSH_CACHE_EXT; |
715 | else | 712 | else |
716 | args.tf.command = WIN_FLUSH_CACHE; | 713 | args.tf.command = WIN_FLUSH_CACHE; |
717 | args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE; | 714 | args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; |
718 | return ide_no_data_taskfile(drive, &args); | 715 | return ide_no_data_taskfile(drive, &args); |
719 | } | 716 | } |
720 | 717 | ||
@@ -729,7 +726,7 @@ static int set_acoustic (ide_drive_t *drive, int arg) | |||
729 | args.tf.feature = arg ? SETFEATURES_EN_AAM : SETFEATURES_DIS_AAM; | 726 | args.tf.feature = arg ? SETFEATURES_EN_AAM : SETFEATURES_DIS_AAM; |
730 | args.tf.nsect = arg; | 727 | args.tf.nsect = arg; |
731 | args.tf.command = WIN_SETFEATURES; | 728 | args.tf.command = WIN_SETFEATURES; |
732 | args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE; | 729 | args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; |
733 | ide_no_data_taskfile(drive, &args); | 730 | ide_no_data_taskfile(drive, &args); |
734 | drive->acoustic = arg; | 731 | drive->acoustic = arg; |
735 | return 0; | 732 | return 0; |
@@ -766,7 +763,6 @@ static void idedisk_add_settings(ide_drive_t *drive) | |||
766 | ide_add_setting(drive, "bios_head", SETTING_RW, TYPE_BYTE, 0, 255, 1, 1, &drive->bios_head, NULL); | 763 | ide_add_setting(drive, "bios_head", SETTING_RW, TYPE_BYTE, 0, 255, 1, 1, &drive->bios_head, NULL); |
767 | ide_add_setting(drive, "bios_sect", SETTING_RW, TYPE_BYTE, 0, 63, 1, 1, &drive->bios_sect, NULL); | 764 | ide_add_setting(drive, "bios_sect", SETTING_RW, TYPE_BYTE, 0, 63, 1, 1, &drive->bios_sect, NULL); |
768 | ide_add_setting(drive, "address", SETTING_RW, TYPE_BYTE, 0, 2, 1, 1, &drive->addressing, set_lba_addressing); | 765 | ide_add_setting(drive, "address", SETTING_RW, TYPE_BYTE, 0, 2, 1, 1, &drive->addressing, set_lba_addressing); |
769 | ide_add_setting(drive, "bswap", SETTING_READ, TYPE_BYTE, 0, 1, 1, 1, &drive->bswap, NULL); | ||
770 | ide_add_setting(drive, "multcount", SETTING_RW, TYPE_BYTE, 0, id->max_multsect, 1, 1, &drive->mult_count, set_multcount); | 766 | ide_add_setting(drive, "multcount", SETTING_RW, TYPE_BYTE, 0, id->max_multsect, 1, 1, &drive->mult_count, set_multcount); |
771 | ide_add_setting(drive, "nowerr", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->nowerr, set_nowerr); | 767 | ide_add_setting(drive, "nowerr", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->nowerr, set_nowerr); |
772 | ide_add_setting(drive, "lun", SETTING_RW, TYPE_INT, 0, 7, 1, 1, &drive->lun, NULL); | 768 | ide_add_setting(drive, "lun", SETTING_RW, TYPE_INT, 0, 7, 1, 1, &drive->lun, NULL); |
@@ -975,6 +971,17 @@ static ide_driver_t idedisk_driver = { | |||
975 | #endif | 971 | #endif |
976 | }; | 972 | }; |
977 | 973 | ||
974 | static int idedisk_set_doorlock(ide_drive_t *drive, int on) | ||
975 | { | ||
976 | ide_task_t task; | ||
977 | |||
978 | memset(&task, 0, sizeof(task)); | ||
979 | task.tf.command = on ? WIN_DOORLOCK : WIN_DOORUNLOCK; | ||
980 | task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; | ||
981 | |||
982 | return ide_no_data_taskfile(drive, &task); | ||
983 | } | ||
984 | |||
978 | static int idedisk_open(struct inode *inode, struct file *filp) | 985 | static int idedisk_open(struct inode *inode, struct file *filp) |
979 | { | 986 | { |
980 | struct gendisk *disk = inode->i_bdev->bd_disk; | 987 | struct gendisk *disk = inode->i_bdev->bd_disk; |
@@ -989,17 +996,13 @@ static int idedisk_open(struct inode *inode, struct file *filp) | |||
989 | idkp->openers++; | 996 | idkp->openers++; |
990 | 997 | ||
991 | if (drive->removable && idkp->openers == 1) { | 998 | if (drive->removable && idkp->openers == 1) { |
992 | ide_task_t args; | ||
993 | memset(&args, 0, sizeof(ide_task_t)); | ||
994 | args.tf.command = WIN_DOORLOCK; | ||
995 | args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE; | ||
996 | check_disk_change(inode->i_bdev); | 999 | check_disk_change(inode->i_bdev); |
997 | /* | 1000 | /* |
998 | * Ignore the return code from door_lock, | 1001 | * Ignore the return code from door_lock, |
999 | * since the open() has already succeeded, | 1002 | * since the open() has already succeeded, |
1000 | * and the door_lock is irrelevant at this point. | 1003 | * and the door_lock is irrelevant at this point. |
1001 | */ | 1004 | */ |
1002 | if (drive->doorlocking && ide_no_data_taskfile(drive, &args)) | 1005 | if (drive->doorlocking && idedisk_set_doorlock(drive, 1)) |
1003 | drive->doorlocking = 0; | 1006 | drive->doorlocking = 0; |
1004 | } | 1007 | } |
1005 | return 0; | 1008 | return 0; |
@@ -1015,11 +1018,7 @@ static int idedisk_release(struct inode *inode, struct file *filp) | |||
1015 | ide_cacheflush_p(drive); | 1018 | ide_cacheflush_p(drive); |
1016 | 1019 | ||
1017 | if (drive->removable && idkp->openers == 1) { | 1020 | if (drive->removable && idkp->openers == 1) { |
1018 | ide_task_t args; | 1021 | if (drive->doorlocking && idedisk_set_doorlock(drive, 0)) |
1019 | memset(&args, 0, sizeof(ide_task_t)); | ||
1020 | args.tf.command = WIN_DOORUNLOCK; | ||
1021 | args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE; | ||
1022 | if (drive->doorlocking && ide_no_data_taskfile(drive, &args)) | ||
1023 | drive->doorlocking = 0; | 1022 | drive->doorlocking = 0; |
1024 | } | 1023 | } |
1025 | 1024 | ||
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c index 18c78ad2b31e..5bf32038dc43 100644 --- a/drivers/ide/ide-dma.c +++ b/drivers/ide/ide-dma.c | |||
@@ -153,13 +153,7 @@ ide_startstop_t ide_dma_intr (ide_drive_t *drive) | |||
153 | if (!dma_stat) { | 153 | if (!dma_stat) { |
154 | struct request *rq = HWGROUP(drive)->rq; | 154 | struct request *rq = HWGROUP(drive)->rq; |
155 | 155 | ||
156 | if (rq->rq_disk) { | 156 | task_end_request(drive, rq, stat); |
157 | ide_driver_t *drv; | ||
158 | |||
159 | drv = *(ide_driver_t **)rq->rq_disk->private_data; | ||
160 | drv->end_request(drive, 1, rq->nr_sectors); | ||
161 | } else | ||
162 | ide_end_request(drive, 1, rq->nr_sectors); | ||
163 | return ide_stopped; | 157 | return ide_stopped; |
164 | } | 158 | } |
165 | printk(KERN_ERR "%s: dma_intr: bad DMA status (dma_stat=%x)\n", | 159 | printk(KERN_ERR "%s: dma_intr: bad DMA status (dma_stat=%x)\n", |
@@ -408,23 +402,29 @@ static int dma_timer_expiry (ide_drive_t *drive) | |||
408 | } | 402 | } |
409 | 403 | ||
410 | /** | 404 | /** |
411 | * ide_dma_host_off - Generic DMA kill | 405 | * ide_dma_host_set - Enable/disable DMA on a host |
412 | * @drive: drive to control | 406 | * @drive: drive to control |
413 | * | 407 | * |
414 | * Perform the generic IDE controller DMA off operation. This | 408 | * Enable/disable DMA on an IDE controller following generic |
415 | * works for most IDE bus mastering controllers | 409 | * bus-mastering IDE controller behaviour. |
416 | */ | 410 | */ |
417 | 411 | ||
418 | void ide_dma_host_off(ide_drive_t *drive) | 412 | void ide_dma_host_set(ide_drive_t *drive, int on) |
419 | { | 413 | { |
420 | ide_hwif_t *hwif = HWIF(drive); | 414 | ide_hwif_t *hwif = HWIF(drive); |
421 | u8 unit = (drive->select.b.unit & 0x01); | 415 | u8 unit = (drive->select.b.unit & 0x01); |
422 | u8 dma_stat = hwif->INB(hwif->dma_status); | 416 | u8 dma_stat = hwif->INB(hwif->dma_status); |
423 | 417 | ||
424 | hwif->OUTB((dma_stat & ~(1<<(5+unit))), hwif->dma_status); | 418 | if (on) |
419 | dma_stat |= (1 << (5 + unit)); | ||
420 | else | ||
421 | dma_stat &= ~(1 << (5 + unit)); | ||
422 | |||
423 | hwif->OUTB(dma_stat, hwif->dma_status); | ||
425 | } | 424 | } |
426 | 425 | ||
427 | EXPORT_SYMBOL(ide_dma_host_off); | 426 | EXPORT_SYMBOL_GPL(ide_dma_host_set); |
427 | #endif /* CONFIG_BLK_DEV_IDEDMA_PCI */ | ||
428 | 428 | ||
429 | /** | 429 | /** |
430 | * ide_dma_off_quietly - Generic DMA kill | 430 | * ide_dma_off_quietly - Generic DMA kill |
@@ -438,11 +438,10 @@ void ide_dma_off_quietly(ide_drive_t *drive) | |||
438 | drive->using_dma = 0; | 438 | drive->using_dma = 0; |
439 | ide_toggle_bounce(drive, 0); | 439 | ide_toggle_bounce(drive, 0); |
440 | 440 | ||
441 | drive->hwif->dma_host_off(drive); | 441 | drive->hwif->dma_host_set(drive, 0); |
442 | } | 442 | } |
443 | 443 | ||
444 | EXPORT_SYMBOL(ide_dma_off_quietly); | 444 | EXPORT_SYMBOL(ide_dma_off_quietly); |
445 | #endif /* CONFIG_BLK_DEV_IDEDMA_PCI */ | ||
446 | 445 | ||
447 | /** | 446 | /** |
448 | * ide_dma_off - disable DMA on a device | 447 | * ide_dma_off - disable DMA on a device |
@@ -455,52 +454,29 @@ EXPORT_SYMBOL(ide_dma_off_quietly); | |||
455 | void ide_dma_off(ide_drive_t *drive) | 454 | void ide_dma_off(ide_drive_t *drive) |
456 | { | 455 | { |
457 | printk(KERN_INFO "%s: DMA disabled\n", drive->name); | 456 | printk(KERN_INFO "%s: DMA disabled\n", drive->name); |
458 | drive->hwif->dma_off_quietly(drive); | 457 | ide_dma_off_quietly(drive); |
459 | } | 458 | } |
460 | 459 | ||
461 | EXPORT_SYMBOL(ide_dma_off); | 460 | EXPORT_SYMBOL(ide_dma_off); |
462 | 461 | ||
463 | #ifdef CONFIG_BLK_DEV_IDEDMA_PCI | ||
464 | /** | ||
465 | * ide_dma_host_on - Enable DMA on a host | ||
466 | * @drive: drive to enable for DMA | ||
467 | * | ||
468 | * Enable DMA on an IDE controller following generic bus mastering | ||
469 | * IDE controller behaviour | ||
470 | */ | ||
471 | |||
472 | void ide_dma_host_on(ide_drive_t *drive) | ||
473 | { | ||
474 | if (drive->using_dma) { | ||
475 | ide_hwif_t *hwif = HWIF(drive); | ||
476 | u8 unit = (drive->select.b.unit & 0x01); | ||
477 | u8 dma_stat = hwif->INB(hwif->dma_status); | ||
478 | |||
479 | hwif->OUTB((dma_stat|(1<<(5+unit))), hwif->dma_status); | ||
480 | } | ||
481 | } | ||
482 | |||
483 | EXPORT_SYMBOL(ide_dma_host_on); | ||
484 | |||
485 | /** | 462 | /** |
486 | * __ide_dma_on - Enable DMA on a device | 463 | * ide_dma_on - Enable DMA on a device |
487 | * @drive: drive to enable DMA on | 464 | * @drive: drive to enable DMA on |
488 | * | 465 | * |
489 | * Enable IDE DMA for a device on this IDE controller. | 466 | * Enable IDE DMA for a device on this IDE controller. |
490 | */ | 467 | */ |
491 | 468 | ||
492 | int __ide_dma_on (ide_drive_t *drive) | 469 | void ide_dma_on(ide_drive_t *drive) |
493 | { | 470 | { |
494 | drive->using_dma = 1; | 471 | drive->using_dma = 1; |
495 | ide_toggle_bounce(drive, 1); | 472 | ide_toggle_bounce(drive, 1); |
496 | 473 | ||
497 | drive->hwif->dma_host_on(drive); | 474 | drive->hwif->dma_host_set(drive, 1); |
498 | |||
499 | return 0; | ||
500 | } | 475 | } |
501 | 476 | ||
502 | EXPORT_SYMBOL(__ide_dma_on); | 477 | EXPORT_SYMBOL(ide_dma_on); |
503 | 478 | ||
479 | #ifdef CONFIG_BLK_DEV_IDEDMA_PCI | ||
504 | /** | 480 | /** |
505 | * ide_dma_setup - begin a DMA phase | 481 | * ide_dma_setup - begin a DMA phase |
506 | * @drive: target device | 482 | * @drive: target device |
@@ -755,6 +731,7 @@ EXPORT_SYMBOL_GPL(ide_find_dma_mode); | |||
755 | 731 | ||
756 | static int ide_tune_dma(ide_drive_t *drive) | 732 | static int ide_tune_dma(ide_drive_t *drive) |
757 | { | 733 | { |
734 | ide_hwif_t *hwif = drive->hwif; | ||
758 | u8 speed; | 735 | u8 speed; |
759 | 736 | ||
760 | if (noautodma || drive->nodma || (drive->id->capability & 1) == 0) | 737 | if (noautodma || drive->nodma || (drive->id->capability & 1) == 0) |
@@ -767,15 +744,21 @@ static int ide_tune_dma(ide_drive_t *drive) | |||
767 | if (ide_id_dma_bug(drive)) | 744 | if (ide_id_dma_bug(drive)) |
768 | return 0; | 745 | return 0; |
769 | 746 | ||
770 | if (drive->hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA) | 747 | if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA) |
771 | return config_drive_for_dma(drive); | 748 | return config_drive_for_dma(drive); |
772 | 749 | ||
773 | speed = ide_max_dma_mode(drive); | 750 | speed = ide_max_dma_mode(drive); |
774 | 751 | ||
775 | if (!speed) | 752 | if (!speed) { |
776 | return 0; | 753 | /* is this really correct/needed? */ |
754 | if ((hwif->host_flags & IDE_HFLAG_CY82C693) && | ||
755 | ide_dma_good_drive(drive)) | ||
756 | return 1; | ||
757 | else | ||
758 | return 0; | ||
759 | } | ||
777 | 760 | ||
778 | if (drive->hwif->host_flags & IDE_HFLAG_NO_SET_MODE) | 761 | if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE) |
779 | return 0; | 762 | return 0; |
780 | 763 | ||
781 | if (ide_set_dma_mode(drive, speed)) | 764 | if (ide_set_dma_mode(drive, speed)) |
@@ -820,7 +803,6 @@ err_out: | |||
820 | 803 | ||
821 | int ide_set_dma(ide_drive_t *drive) | 804 | int ide_set_dma(ide_drive_t *drive) |
822 | { | 805 | { |
823 | ide_hwif_t *hwif = drive->hwif; | ||
824 | int rc; | 806 | int rc; |
825 | 807 | ||
826 | /* | 808 | /* |
@@ -829,13 +811,15 @@ int ide_set_dma(ide_drive_t *drive) | |||
829 | * things, if not checked and cleared. | 811 | * things, if not checked and cleared. |
830 | * PARANOIA!!! | 812 | * PARANOIA!!! |
831 | */ | 813 | */ |
832 | hwif->dma_off_quietly(drive); | 814 | ide_dma_off_quietly(drive); |
833 | 815 | ||
834 | rc = ide_dma_check(drive); | 816 | rc = ide_dma_check(drive); |
835 | if (rc) | 817 | if (rc) |
836 | return rc; | 818 | return rc; |
837 | 819 | ||
838 | return hwif->ide_dma_on(drive); | 820 | ide_dma_on(drive); |
821 | |||
822 | return 0; | ||
839 | } | 823 | } |
840 | 824 | ||
841 | #ifdef CONFIG_BLK_DEV_IDEDMA_PCI | 825 | #ifdef CONFIG_BLK_DEV_IDEDMA_PCI |
@@ -972,14 +956,8 @@ void ide_setup_dma(ide_hwif_t *hwif, unsigned long base, unsigned num_ports) | |||
972 | if (!(hwif->dma_prdtable)) | 956 | if (!(hwif->dma_prdtable)) |
973 | hwif->dma_prdtable = (hwif->dma_base + 4); | 957 | hwif->dma_prdtable = (hwif->dma_base + 4); |
974 | 958 | ||
975 | if (!hwif->dma_off_quietly) | 959 | if (!hwif->dma_host_set) |
976 | hwif->dma_off_quietly = &ide_dma_off_quietly; | 960 | hwif->dma_host_set = &ide_dma_host_set; |
977 | if (!hwif->dma_host_off) | ||
978 | hwif->dma_host_off = &ide_dma_host_off; | ||
979 | if (!hwif->ide_dma_on) | ||
980 | hwif->ide_dma_on = &__ide_dma_on; | ||
981 | if (!hwif->dma_host_on) | ||
982 | hwif->dma_host_on = &ide_dma_host_on; | ||
983 | if (!hwif->dma_setup) | 961 | if (!hwif->dma_setup) |
984 | hwif->dma_setup = &ide_dma_setup; | 962 | hwif->dma_setup = &ide_dma_setup; |
985 | if (!hwif->dma_exec_cmd) | 963 | if (!hwif->dma_exec_cmd) |
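The ide-dma.c hunks above fold the old per-hwif dma_host_on, dma_host_off, dma_off_quietly and ide_dma_on methods into a single ->dma_host_set(drive, on) hook, with drive->using_dma and the bounce-buffer toggling now handled centrally by ide_dma_on()/ide_dma_off_quietly(). For a controller without a per-drive DMA-enable bit (as in the icside and CRIS hunks earlier) the conversion boils down to a stub plus one assignment; the example_* names below are illustrative:

/* Stub hook: hardware without a per-drive DMA enable bit has nothing to
 * do here; the core still tracks drive->using_dma on its behalf. */
static void example_dma_host_set(ide_drive_t *drive, int on)
{
}

static void example_init_dma_hooks(ide_hwif_t *hwif)
{
	/* one assignment where four method pointers used to be set */
	hwif->dma_host_set = &example_dma_host_set;
}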
diff --git a/drivers/ide/ide-generic.c b/drivers/ide/ide-generic.c index 0f72b98d727f..bb30c29f6ec0 100644 --- a/drivers/ide/ide-generic.c +++ b/drivers/ide/ide-generic.c | |||
@@ -14,10 +14,16 @@ | |||
14 | 14 | ||
15 | static int __init ide_generic_init(void) | 15 | static int __init ide_generic_init(void) |
16 | { | 16 | { |
17 | u8 idx[MAX_HWIFS]; | ||
18 | int i; | ||
19 | |||
17 | if (ide_hwifs[0].io_ports[IDE_DATA_OFFSET]) | 20 | if (ide_hwifs[0].io_ports[IDE_DATA_OFFSET]) |
18 | ide_get_lock(NULL, NULL); /* for atari only */ | 21 | ide_get_lock(NULL, NULL); /* for atari only */ |
19 | 22 | ||
20 | (void)ideprobe_init(); | 23 | for (i = 0; i < MAX_HWIFS; i++) |
24 | idx[i] = ide_hwifs[i].present ? 0xff : i; | ||
25 | |||
26 | ide_device_add_all(idx); | ||
21 | 27 | ||
22 | if (ide_hwifs[0].io_ports[IDE_DATA_OFFSET]) | 28 | if (ide_hwifs[0].io_ports[IDE_DATA_OFFSET]) |
23 | ide_release_lock(); /* for atari only */ | 29 | ide_release_lock(); /* for atari only */ |
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index 2711b5a6962d..6f8f544392a8 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c | |||
@@ -75,7 +75,7 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq, | |||
75 | */ | 75 | */ |
76 | if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) { | 76 | if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) { |
77 | drive->state = 0; | 77 | drive->state = 0; |
78 | HWGROUP(drive)->hwif->ide_dma_on(drive); | 78 | ide_dma_on(drive); |
79 | } | 79 | } |
80 | 80 | ||
81 | if (!end_that_request_chunk(rq, uptodate, nr_bytes)) { | 81 | if (!end_that_request_chunk(rq, uptodate, nr_bytes)) { |
@@ -219,7 +219,7 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request * | |||
219 | * we could be smarter and check for current xfer_speed | 219 | * we could be smarter and check for current xfer_speed |
220 | * in struct drive etc... | 220 | * in struct drive etc... |
221 | */ | 221 | */ |
222 | if (drive->hwif->ide_dma_on == NULL) | 222 | if (drive->hwif->dma_host_set == NULL) |
223 | break; | 223 | break; |
224 | /* | 224 | /* |
225 | * TODO: respect ->using_dma setting | 225 | * TODO: respect ->using_dma setting |
@@ -231,7 +231,7 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request * | |||
231 | return ide_stopped; | 231 | return ide_stopped; |
232 | 232 | ||
233 | out_do_tf: | 233 | out_do_tf: |
234 | args->tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE; | 234 | args->tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; |
235 | args->data_phase = TASKFILE_NO_DATA; | 235 | args->data_phase = TASKFILE_NO_DATA; |
236 | return do_rw_taskfile(drive, args); | 236 | return do_rw_taskfile(drive, args); |
237 | } | 237 | } |
@@ -354,7 +354,6 @@ void ide_tf_read(ide_drive_t *drive, ide_task_t *task) | |||
354 | 354 | ||
355 | void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err) | 355 | void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err) |
356 | { | 356 | { |
357 | ide_hwif_t *hwif = HWIF(drive); | ||
358 | unsigned long flags; | 357 | unsigned long flags; |
359 | struct request *rq; | 358 | struct request *rq; |
360 | 359 | ||
@@ -362,17 +361,7 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err) | |||
362 | rq = HWGROUP(drive)->rq; | 361 | rq = HWGROUP(drive)->rq; |
363 | spin_unlock_irqrestore(&ide_lock, flags); | 362 | spin_unlock_irqrestore(&ide_lock, flags); |
364 | 363 | ||
365 | if (rq->cmd_type == REQ_TYPE_ATA_CMD) { | 364 | if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { |
366 | u8 *args = (u8 *) rq->buffer; | ||
367 | if (rq->errors == 0) | ||
368 | rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT); | ||
369 | |||
370 | if (args) { | ||
371 | args[0] = stat; | ||
372 | args[1] = err; | ||
373 | args[2] = hwif->INB(IDE_NSECTOR_REG); | ||
374 | } | ||
375 | } else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { | ||
376 | ide_task_t *args = (ide_task_t *) rq->special; | 365 | ide_task_t *args = (ide_task_t *) rq->special; |
377 | if (rq->errors == 0) | 366 | if (rq->errors == 0) |
378 | rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT); | 367 | rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT); |
@@ -383,10 +372,6 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err) | |||
383 | tf->error = err; | 372 | tf->error = err; |
384 | tf->status = stat; | 373 | tf->status = stat; |
385 | 374 | ||
386 | args->tf_flags |= (IDE_TFLAG_IN_TF|IDE_TFLAG_IN_DEVICE); | ||
387 | if (args->tf_flags & IDE_TFLAG_LBA48) | ||
388 | args->tf_flags |= IDE_TFLAG_IN_HOB; | ||
389 | |||
390 | ide_tf_read(drive, args); | 375 | ide_tf_read(drive, args); |
391 | } | 376 | } |
392 | } else if (blk_pm_request(rq)) { | 377 | } else if (blk_pm_request(rq)) { |
@@ -626,42 +611,6 @@ ide_startstop_t ide_abort(ide_drive_t *drive, const char *msg) | |||
626 | return __ide_abort(drive, rq); | 611 | return __ide_abort(drive, rq); |
627 | } | 612 | } |
628 | 613 | ||
629 | /** | ||
630 | * drive_cmd_intr - drive command completion interrupt | ||
631 | * @drive: drive the completion interrupt occurred on | ||
632 | * | ||
633 | * drive_cmd_intr() is invoked on completion of a special DRIVE_CMD. | ||
634 | * We do any necessary data reading and then wait for the drive to | ||
635 | * go non busy. At that point we may read the error data and complete | ||
636 | * the request | ||
637 | */ | ||
638 | |||
639 | static ide_startstop_t drive_cmd_intr (ide_drive_t *drive) | ||
640 | { | ||
641 | struct request *rq = HWGROUP(drive)->rq; | ||
642 | ide_hwif_t *hwif = HWIF(drive); | ||
643 | u8 *args = (u8 *) rq->buffer; | ||
644 | u8 stat = hwif->INB(IDE_STATUS_REG); | ||
645 | int retries = 10; | ||
646 | |||
647 | local_irq_enable_in_hardirq(); | ||
648 | if (rq->cmd_type == REQ_TYPE_ATA_CMD && | ||
649 | (stat & DRQ_STAT) && args && args[3]) { | ||
650 | u8 io_32bit = drive->io_32bit; | ||
651 | drive->io_32bit = 0; | ||
652 | hwif->ata_input_data(drive, &args[4], args[3] * SECTOR_WORDS); | ||
653 | drive->io_32bit = io_32bit; | ||
654 | while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--) | ||
655 | udelay(100); | ||
656 | } | ||
657 | |||
658 | if (!OK_STAT(stat, READY_STAT, BAD_STAT)) | ||
659 | return ide_error(drive, "drive_cmd", stat); | ||
660 | /* calls ide_end_drive_cmd */ | ||
661 | ide_end_drive_cmd(drive, stat, hwif->INB(IDE_ERROR_REG)); | ||
662 | return ide_stopped; | ||
663 | } | ||
664 | |||
665 | static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf) | 614 | static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf) |
666 | { | 615 | { |
667 | tf->nsect = drive->sect; | 616 | tf->nsect = drive->sect; |
@@ -710,7 +659,7 @@ static ide_startstop_t ide_disk_special(ide_drive_t *drive) | |||
710 | return ide_stopped; | 659 | return ide_stopped; |
711 | } | 660 | } |
712 | 661 | ||
713 | args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE | | 662 | args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE | |
714 | IDE_TFLAG_CUSTOM_HANDLER; | 663 | IDE_TFLAG_CUSTOM_HANDLER; |
715 | 664 | ||
716 | do_rw_taskfile(drive, &args); | 665 | do_rw_taskfile(drive, &args); |
@@ -787,7 +736,7 @@ static ide_startstop_t do_special (ide_drive_t *drive) | |||
787 | 736 | ||
788 | if (hwif->host_flags & IDE_HFLAG_SET_PIO_MODE_KEEP_DMA) { | 737 | if (hwif->host_flags & IDE_HFLAG_SET_PIO_MODE_KEEP_DMA) { |
789 | if (keep_dma) | 738 | if (keep_dma) |
790 | hwif->ide_dma_on(drive); | 739 | ide_dma_on(drive); |
791 | } | 740 | } |
792 | } | 741 | } |
793 | 742 | ||
@@ -847,16 +796,9 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive, | |||
847 | struct request *rq) | 796 | struct request *rq) |
848 | { | 797 | { |
849 | ide_hwif_t *hwif = HWIF(drive); | 798 | ide_hwif_t *hwif = HWIF(drive); |
850 | u8 *args = rq->buffer; | 799 | ide_task_t *task = rq->special; |
851 | ide_task_t ltask; | ||
852 | struct ide_taskfile *tf = &ltask.tf; | ||
853 | |||
854 | if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { | ||
855 | ide_task_t *task = rq->special; | ||
856 | |||
857 | if (task == NULL) | ||
858 | goto done; | ||
859 | 800 | ||
801 | if (task) { | ||
860 | hwif->data_phase = task->data_phase; | 802 | hwif->data_phase = task->data_phase; |
861 | 803 | ||
862 | switch (hwif->data_phase) { | 804 | switch (hwif->data_phase) { |
@@ -873,33 +815,6 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive, | |||
873 | return do_rw_taskfile(drive, task); | 815 | return do_rw_taskfile(drive, task); |
874 | } | 816 | } |
875 | 817 | ||
876 | if (args == NULL) | ||
877 | goto done; | ||
878 | |||
879 | memset(&ltask, 0, sizeof(ltask)); | ||
880 | if (rq->cmd_type == REQ_TYPE_ATA_CMD) { | ||
881 | #ifdef DEBUG | ||
882 | printk("%s: DRIVE_CMD\n", drive->name); | ||
883 | #endif | ||
884 | tf->feature = args[2]; | ||
885 | if (args[0] == WIN_SMART) { | ||
886 | tf->nsect = args[3]; | ||
887 | tf->lbal = args[1]; | ||
888 | tf->lbam = 0x4f; | ||
889 | tf->lbah = 0xc2; | ||
890 | ltask.tf_flags = IDE_TFLAG_OUT_TF; | ||
891 | } else { | ||
892 | tf->nsect = args[1]; | ||
893 | ltask.tf_flags = IDE_TFLAG_OUT_FEATURE | | ||
894 | IDE_TFLAG_OUT_NSECT; | ||
895 | } | ||
896 | } | ||
897 | tf->command = args[0]; | ||
898 | ide_tf_load(drive, &ltask); | ||
899 | ide_execute_command(drive, args[0], &drive_cmd_intr, WAIT_WORSTCASE, NULL); | ||
900 | return ide_started; | ||
901 | |||
902 | done: | ||
903 | /* | 818 | /* |
904 | * NULL is actually a valid way of waiting for | 819 | * NULL is actually a valid way of waiting for |
905 | * all current requests to be flushed from the queue. | 820 | * all current requests to be flushed from the queue. |
@@ -939,8 +854,7 @@ static void ide_check_pm_state(ide_drive_t *drive, struct request *rq) | |||
939 | if (rc) | 854 | if (rc) |
940 | printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name); | 855 | printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name); |
941 | SELECT_DRIVE(drive); | 856 | SELECT_DRIVE(drive); |
942 | if (IDE_CONTROL_REG) | 857 | ide_set_irq(drive, 1); |
943 | HWIF(drive)->OUTB(drive->ctl, IDE_CONTROL_REG); | ||
944 | rc = ide_wait_not_busy(HWIF(drive), 100000); | 858 | rc = ide_wait_not_busy(HWIF(drive), 100000); |
945 | if (rc) | 859 | if (rc) |
946 | printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name); | 860 | printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name); |
@@ -1004,8 +918,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq) | |||
1004 | if (drive->current_speed == 0xff) | 918 | if (drive->current_speed == 0xff) |
1005 | ide_config_drive_speed(drive, drive->desired_speed); | 919 | ide_config_drive_speed(drive, drive->desired_speed); |
1006 | 920 | ||
1007 | if (rq->cmd_type == REQ_TYPE_ATA_CMD || | 921 | if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) |
1008 | rq->cmd_type == REQ_TYPE_ATA_TASKFILE) | ||
1009 | return execute_drive_cmd(drive, rq); | 922 | return execute_drive_cmd(drive, rq); |
1010 | else if (blk_pm_request(rq)) { | 923 | else if (blk_pm_request(rq)) { |
1011 | struct request_pm_state *pm = rq->data; | 924 | struct request_pm_state *pm = rq->data; |
@@ -1213,15 +1126,13 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq) | |||
1213 | } | 1126 | } |
1214 | again: | 1127 | again: |
1215 | hwif = HWIF(drive); | 1128 | hwif = HWIF(drive); |
1216 | if (hwgroup->hwif->sharing_irq && | 1129 | if (hwgroup->hwif->sharing_irq && hwif != hwgroup->hwif) { |
1217 | hwif != hwgroup->hwif && | ||
1218 | hwif->io_ports[IDE_CONTROL_OFFSET]) { | ||
1219 | /* | 1130 | /* |
1220 | * set nIEN for previous hwif, drives in the | 1131 | * set nIEN for previous hwif, drives in the |
1221 | * quirk_list may not like intr setups/cleanups | 1132 | * quirk_list may not like intr setups/cleanups |
1222 | */ | 1133 | */ |
1223 | if (drive->quirk_list != 1) | 1134 | if (drive->quirk_list != 1) |
1224 | hwif->OUTB(drive->ctl | 2, IDE_CONTROL_REG); | 1135 | ide_set_irq(drive, 0); |
1225 | } | 1136 | } |
1226 | hwgroup->hwif = hwif; | 1137 | hwgroup->hwif = hwif; |
1227 | hwgroup->drive = drive; | 1138 | hwgroup->drive = drive; |
@@ -1334,7 +1245,7 @@ static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) | |||
1334 | */ | 1245 | */ |
1335 | drive->retry_pio++; | 1246 | drive->retry_pio++; |
1336 | drive->state = DMA_PIO_RETRY; | 1247 | drive->state = DMA_PIO_RETRY; |
1337 | hwif->dma_off_quietly(drive); | 1248 | ide_dma_off_quietly(drive); |
1338 | 1249 | ||
1339 | /* | 1250 | /* |
1340 | * un-busy drive etc (hwgroup->busy is cleared on return) and | 1251 | * un-busy drive etc (hwgroup->busy is cleared on return) and |
@@ -1679,7 +1590,6 @@ irqreturn_t ide_intr (int irq, void *dev_id) | |||
1679 | void ide_init_drive_cmd (struct request *rq) | 1590 | void ide_init_drive_cmd (struct request *rq) |
1680 | { | 1591 | { |
1681 | memset(rq, 0, sizeof(*rq)); | 1592 | memset(rq, 0, sizeof(*rq)); |
1682 | rq->cmd_type = REQ_TYPE_ATA_CMD; | ||
1683 | rq->ref_count = 1; | 1593 | rq->ref_count = 1; |
1684 | } | 1594 | } |
1685 | 1595 | ||
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c index c97c0719ddf1..e2a7e95e1636 100644 --- a/drivers/ide/ide-iops.c +++ b/drivers/ide/ide-iops.c | |||
@@ -619,7 +619,7 @@ no_80w: | |||
619 | int ide_ata66_check (ide_drive_t *drive, ide_task_t *args) | 619 | int ide_ata66_check (ide_drive_t *drive, ide_task_t *args) |
620 | { | 620 | { |
621 | if (args->tf.command == WIN_SETFEATURES && | 621 | if (args->tf.command == WIN_SETFEATURES && |
622 | args->tf.lbal > XFER_UDMA_2 && | 622 | args->tf.nsect > XFER_UDMA_2 && |
623 | args->tf.feature == SETFEATURES_XFER) { | 623 | args->tf.feature == SETFEATURES_XFER) { |
624 | if (eighty_ninty_three(drive) == 0) { | 624 | if (eighty_ninty_three(drive) == 0) { |
625 | printk(KERN_WARNING "%s: UDMA speeds >UDMA33 cannot " | 625 | printk(KERN_WARNING "%s: UDMA speeds >UDMA33 cannot " |
@@ -639,7 +639,7 @@ int ide_ata66_check (ide_drive_t *drive, ide_task_t *args) | |||
639 | int set_transfer (ide_drive_t *drive, ide_task_t *args) | 639 | int set_transfer (ide_drive_t *drive, ide_task_t *args) |
640 | { | 640 | { |
641 | if (args->tf.command == WIN_SETFEATURES && | 641 | if (args->tf.command == WIN_SETFEATURES && |
642 | args->tf.lbal >= XFER_SW_DMA_0 && | 642 | args->tf.nsect >= XFER_SW_DMA_0 && |
643 | args->tf.feature == SETFEATURES_XFER && | 643 | args->tf.feature == SETFEATURES_XFER && |
644 | (drive->id->dma_ultra || | 644 | (drive->id->dma_ultra || |
645 | drive->id->dma_mword || | 645 | drive->id->dma_mword || |
@@ -688,8 +688,7 @@ int ide_driveid_update(ide_drive_t *drive) | |||
688 | */ | 688 | */ |
689 | 689 | ||
690 | SELECT_MASK(drive, 1); | 690 | SELECT_MASK(drive, 1); |
691 | if (IDE_CONTROL_REG) | 691 | ide_set_irq(drive, 1); |
692 | hwif->OUTB(drive->ctl,IDE_CONTROL_REG); | ||
693 | msleep(50); | 692 | msleep(50); |
694 | hwif->OUTB(WIN_IDENTIFY, IDE_COMMAND_REG); | 693 | hwif->OUTB(WIN_IDENTIFY, IDE_COMMAND_REG); |
695 | timeout = jiffies + WAIT_WORSTCASE; | 694 | timeout = jiffies + WAIT_WORSTCASE; |
@@ -742,8 +741,8 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed) | |||
742 | // msleep(50); | 741 | // msleep(50); |
743 | 742 | ||
744 | #ifdef CONFIG_BLK_DEV_IDEDMA | 743 | #ifdef CONFIG_BLK_DEV_IDEDMA |
745 | if (hwif->ide_dma_on) /* check if host supports DMA */ | 744 | if (hwif->dma_host_set) /* check if host supports DMA */ |
746 | hwif->dma_host_off(drive); | 745 | hwif->dma_host_set(drive, 0); |
747 | #endif | 746 | #endif |
748 | 747 | ||
749 | /* Skip setting PIO flow-control modes on pre-EIDE drives */ | 748 | /* Skip setting PIO flow-control modes on pre-EIDE drives */ |
@@ -772,13 +771,12 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed) | |||
772 | SELECT_DRIVE(drive); | 771 | SELECT_DRIVE(drive); |
773 | SELECT_MASK(drive, 0); | 772 | SELECT_MASK(drive, 0); |
774 | udelay(1); | 773 | udelay(1); |
775 | if (IDE_CONTROL_REG) | 774 | ide_set_irq(drive, 0); |
776 | hwif->OUTB(drive->ctl | 2, IDE_CONTROL_REG); | ||
777 | hwif->OUTB(speed, IDE_NSECTOR_REG); | 775 | hwif->OUTB(speed, IDE_NSECTOR_REG); |
778 | hwif->OUTB(SETFEATURES_XFER, IDE_FEATURE_REG); | 776 | hwif->OUTB(SETFEATURES_XFER, IDE_FEATURE_REG); |
779 | hwif->OUTBSYNC(drive, WIN_SETFEATURES, IDE_COMMAND_REG); | 777 | hwif->OUTBSYNC(drive, WIN_SETFEATURES, IDE_COMMAND_REG); |
780 | if ((IDE_CONTROL_REG) && (drive->quirk_list == 2)) | 778 | if (drive->quirk_list == 2) |
781 | hwif->OUTB(drive->ctl, IDE_CONTROL_REG); | 779 | ide_set_irq(drive, 1); |
782 | 780 | ||
783 | error = __ide_wait_stat(drive, drive->ready_stat, | 781 | error = __ide_wait_stat(drive, drive->ready_stat, |
784 | BUSY_STAT|DRQ_STAT|ERR_STAT, | 782 | BUSY_STAT|DRQ_STAT|ERR_STAT, |
@@ -799,10 +797,11 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed) | |||
799 | 797 | ||
800 | skip: | 798 | skip: |
801 | #ifdef CONFIG_BLK_DEV_IDEDMA | 799 | #ifdef CONFIG_BLK_DEV_IDEDMA |
802 | if (speed >= XFER_SW_DMA_0) | 800 | if ((speed >= XFER_SW_DMA_0 || (hwif->host_flags & IDE_HFLAG_VDMA)) && |
803 | hwif->dma_host_on(drive); | 801 | drive->using_dma) |
804 | else if (hwif->ide_dma_on) /* check if host supports DMA */ | 802 | hwif->dma_host_set(drive, 1); |
805 | hwif->dma_off_quietly(drive); | 803 | else if (hwif->dma_host_set) /* check if host supports DMA */ |
804 | ide_dma_off_quietly(drive); | ||
806 | #endif | 805 | #endif |
807 | 806 | ||
808 | switch(speed) { | 807 | switch(speed) { |
@@ -1012,10 +1011,10 @@ static void check_dma_crc(ide_drive_t *drive) | |||
1012 | { | 1011 | { |
1013 | #ifdef CONFIG_BLK_DEV_IDEDMA | 1012 | #ifdef CONFIG_BLK_DEV_IDEDMA |
1014 | if (drive->crc_count) { | 1013 | if (drive->crc_count) { |
1015 | drive->hwif->dma_off_quietly(drive); | 1014 | ide_dma_off_quietly(drive); |
1016 | ide_set_xfer_rate(drive, ide_auto_reduce_xfer(drive)); | 1015 | ide_set_xfer_rate(drive, ide_auto_reduce_xfer(drive)); |
1017 | if (drive->current_speed >= XFER_SW_DMA_0) | 1016 | if (drive->current_speed >= XFER_SW_DMA_0) |
1018 | (void) HWIF(drive)->ide_dma_on(drive); | 1017 | ide_dma_on(drive); |
1019 | } else | 1018 | } else |
1020 | ide_dma_off(drive); | 1019 | ide_dma_off(drive); |
1021 | #endif | 1020 | #endif |
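The ide-iops.c changes fold the old per-hwif ->ide_dma_on, ->dma_off_quietly and ->dma_host_on/off methods into a single ->dma_host_set(drive, on) hook driven by library helpers. A plausible shape for those helpers, assuming they keep updating drive->using_dma and the bounce-buffer limit via ide_toggle_bounce() (an assumption, not shown in this diff):

    /* Sketch only: library wrappers around the new ->dma_host_set hook. */
    void ide_dma_off_quietly(ide_drive_t *drive)
    {
            drive->using_dma = 0;
            ide_toggle_bounce(drive, 0);            /* assumed helper */
            drive->hwif->dma_host_set(drive, 0);
    }

    void ide_dma_on(ide_drive_t *drive)
    {
            drive->using_dma = 1;
            ide_toggle_bounce(drive, 1);            /* assumed helper */
            drive->hwif->dma_host_set(drive, 1);
    }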
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c index a3bd8e8ed6b0..9b44fbdfe41f 100644 --- a/drivers/ide/ide-lib.c +++ b/drivers/ide/ide-lib.c | |||
@@ -454,8 +454,7 @@ int ide_set_xfer_rate(ide_drive_t *drive, u8 rate) | |||
454 | static void ide_dump_opcode(ide_drive_t *drive) | 454 | static void ide_dump_opcode(ide_drive_t *drive) |
455 | { | 455 | { |
456 | struct request *rq; | 456 | struct request *rq; |
457 | u8 opcode = 0; | 457 | ide_task_t *task = NULL; |
458 | int found = 0; | ||
459 | 458 | ||
460 | spin_lock(&ide_lock); | 459 | spin_lock(&ide_lock); |
461 | rq = NULL; | 460 | rq = NULL; |
@@ -464,25 +463,15 @@ static void ide_dump_opcode(ide_drive_t *drive) | |||
464 | spin_unlock(&ide_lock); | 463 | spin_unlock(&ide_lock); |
465 | if (!rq) | 464 | if (!rq) |
466 | return; | 465 | return; |
467 | if (rq->cmd_type == REQ_TYPE_ATA_CMD) { | 466 | |
468 | char *args = rq->buffer; | 467 | if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) |
469 | if (args) { | 468 | task = rq->special; |
470 | opcode = args[0]; | ||
471 | found = 1; | ||
472 | } | ||
473 | } else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { | ||
474 | ide_task_t *args = rq->special; | ||
475 | if (args) { | ||
476 | opcode = args->tf.command; | ||
477 | found = 1; | ||
478 | } | ||
479 | } | ||
480 | 469 | ||
481 | printk("ide: failed opcode was: "); | 470 | printk("ide: failed opcode was: "); |
482 | if (!found) | 471 | if (task == NULL) |
483 | printk("unknown\n"); | 472 | printk(KERN_CONT "unknown\n"); |
484 | else | 473 | else |
485 | printk("0x%02x\n", opcode); | 474 | printk(KERN_CONT "0x%02x\n", task->tf.command); |
486 | } | 475 | } |
487 | 476 | ||
488 | u64 ide_get_lba_addr(struct ide_taskfile *tf, int lba48) | 477 | u64 ide_get_lba_addr(struct ide_taskfile *tf, int lba48) |
diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c index e245521af7b5..cbbb0f75be92 100644 --- a/drivers/ide/ide-pnp.c +++ b/drivers/ide/ide-pnp.c | |||
@@ -31,7 +31,6 @@ static int idepnp_probe(struct pnp_dev * dev, const struct pnp_device_id *dev_id | |||
31 | { | 31 | { |
32 | hw_regs_t hw; | 32 | hw_regs_t hw; |
33 | ide_hwif_t *hwif; | 33 | ide_hwif_t *hwif; |
34 | int index; | ||
35 | 34 | ||
36 | if (!(pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && pnp_irq_valid(dev, 0))) | 35 | if (!(pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && pnp_irq_valid(dev, 0))) |
37 | return -1; | 36 | return -1; |
@@ -41,11 +40,19 @@ static int idepnp_probe(struct pnp_dev * dev, const struct pnp_device_id *dev_id | |||
41 | pnp_port_start(dev, 1)); | 40 | pnp_port_start(dev, 1)); |
42 | hw.irq = pnp_irq(dev, 0); | 41 | hw.irq = pnp_irq(dev, 0); |
43 | 42 | ||
44 | index = ide_register_hw(&hw, NULL, 1, &hwif); | 43 | hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]); |
44 | if (hwif) { | ||
45 | u8 index = hwif->index; | ||
46 | u8 idx[4] = { index, 0xff, 0xff, 0xff }; | ||
47 | |||
48 | ide_init_port_data(hwif, index); | ||
49 | ide_init_port_hw(hwif, &hw); | ||
45 | 50 | ||
46 | if (index != -1) { | 51 | printk(KERN_INFO "ide%d: generic PnP IDE interface\n", index); |
47 | printk(KERN_INFO "ide%d: generic PnP IDE interface\n", index); | ||
48 | pnp_set_drvdata(dev,hwif); | 52 | pnp_set_drvdata(dev,hwif); |
53 | |||
54 | ide_device_add(idx); | ||
55 | |||
49 | return 0; | 56 | return 0; |
50 | } | 57 | } |
51 | 58 | ||
@@ -68,12 +75,15 @@ static struct pnp_driver idepnp_driver = { | |||
68 | .remove = idepnp_remove, | 75 | .remove = idepnp_remove, |
69 | }; | 76 | }; |
70 | 77 | ||
71 | void __init pnpide_init(void) | 78 | static int __init pnpide_init(void) |
72 | { | 79 | { |
73 | pnp_register_driver(&idepnp_driver); | 80 | return pnp_register_driver(&idepnp_driver); |
74 | } | 81 | } |
75 | 82 | ||
76 | void __exit pnpide_exit(void) | 83 | static void __exit pnpide_exit(void) |
77 | { | 84 | { |
78 | pnp_unregister_driver(&idepnp_driver); | 85 | pnp_unregister_driver(&idepnp_driver); |
79 | } | 86 | } |
87 | |||
88 | module_init(pnpide_init); | ||
89 | module_exit(pnpide_exit); | ||
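Stripped of the PnP specifics, the registration sequence the hunk above switches to looks like this (hw is assumed to be a hw_regs_t already filled in, e.g. via ide_setup_ports()):

    /* Sketch only: find a free port slot, (re)initialize it, and hand it
     * over to the core.  All calls appear in the hunk above. */
    ide_hwif_t *hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);

    if (hwif) {
            u8 idx[4] = { hwif->index, 0xff, 0xff, 0xff };

            ide_init_port_data(hwif, hwif->index);
            ide_init_port_hw(hwif, &hw);

            ide_device_add(idx);
    }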
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 0379d1f697cf..edf650b20c67 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c | |||
@@ -235,9 +235,6 @@ static inline void do_identify (ide_drive_t *drive, u8 cmd) | |||
235 | drive->media = ide_disk; | 235 | drive->media = ide_disk; |
236 | printk("%s DISK drive\n", (id->config == 0x848a) ? "CFA" : "ATA" ); | 236 | printk("%s DISK drive\n", (id->config == 0x848a) ? "CFA" : "ATA" ); |
237 | 237 | ||
238 | if (hwif->quirkproc) | ||
239 | drive->quirk_list = hwif->quirkproc(drive); | ||
240 | |||
241 | return; | 238 | return; |
242 | 239 | ||
243 | err_misc: | 240 | err_misc: |
@@ -353,22 +350,19 @@ static int try_to_identify (ide_drive_t *drive, u8 cmd) | |||
353 | * the irq handler isn't expecting. | 350 | * the irq handler isn't expecting. |
354 | */ | 351 | */ |
355 | if (IDE_CONTROL_REG) { | 352 | if (IDE_CONTROL_REG) { |
356 | u8 ctl = drive->ctl | 2; | ||
357 | if (!hwif->irq) { | 353 | if (!hwif->irq) { |
358 | autoprobe = 1; | 354 | autoprobe = 1; |
359 | cookie = probe_irq_on(); | 355 | cookie = probe_irq_on(); |
360 | /* enable device irq */ | ||
361 | ctl &= ~2; | ||
362 | } | 356 | } |
363 | hwif->OUTB(ctl, IDE_CONTROL_REG); | 357 | ide_set_irq(drive, autoprobe); |
364 | } | 358 | } |
365 | 359 | ||
366 | retval = actual_try_to_identify(drive, cmd); | 360 | retval = actual_try_to_identify(drive, cmd); |
367 | 361 | ||
368 | if (autoprobe) { | 362 | if (autoprobe) { |
369 | int irq; | 363 | int irq; |
370 | /* mask device irq */ | 364 | |
371 | hwif->OUTB(drive->ctl|2, IDE_CONTROL_REG); | 365 | ide_set_irq(drive, 0); |
372 | /* clear drive IRQ */ | 366 | /* clear drive IRQ */ |
373 | (void) hwif->INB(IDE_STATUS_REG); | 367 | (void) hwif->INB(IDE_STATUS_REG); |
374 | udelay(5); | 368 | udelay(5); |
@@ -388,6 +382,20 @@ static int try_to_identify (ide_drive_t *drive, u8 cmd) | |||
388 | return retval; | 382 | return retval; |
389 | } | 383 | } |
390 | 384 | ||
385 | static int ide_busy_sleep(ide_hwif_t *hwif) | ||
386 | { | ||
387 | unsigned long timeout = jiffies + WAIT_WORSTCASE; | ||
388 | u8 stat; | ||
389 | |||
390 | do { | ||
391 | msleep(50); | ||
392 | stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); | ||
393 | if ((stat & BUSY_STAT) == 0) | ||
394 | return 0; | ||
395 | } while (time_before(jiffies, timeout)); | ||
396 | |||
397 | return 1; | ||
398 | } | ||
391 | 399 | ||
392 | /** | 400 | /** |
393 | * do_probe - probe an IDE device | 401 | * do_probe - probe an IDE device |
@@ -456,7 +464,6 @@ static int do_probe (ide_drive_t *drive, u8 cmd) | |||
456 | if ((rc == 1 && cmd == WIN_PIDENTIFY) && | 464 | if ((rc == 1 && cmd == WIN_PIDENTIFY) && |
457 | ((drive->autotune == IDE_TUNE_DEFAULT) || | 465 | ((drive->autotune == IDE_TUNE_DEFAULT) || |
458 | (drive->autotune == IDE_TUNE_AUTO))) { | 466 | (drive->autotune == IDE_TUNE_AUTO))) { |
459 | unsigned long timeout; | ||
460 | printk("%s: no response (status = 0x%02x), " | 467 | printk("%s: no response (status = 0x%02x), " |
461 | "resetting drive\n", drive->name, | 468 | "resetting drive\n", drive->name, |
462 | hwif->INB(IDE_STATUS_REG)); | 469 | hwif->INB(IDE_STATUS_REG)); |
@@ -464,10 +471,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd) | |||
464 | hwif->OUTB(drive->select.all, IDE_SELECT_REG); | 471 | hwif->OUTB(drive->select.all, IDE_SELECT_REG); |
465 | msleep(50); | 472 | msleep(50); |
466 | hwif->OUTB(WIN_SRST, IDE_COMMAND_REG); | 473 | hwif->OUTB(WIN_SRST, IDE_COMMAND_REG); |
467 | timeout = jiffies; | 474 | (void)ide_busy_sleep(hwif); |
468 | while (((hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && | ||
469 | time_before(jiffies, timeout + WAIT_WORSTCASE)) | ||
470 | msleep(50); | ||
471 | rc = try_to_identify(drive, cmd); | 475 | rc = try_to_identify(drive, cmd); |
472 | } | 476 | } |
473 | if (rc == 1) | 477 | if (rc == 1) |
@@ -495,20 +499,16 @@ static int do_probe (ide_drive_t *drive, u8 cmd) | |||
495 | static void enable_nest (ide_drive_t *drive) | 499 | static void enable_nest (ide_drive_t *drive) |
496 | { | 500 | { |
497 | ide_hwif_t *hwif = HWIF(drive); | 501 | ide_hwif_t *hwif = HWIF(drive); |
498 | unsigned long timeout; | ||
499 | 502 | ||
500 | printk("%s: enabling %s -- ", hwif->name, drive->id->model); | 503 | printk("%s: enabling %s -- ", hwif->name, drive->id->model); |
501 | SELECT_DRIVE(drive); | 504 | SELECT_DRIVE(drive); |
502 | msleep(50); | 505 | msleep(50); |
503 | hwif->OUTB(EXABYTE_ENABLE_NEST, IDE_COMMAND_REG); | 506 | hwif->OUTB(EXABYTE_ENABLE_NEST, IDE_COMMAND_REG); |
504 | timeout = jiffies + WAIT_WORSTCASE; | 507 | |
505 | do { | 508 | if (ide_busy_sleep(hwif)) { |
506 | if (time_after(jiffies, timeout)) { | 509 | printk(KERN_CONT "failed (timeout)\n"); |
507 | printk("failed (timeout)\n"); | 510 | return; |
508 | return; | 511 | } |
509 | } | ||
510 | msleep(50); | ||
511 | } while ((hwif->INB(IDE_STATUS_REG)) & BUSY_STAT); | ||
512 | 512 | ||
513 | msleep(50); | 513 | msleep(50); |
514 | 514 | ||
@@ -656,8 +656,7 @@ static int wait_hwif_ready(ide_hwif_t *hwif) | |||
656 | /* Ignore disks that we will not probe for later. */ | 656 | /* Ignore disks that we will not probe for later. */ |
657 | if (!drive->noprobe || drive->present) { | 657 | if (!drive->noprobe || drive->present) { |
658 | SELECT_DRIVE(drive); | 658 | SELECT_DRIVE(drive); |
659 | if (IDE_CONTROL_REG) | 659 | ide_set_irq(drive, 1); |
660 | hwif->OUTB(drive->ctl, IDE_CONTROL_REG); | ||
661 | mdelay(2); | 660 | mdelay(2); |
662 | rc = ide_wait_not_busy(hwif, 35000); | 661 | rc = ide_wait_not_busy(hwif, 35000); |
663 | if (rc) | 662 | if (rc) |
@@ -676,19 +675,18 @@ out: | |||
676 | 675 | ||
677 | /** | 676 | /** |
678 | * ide_undecoded_slave - look for bad CF adapters | 677 | * ide_undecoded_slave - look for bad CF adapters |
679 | * @hwif: interface | 678 | * @drive1: drive |
680 | * | 679 | * |
681 | * Analyse the drives on the interface and attempt to decide if we | 680 | * Analyse the drives on the interface and attempt to decide if we |
682 | * have the same drive viewed twice. This occurs with crap CF adapters | 681 | * have the same drive viewed twice. This occurs with crap CF adapters |
683 | * and PCMCIA sometimes. | 682 | * and PCMCIA sometimes. |
684 | */ | 683 | */ |
685 | 684 | ||
686 | void ide_undecoded_slave(ide_hwif_t *hwif) | 685 | void ide_undecoded_slave(ide_drive_t *drive1) |
687 | { | 686 | { |
688 | ide_drive_t *drive0 = &hwif->drives[0]; | 687 | ide_drive_t *drive0 = &drive1->hwif->drives[0]; |
689 | ide_drive_t *drive1 = &hwif->drives[1]; | ||
690 | 688 | ||
691 | if (drive0->present == 0 || drive1->present == 0) | 689 | if ((drive1->dn & 1) == 0 || drive0->present == 0) |
692 | return; | 690 | return; |
693 | 691 | ||
694 | /* If the models don't match they are not the same product */ | 692 | /* If the models don't match they are not the same product */ |
@@ -791,18 +789,11 @@ static void probe_hwif(ide_hwif_t *hwif) | |||
791 | } | 789 | } |
792 | } | 790 | } |
793 | if (hwif->io_ports[IDE_CONTROL_OFFSET] && hwif->reset) { | 791 | if (hwif->io_ports[IDE_CONTROL_OFFSET] && hwif->reset) { |
794 | unsigned long timeout = jiffies + WAIT_WORSTCASE; | ||
795 | u8 stat; | ||
796 | |||
797 | printk(KERN_WARNING "%s: reset\n", hwif->name); | 792 | printk(KERN_WARNING "%s: reset\n", hwif->name); |
798 | hwif->OUTB(12, hwif->io_ports[IDE_CONTROL_OFFSET]); | 793 | hwif->OUTB(12, hwif->io_ports[IDE_CONTROL_OFFSET]); |
799 | udelay(10); | 794 | udelay(10); |
800 | hwif->OUTB(8, hwif->io_ports[IDE_CONTROL_OFFSET]); | 795 | hwif->OUTB(8, hwif->io_ports[IDE_CONTROL_OFFSET]); |
801 | do { | 796 | (void)ide_busy_sleep(hwif); |
802 | msleep(50); | ||
803 | stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); | ||
804 | } while ((stat & BUSY_STAT) && time_after(timeout, jiffies)); | ||
805 | |||
806 | } | 797 | } |
807 | local_irq_restore(flags); | 798 | local_irq_restore(flags); |
808 | /* | 799 | /* |
@@ -817,8 +808,12 @@ static void probe_hwif(ide_hwif_t *hwif) | |||
817 | return; | 808 | return; |
818 | } | 809 | } |
819 | 810 | ||
820 | if (hwif->fixup) | 811 | for (unit = 0; unit < MAX_DRIVES; unit++) { |
821 | hwif->fixup(hwif); | 812 | ide_drive_t *drive = &hwif->drives[unit]; |
813 | |||
814 | if (drive->present && hwif->quirkproc) | ||
815 | hwif->quirkproc(drive); | ||
816 | } | ||
822 | 817 | ||
823 | for (unit = 0; unit < MAX_DRIVES; ++unit) { | 818 | for (unit = 0; unit < MAX_DRIVES; ++unit) { |
824 | ide_drive_t *drive = &hwif->drives[unit]; | 819 | ide_drive_t *drive = &hwif->drives[unit]; |
@@ -833,7 +828,7 @@ static void probe_hwif(ide_hwif_t *hwif) | |||
833 | 828 | ||
834 | drive->nice1 = 1; | 829 | drive->nice1 = 1; |
835 | 830 | ||
836 | if (hwif->ide_dma_on) | 831 | if (hwif->dma_host_set) |
837 | ide_set_dma(drive); | 832 | ide_set_dma(drive); |
838 | } | 833 | } |
839 | } | 834 | } |
@@ -848,25 +843,6 @@ static void probe_hwif(ide_hwif_t *hwif) | |||
848 | } | 843 | } |
849 | } | 844 | } |
850 | 845 | ||
851 | static int hwif_init(ide_hwif_t *hwif); | ||
852 | static void hwif_register_devices(ide_hwif_t *hwif); | ||
853 | |||
854 | static int probe_hwif_init(ide_hwif_t *hwif) | ||
855 | { | ||
856 | probe_hwif(hwif); | ||
857 | |||
858 | if (!hwif_init(hwif)) { | ||
859 | printk(KERN_INFO "%s: failed to initialize IDE interface\n", | ||
860 | hwif->name); | ||
861 | return -1; | ||
862 | } | ||
863 | |||
864 | if (hwif->present) | ||
865 | hwif_register_devices(hwif); | ||
866 | |||
867 | return 0; | ||
868 | } | ||
869 | |||
870 | #if MAX_HWIFS > 1 | 846 | #if MAX_HWIFS > 1 |
871 | /* | 847 | /* |
872 | * save_match() is used to simplify logic in init_irq() below. | 848 | * save_match() is used to simplify logic in init_irq() below. |
@@ -1359,54 +1335,63 @@ static void hwif_register_devices(ide_hwif_t *hwif) | |||
1359 | } | 1335 | } |
1360 | } | 1336 | } |
1361 | 1337 | ||
1362 | int ideprobe_init (void) | 1338 | int ide_device_add_all(u8 *idx) |
1363 | { | 1339 | { |
1364 | unsigned int index; | 1340 | ide_hwif_t *hwif; |
1365 | int probe[MAX_HWIFS]; | 1341 | int i, rc = 0; |
1366 | 1342 | ||
1367 | memset(probe, 0, MAX_HWIFS * sizeof(int)); | 1343 | for (i = 0; i < MAX_HWIFS; i++) { |
1368 | for (index = 0; index < MAX_HWIFS; ++index) | 1344 | if (idx[i] == 0xff) |
1369 | probe[index] = !ide_hwifs[index].present; | 1345 | continue; |
1370 | 1346 | ||
1371 | for (index = 0; index < MAX_HWIFS; ++index) | 1347 | probe_hwif(&ide_hwifs[idx[i]]); |
1372 | if (probe[index]) | 1348 | } |
1373 | probe_hwif(&ide_hwifs[index]); | 1349 | |
1374 | for (index = 0; index < MAX_HWIFS; ++index) | 1350 | for (i = 0; i < MAX_HWIFS; i++) { |
1375 | if (probe[index]) | 1351 | if (idx[i] == 0xff) |
1376 | hwif_init(&ide_hwifs[index]); | 1352 | continue; |
1377 | for (index = 0; index < MAX_HWIFS; ++index) { | 1353 | |
1378 | if (probe[index]) { | 1354 | hwif = &ide_hwifs[idx[i]]; |
1379 | ide_hwif_t *hwif = &ide_hwifs[index]; | 1355 | |
1380 | if (!hwif->present) | 1356 | if (hwif_init(hwif) == 0) { |
1381 | continue; | 1357 | printk(KERN_INFO "%s: failed to initialize IDE " |
1382 | if (hwif->chipset == ide_unknown || hwif->chipset == ide_forced) | 1358 | "interface\n", hwif->name); |
1383 | hwif->chipset = ide_generic; | 1359 | rc = -1; |
1384 | hwif_register_devices(hwif); | 1360 | continue; |
1385 | } | 1361 | } |
1386 | } | 1362 | } |
1387 | for (index = 0; index < MAX_HWIFS; ++index) | ||
1388 | if (probe[index]) | ||
1389 | ide_proc_register_port(&ide_hwifs[index]); | ||
1390 | return 0; | ||
1391 | } | ||
1392 | 1363 | ||
1393 | EXPORT_SYMBOL_GPL(ideprobe_init); | 1364 | for (i = 0; i < MAX_HWIFS; i++) { |
1365 | if (idx[i] == 0xff) | ||
1366 | continue; | ||
1394 | 1367 | ||
1395 | int ide_device_add(u8 idx[4]) | 1368 | hwif = &ide_hwifs[idx[i]]; |
1396 | { | ||
1397 | int i, rc = 0; | ||
1398 | 1369 | ||
1399 | for (i = 0; i < 4; i++) { | 1370 | if (hwif->present) { |
1400 | if (idx[i] != 0xff) | 1371 | if (hwif->chipset == ide_unknown || |
1401 | rc |= probe_hwif_init(&ide_hwifs[idx[i]]); | 1372 | hwif->chipset == ide_forced) |
1373 | hwif->chipset = ide_generic; | ||
1374 | hwif_register_devices(hwif); | ||
1375 | } | ||
1402 | } | 1376 | } |
1403 | 1377 | ||
1404 | for (i = 0; i < 4; i++) { | 1378 | for (i = 0; i < MAX_HWIFS; i++) { |
1405 | if (idx[i] != 0xff) | 1379 | if (idx[i] != 0xff) |
1406 | ide_proc_register_port(&ide_hwifs[idx[i]]); | 1380 | ide_proc_register_port(&ide_hwifs[idx[i]]); |
1407 | } | 1381 | } |
1408 | 1382 | ||
1409 | return rc; | 1383 | return rc; |
1410 | } | 1384 | } |
1385 | EXPORT_SYMBOL_GPL(ide_device_add_all); | ||
1386 | |||
1387 | int ide_device_add(u8 idx[4]) | ||
1388 | { | ||
1389 | u8 idx_all[MAX_HWIFS]; | ||
1390 | int i; | ||
1411 | 1391 | ||
1392 | for (i = 0; i < MAX_HWIFS; i++) | ||
1393 | idx_all[i] = (i < 4) ? idx[i] : 0xff; | ||
1394 | |||
1395 | return ide_device_add_all(idx_all); | ||
1396 | } | ||
1412 | EXPORT_SYMBOL_GPL(ide_device_add); | 1397 | EXPORT_SYMBOL_GPL(ide_device_add); |
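ide_device_add_all() takes one index slot per possible port, with 0xff marking unused slots (all three loops above skip those). A hedged usage sketch for a host driver that has set up two ports ("primary" and "secondary" are hypothetical hwif pointers):

    u8 idx[MAX_HWIFS];
    int i;

    for (i = 0; i < MAX_HWIFS; i++)
            idx[i] = 0xff;

    idx[0] = primary->index;
    idx[1] = secondary->index;

    ide_device_add_all(idx);

Callers with at most four ports can keep using ide_device_add(), which the code above simply pads out to MAX_HWIFS entries.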
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c index a4007d30da52..aa663e7f46f2 100644 --- a/drivers/ide/ide-proc.c +++ b/drivers/ide/ide-proc.c | |||
@@ -346,14 +346,20 @@ static int ide_write_setting(ide_drive_t *drive, ide_settings_t *setting, int va | |||
346 | 346 | ||
347 | static int set_xfer_rate (ide_drive_t *drive, int arg) | 347 | static int set_xfer_rate (ide_drive_t *drive, int arg) |
348 | { | 348 | { |
349 | ide_task_t task; | ||
349 | int err; | 350 | int err; |
350 | 351 | ||
351 | if (arg < 0 || arg > 70) | 352 | if (arg < 0 || arg > 70) |
352 | return -EINVAL; | 353 | return -EINVAL; |
353 | 354 | ||
354 | err = ide_wait_cmd(drive, | 355 | memset(&task, 0, sizeof(task)); |
355 | WIN_SETFEATURES, (u8) arg, | 356 | task.tf.command = WIN_SETFEATURES; |
356 | SETFEATURES_XFER, 0, NULL); | 357 | task.tf.feature = SETFEATURES_XFER; |
358 | task.tf.nsect = (u8)arg; | ||
359 | task.tf_flags = IDE_TFLAG_OUT_FEATURE | IDE_TFLAG_OUT_NSECT | | ||
360 | IDE_TFLAG_IN_NSECT; | ||
361 | |||
362 | err = ide_no_data_taskfile(drive, &task); | ||
357 | 363 | ||
358 | if (!err && arg) { | 364 | if (!err && arg) { |
359 | ide_set_xfer_rate(drive, (u8) arg); | 365 | ide_set_xfer_rate(drive, (u8) arg); |
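Any other no-data SETFEATURES subcommand can be issued through the same ide_no_data_taskfile() path that set_xfer_rate() now uses. A sketch, with SETFEATURES_EN_WCACHE (enable write cache, assumed to come from <linux/hdreg.h>) purely as an illustration; pick the IDE_TFLAG_OUT_*/IN_* flags to match the registers the subcommand actually uses:

    ide_task_t task;
    int err;

    memset(&task, 0, sizeof(task));
    task.tf.command = WIN_SETFEATURES;
    task.tf.feature = SETFEATURES_EN_WCACHE;    /* assumed constant */
    task.tf_flags   = IDE_TFLAG_OUT_FEATURE;

    err = ide_no_data_taskfile(drive, &task);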
diff --git a/drivers/ide/ide-scan-pci.c b/drivers/ide/ide-scan-pci.c new file mode 100644 index 000000000000..7ffa332d77ce --- /dev/null +++ b/drivers/ide/ide-scan-pci.c | |||
@@ -0,0 +1,121 @@ | |||
1 | /* | ||
2 | * support for probing IDE PCI devices in the PCI bus order | ||
3 | * | ||
4 | * Copyright (c) 1998-2000 Andre Hedrick <andre@linux-ide.org> | ||
5 | * Copyright (c) 1995-1998 Mark Lord | ||
6 | * | ||
7 | * May be copied or modified under the terms of the GNU General Public License | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/ide.h> | ||
14 | |||
15 | /* | ||
16 | * Module interfaces | ||
17 | */ | ||
18 | |||
19 | static int pre_init = 1; /* Before first ordered IDE scan */ | ||
20 | static LIST_HEAD(ide_pci_drivers); | ||
21 | |||
22 | /* | ||
23 | * __ide_pci_register_driver - attach IDE driver | ||
24 | * @driver: pci driver | ||
25 | * @module: owner module of the driver | ||
26 | * | ||
27 | * Registers a driver with the IDE layer. The IDE layer arranges that | ||
28 | * boot time setup is done in the expected device order and then | ||
29 | * hands the controllers off to the core PCI code to do the rest of | ||
30 | * the work. | ||
31 | * | ||
32 | * Returns are the same as for pci_register_driver | ||
33 | */ | ||
34 | |||
35 | int __ide_pci_register_driver(struct pci_driver *driver, struct module *module, | ||
36 | const char *mod_name) | ||
37 | { | ||
38 | if (!pre_init) | ||
39 | return __pci_register_driver(driver, module, mod_name); | ||
40 | driver->driver.owner = module; | ||
41 | list_add_tail(&driver->node, &ide_pci_drivers); | ||
42 | return 0; | ||
43 | } | ||
44 | EXPORT_SYMBOL_GPL(__ide_pci_register_driver); | ||
45 | |||
46 | /** | ||
47 | * ide_scan_pcidev - find an IDE driver for a device | ||
48 | * @dev: PCI device to check | ||
49 | * | ||
50 | * Look for an IDE driver to handle the device we are considering. | ||
51 | * This is only used during boot up to get the ordering correct. After | ||
52 | * boot up the pci layer takes over the job. | ||
53 | */ | ||
54 | |||
55 | static int __init ide_scan_pcidev(struct pci_dev *dev) | ||
56 | { | ||
57 | struct list_head *l; | ||
58 | struct pci_driver *d; | ||
59 | |||
60 | list_for_each(l, &ide_pci_drivers) { | ||
61 | d = list_entry(l, struct pci_driver, node); | ||
62 | if (d->id_table) { | ||
63 | const struct pci_device_id *id = | ||
64 | pci_match_id(d->id_table, dev); | ||
65 | |||
66 | if (id != NULL && d->probe(dev, id) >= 0) { | ||
67 | dev->driver = d; | ||
68 | pci_dev_get(dev); | ||
69 | return 1; | ||
70 | } | ||
71 | } | ||
72 | } | ||
73 | return 0; | ||
74 | } | ||
75 | |||
76 | /** | ||
77 | * ide_scan_pcibus - perform the initial IDE driver scan | ||
78 | * | ||
79 | * Perform the initial bus rather than driver ordered scan of the | ||
80 | * PCI drivers. After this all IDE pci handling becomes standard | ||
81 | * module ordering not traditionally ordered. | ||
82 | */ | ||
83 | |||
84 | int __init ide_scan_pcibus(void) | ||
85 | { | ||
86 | struct pci_dev *dev = NULL; | ||
87 | struct pci_driver *d; | ||
88 | struct list_head *l, *n; | ||
89 | |||
90 | pre_init = 0; | ||
91 | if (!ide_scan_direction) | ||
92 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev))) | ||
93 | ide_scan_pcidev(dev); | ||
94 | else | ||
95 | while ((dev = pci_get_device_reverse(PCI_ANY_ID, PCI_ANY_ID, | ||
96 | dev))) | ||
97 | ide_scan_pcidev(dev); | ||
98 | |||
99 | /* | ||
100 | * Hand the drivers over to the PCI layer now we | ||
101 | * are post init. | ||
102 | */ | ||
103 | |||
104 | list_for_each_safe(l, n, &ide_pci_drivers) { | ||
105 | list_del(l); | ||
106 | d = list_entry(l, struct pci_driver, node); | ||
107 | if (__pci_register_driver(d, d->driver.owner, | ||
108 | d->driver.mod_name)) | ||
109 | printk(KERN_ERR "%s: failed to register %s driver\n", | ||
110 | __FUNCTION__, d->driver.mod_name); | ||
111 | } | ||
112 | |||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | static int __init ide_scan_pci(void) | ||
117 | { | ||
118 | return ide_scan_pcibus(); | ||
119 | } | ||
120 | |||
121 | module_init(ide_scan_pci); | ||
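A PCI IDE host driver does not need to change for this: it keeps registering a pci_driver, and __ide_pci_register_driver() either queues it for the ordered boot-time scan or passes it straight to the PCI core once pre_init is cleared. A sketch, assuming the usual ide_pci_register_driver() wrapper macro (passing THIS_MODULE/KBUILD_MODNAME) in <linux/ide.h>; every "example_*" name is hypothetical:

    static struct pci_driver example_pci_driver = {
            .name     = "EXAMPLE_IDE",      /* hypothetical */
            .id_table = example_pci_tbl,    /* hypothetical */
            .probe    = example_init_one,   /* hypothetical */
    };

    static int __init example_ide_init(void)
    {
            return ide_pci_register_driver(&example_pci_driver);
    }

    module_init(example_ide_init);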
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 3cbca3f4628a..d71a584f0765 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c | |||
@@ -1690,6 +1690,11 @@ static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects) | |||
1690 | if (error) | 1690 | if (error) |
1691 | tape->failed_pc = NULL; | 1691 | tape->failed_pc = NULL; |
1692 | 1692 | ||
1693 | if (!blk_special_request(rq)) { | ||
1694 | ide_end_request(drive, uptodate, nr_sects); | ||
1695 | return 0; | ||
1696 | } | ||
1697 | |||
1693 | spin_lock_irqsave(&tape->spinlock, flags); | 1698 | spin_lock_irqsave(&tape->spinlock, flags); |
1694 | 1699 | ||
1695 | /* The request was a pipelined data transfer request */ | 1700 | /* The request was a pipelined data transfer request */ |
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c index 2d63ea9ee61b..5eb6fa15dc4d 100644 --- a/drivers/ide/ide-taskfile.c +++ b/drivers/ide/ide-taskfile.c | |||
@@ -35,34 +35,6 @@ | |||
35 | #include <asm/uaccess.h> | 35 | #include <asm/uaccess.h> |
36 | #include <asm/io.h> | 36 | #include <asm/io.h> |
37 | 37 | ||
38 | static void ata_bswap_data (void *buffer, int wcount) | ||
39 | { | ||
40 | u16 *p = buffer; | ||
41 | |||
42 | while (wcount--) { | ||
43 | *p = *p << 8 | *p >> 8; p++; | ||
44 | *p = *p << 8 | *p >> 8; p++; | ||
45 | } | ||
46 | } | ||
47 | |||
48 | static void taskfile_input_data(ide_drive_t *drive, void *buffer, u32 wcount) | ||
49 | { | ||
50 | HWIF(drive)->ata_input_data(drive, buffer, wcount); | ||
51 | if (drive->bswap) | ||
52 | ata_bswap_data(buffer, wcount); | ||
53 | } | ||
54 | |||
55 | static void taskfile_output_data(ide_drive_t *drive, void *buffer, u32 wcount) | ||
56 | { | ||
57 | if (drive->bswap) { | ||
58 | ata_bswap_data(buffer, wcount); | ||
59 | HWIF(drive)->ata_output_data(drive, buffer, wcount); | ||
60 | ata_bswap_data(buffer, wcount); | ||
61 | } else { | ||
62 | HWIF(drive)->ata_output_data(drive, buffer, wcount); | ||
63 | } | ||
64 | } | ||
65 | |||
66 | void ide_tf_load(ide_drive_t *drive, ide_task_t *task) | 38 | void ide_tf_load(ide_drive_t *drive, ide_task_t *task) |
67 | { | 39 | { |
68 | ide_hwif_t *hwif = drive->hwif; | 40 | ide_hwif_t *hwif = drive->hwif; |
@@ -77,10 +49,13 @@ void ide_tf_load(ide_drive_t *drive, ide_task_t *task) | |||
77 | "lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n", | 49 | "lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n", |
78 | drive->name, tf->feature, tf->nsect, tf->lbal, | 50 | drive->name, tf->feature, tf->nsect, tf->lbal, |
79 | tf->lbam, tf->lbah, tf->device, tf->command); | 51 | tf->lbam, tf->lbah, tf->device, tf->command); |
52 | printk("%s: hob: nsect 0x%02x lbal 0x%02x " | ||
53 | "lbam 0x%02x lbah 0x%02x\n", | ||
54 | drive->name, tf->hob_nsect, tf->hob_lbal, | ||
55 | tf->hob_lbam, tf->hob_lbah); | ||
80 | #endif | 56 | #endif |
81 | 57 | ||
82 | if (IDE_CONTROL_REG) | 58 | ide_set_irq(drive, 1); |
83 | hwif->OUTB(drive->ctl, IDE_CONTROL_REG); /* clear nIEN */ | ||
84 | 59 | ||
85 | if ((task->tf_flags & IDE_TFLAG_NO_SELECT_MASK) == 0) | 60 | if ((task->tf_flags & IDE_TFLAG_NO_SELECT_MASK) == 0) |
86 | SELECT_MASK(drive, 0); | 61 | SELECT_MASK(drive, 0); |
@@ -124,7 +99,7 @@ int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf) | |||
124 | args.tf.command = WIN_IDENTIFY; | 99 | args.tf.command = WIN_IDENTIFY; |
125 | else | 100 | else |
126 | args.tf.command = WIN_PIDENTIFY; | 101 | args.tf.command = WIN_PIDENTIFY; |
127 | args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE; | 102 | args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; |
128 | args.data_phase = TASKFILE_IN; | 103 | args.data_phase = TASKFILE_IN; |
129 | return ide_raw_taskfile(drive, &args, buf, 1); | 104 | return ide_raw_taskfile(drive, &args, buf, 1); |
130 | } | 105 | } |
@@ -285,7 +260,7 @@ static ide_startstop_t task_no_data_intr(ide_drive_t *drive) | |||
285 | return ide_stopped; | 260 | return ide_stopped; |
286 | } | 261 | } |
287 | 262 | ||
288 | static u8 wait_drive_not_busy(ide_drive_t *drive) | 263 | u8 wait_drive_not_busy(ide_drive_t *drive) |
289 | { | 264 | { |
290 | ide_hwif_t *hwif = HWIF(drive); | 265 | ide_hwif_t *hwif = HWIF(drive); |
291 | int retries; | 266 | int retries; |
@@ -293,8 +268,7 @@ static u8 wait_drive_not_busy(ide_drive_t *drive) | |||
293 | 268 | ||
294 | /* | 269 | /* |
295 | * Last sector was transfered, wait until drive is ready. | 270 | * Last sector was transfered, wait until drive is ready. |
296 | * This can take up to 10 usec, but we will wait max 1 ms | 271 | * This can take up to 10 usec, but we will wait max 1 ms. |
297 | * (drive_cmd_intr() waits that long). | ||
298 | */ | 272 | */ |
299 | for (retries = 0; retries < 100; retries++) { | 273 | for (retries = 0; retries < 100; retries++) { |
300 | if ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) | 274 | if ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) |
@@ -349,9 +323,9 @@ static void ide_pio_sector(ide_drive_t *drive, unsigned int write) | |||
349 | 323 | ||
350 | /* do the actual data transfer */ | 324 | /* do the actual data transfer */ |
351 | if (write) | 325 | if (write) |
352 | taskfile_output_data(drive, buf, SECTOR_WORDS); | 326 | hwif->ata_output_data(drive, buf, SECTOR_WORDS); |
353 | else | 327 | else |
354 | taskfile_input_data(drive, buf, SECTOR_WORDS); | 328 | hwif->ata_input_data(drive, buf, SECTOR_WORDS); |
355 | 329 | ||
356 | kunmap_atomic(buf, KM_BIO_SRC_IRQ); | 330 | kunmap_atomic(buf, KM_BIO_SRC_IRQ); |
357 | #ifdef CONFIG_HIGHMEM | 331 | #ifdef CONFIG_HIGHMEM |
@@ -371,9 +345,18 @@ static void ide_pio_multi(ide_drive_t *drive, unsigned int write) | |||
371 | static void ide_pio_datablock(ide_drive_t *drive, struct request *rq, | 345 | static void ide_pio_datablock(ide_drive_t *drive, struct request *rq, |
372 | unsigned int write) | 346 | unsigned int write) |
373 | { | 347 | { |
348 | u8 saved_io_32bit = drive->io_32bit; | ||
349 | |||
374 | if (rq->bio) /* fs request */ | 350 | if (rq->bio) /* fs request */ |
375 | rq->errors = 0; | 351 | rq->errors = 0; |
376 | 352 | ||
353 | if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { | ||
354 | ide_task_t *task = rq->special; | ||
355 | |||
356 | if (task->tf_flags & IDE_TFLAG_IO_16BIT) | ||
357 | drive->io_32bit = 0; | ||
358 | } | ||
359 | |||
377 | touch_softlockup_watchdog(); | 360 | touch_softlockup_watchdog(); |
378 | 361 | ||
379 | switch (drive->hwif->data_phase) { | 362 | switch (drive->hwif->data_phase) { |
@@ -385,6 +368,8 @@ static void ide_pio_datablock(ide_drive_t *drive, struct request *rq, | |||
385 | ide_pio_sector(drive, write); | 368 | ide_pio_sector(drive, write); |
386 | break; | 369 | break; |
387 | } | 370 | } |
371 | |||
372 | drive->io_32bit = saved_io_32bit; | ||
388 | } | 373 | } |
389 | 374 | ||
390 | static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq, | 375 | static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq, |
@@ -422,27 +407,22 @@ static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq, | |||
422 | return ide_error(drive, s, stat); | 407 | return ide_error(drive, s, stat); |
423 | } | 408 | } |
424 | 409 | ||
425 | static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat) | 410 | void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat) |
426 | { | 411 | { |
427 | HWIF(drive)->cursg = NULL; | ||
428 | |||
429 | if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { | 412 | if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { |
430 | ide_task_t *task = rq->special; | 413 | u8 err = drive->hwif->INB(IDE_ERROR_REG); |
431 | 414 | ||
432 | if (task->tf_flags & IDE_TFLAG_FLAGGED) { | 415 | ide_end_drive_cmd(drive, stat, err); |
433 | u8 err = drive->hwif->INB(IDE_ERROR_REG); | 416 | return; |
434 | ide_end_drive_cmd(drive, stat, err); | ||
435 | return; | ||
436 | } | ||
437 | } | 417 | } |
438 | 418 | ||
439 | if (rq->rq_disk) { | 419 | if (rq->rq_disk) { |
440 | ide_driver_t *drv; | 420 | ide_driver_t *drv; |
441 | 421 | ||
442 | drv = *(ide_driver_t **)rq->rq_disk->private_data;; | 422 | drv = *(ide_driver_t **)rq->rq_disk->private_data;; |
443 | drv->end_request(drive, 1, rq->hard_nr_sectors); | 423 | drv->end_request(drive, 1, rq->nr_sectors); |
444 | } else | 424 | } else |
445 | ide_end_request(drive, 1, rq->hard_nr_sectors); | 425 | ide_end_request(drive, 1, rq->nr_sectors); |
446 | } | 426 | } |
447 | 427 | ||
448 | /* | 428 | /* |
@@ -455,7 +435,7 @@ static ide_startstop_t task_in_intr(ide_drive_t *drive) | |||
455 | u8 stat = hwif->INB(IDE_STATUS_REG); | 435 | u8 stat = hwif->INB(IDE_STATUS_REG); |
456 | 436 | ||
457 | /* new way for dealing with premature shared PCI interrupts */ | 437 | /* new way for dealing with premature shared PCI interrupts */ |
458 | if (!OK_STAT(stat, DATA_READY, BAD_R_STAT)) { | 438 | if (!OK_STAT(stat, DRQ_STAT, BAD_R_STAT)) { |
459 | if (stat & (ERR_STAT | DRQ_STAT)) | 439 | if (stat & (ERR_STAT | DRQ_STAT)) |
460 | return task_error(drive, rq, __FUNCTION__, stat); | 440 | return task_error(drive, rq, __FUNCTION__, stat); |
461 | /* No data yet, so wait for another IRQ. */ | 441 | /* No data yet, so wait for another IRQ. */ |
@@ -468,7 +448,7 @@ static ide_startstop_t task_in_intr(ide_drive_t *drive) | |||
468 | /* If it was the last datablock check status and finish transfer. */ | 448 | /* If it was the last datablock check status and finish transfer. */ |
469 | if (!hwif->nleft) { | 449 | if (!hwif->nleft) { |
470 | stat = wait_drive_not_busy(drive); | 450 | stat = wait_drive_not_busy(drive); |
471 | if (!OK_STAT(stat, 0, BAD_R_STAT)) | 451 | if (!OK_STAT(stat, 0, BAD_STAT)) |
472 | return task_error(drive, rq, __FUNCTION__, stat); | 452 | return task_error(drive, rq, __FUNCTION__, stat); |
473 | task_end_request(drive, rq, stat); | 453 | task_end_request(drive, rq, stat); |
474 | return ide_stopped; | 454 | return ide_stopped; |
@@ -512,7 +492,7 @@ static ide_startstop_t pre_task_out_intr(ide_drive_t *drive, struct request *rq) | |||
512 | { | 492 | { |
513 | ide_startstop_t startstop; | 493 | ide_startstop_t startstop; |
514 | 494 | ||
515 | if (ide_wait_stat(&startstop, drive, DATA_READY, | 495 | if (ide_wait_stat(&startstop, drive, DRQ_STAT, |
516 | drive->bad_wstat, WAIT_DRQ)) { | 496 | drive->bad_wstat, WAIT_DRQ)) { |
517 | printk(KERN_ERR "%s: no DRQ after issuing %sWRITE%s\n", | 497 | printk(KERN_ERR "%s: no DRQ after issuing %sWRITE%s\n", |
518 | drive->name, | 498 | drive->name, |
@@ -580,7 +560,6 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg) | |||
580 | unsigned int taskin = 0; | 560 | unsigned int taskin = 0; |
581 | unsigned int taskout = 0; | 561 | unsigned int taskout = 0; |
582 | u16 nsect = 0; | 562 | u16 nsect = 0; |
583 | u8 io_32bit = drive->io_32bit; | ||
584 | char __user *buf = (char __user *)arg; | 563 | char __user *buf = (char __user *)arg; |
585 | 564 | ||
586 | // printk("IDE Taskfile ...\n"); | 565 | // printk("IDE Taskfile ...\n"); |
@@ -633,9 +612,10 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg) | |||
633 | 612 | ||
634 | args.data_phase = req_task->data_phase; | 613 | args.data_phase = req_task->data_phase; |
635 | 614 | ||
636 | args.tf_flags = IDE_TFLAG_OUT_DEVICE; | 615 | args.tf_flags = IDE_TFLAG_IO_16BIT | IDE_TFLAG_DEVICE | |
616 | IDE_TFLAG_IN_TF; | ||
637 | if (drive->addressing == 1) | 617 | if (drive->addressing == 1) |
638 | args.tf_flags |= IDE_TFLAG_LBA48; | 618 | args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_IN_HOB); |
639 | 619 | ||
640 | if (req_task->out_flags.all) { | 620 | if (req_task->out_flags.all) { |
641 | args.tf_flags |= IDE_TFLAG_FLAGGED; | 621 | args.tf_flags |= IDE_TFLAG_FLAGGED; |
@@ -671,7 +651,6 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg) | |||
671 | if (req_task->in_flags.b.data) | 651 | if (req_task->in_flags.b.data) |
672 | args.tf_flags |= IDE_TFLAG_IN_DATA; | 652 | args.tf_flags |= IDE_TFLAG_IN_DATA; |
673 | 653 | ||
674 | drive->io_32bit = 0; | ||
675 | switch(req_task->data_phase) { | 654 | switch(req_task->data_phase) { |
676 | case TASKFILE_MULTI_OUT: | 655 | case TASKFILE_MULTI_OUT: |
677 | if (!drive->mult_count) { | 656 | if (!drive->mult_count) { |
@@ -767,41 +746,24 @@ abort: | |||
767 | 746 | ||
768 | // printk("IDE Taskfile ioctl ended. rc = %i\n", err); | 747 | // printk("IDE Taskfile ioctl ended. rc = %i\n", err); |
769 | 748 | ||
770 | drive->io_32bit = io_32bit; | ||
771 | |||
772 | return err; | 749 | return err; |
773 | } | 750 | } |
774 | #endif | 751 | #endif |
775 | 752 | ||
776 | int ide_wait_cmd (ide_drive_t *drive, u8 cmd, u8 nsect, u8 feature, u8 sectors, u8 *buf) | ||
777 | { | ||
778 | struct request rq; | ||
779 | u8 buffer[4]; | ||
780 | |||
781 | if (!buf) | ||
782 | buf = buffer; | ||
783 | memset(buf, 0, 4 + SECTOR_WORDS * 4 * sectors); | ||
784 | ide_init_drive_cmd(&rq); | ||
785 | rq.buffer = buf; | ||
786 | *buf++ = cmd; | ||
787 | *buf++ = nsect; | ||
788 | *buf++ = feature; | ||
789 | *buf++ = sectors; | ||
790 | return ide_do_drive_cmd(drive, &rq, ide_wait); | ||
791 | } | ||
792 | |||
793 | int ide_cmd_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg) | 753 | int ide_cmd_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg) |
794 | { | 754 | { |
795 | int err = 0; | 755 | u8 *buf = NULL; |
796 | u8 args[4], *argbuf = args; | 756 | int bufsize = 0, err = 0; |
797 | u8 xfer_rate = 0; | 757 | u8 args[4], xfer_rate = 0; |
798 | int argsize = 4; | ||
799 | ide_task_t tfargs; | 758 | ide_task_t tfargs; |
800 | struct ide_taskfile *tf = &tfargs.tf; | 759 | struct ide_taskfile *tf = &tfargs.tf; |
801 | 760 | ||
802 | if (NULL == (void *) arg) { | 761 | if (NULL == (void *) arg) { |
803 | struct request rq; | 762 | struct request rq; |
763 | |||
804 | ide_init_drive_cmd(&rq); | 764 | ide_init_drive_cmd(&rq); |
765 | rq.cmd_type = REQ_TYPE_ATA_TASKFILE; | ||
766 | |||
805 | return ide_do_drive_cmd(drive, &rq, ide_wait); | 767 | return ide_do_drive_cmd(drive, &rq, ide_wait); |
806 | } | 768 | } |
807 | 769 | ||
@@ -810,23 +772,39 @@ int ide_cmd_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg) | |||
810 | 772 | ||
811 | memset(&tfargs, 0, sizeof(ide_task_t)); | 773 | memset(&tfargs, 0, sizeof(ide_task_t)); |
812 | tf->feature = args[2]; | 774 | tf->feature = args[2]; |
813 | tf->nsect = args[3]; | 775 | if (args[0] == WIN_SMART) { |
814 | tf->lbal = args[1]; | 776 | tf->nsect = args[3]; |
777 | tf->lbal = args[1]; | ||
778 | tf->lbam = 0x4f; | ||
779 | tf->lbah = 0xc2; | ||
780 | tfargs.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_IN_NSECT; | ||
781 | } else { | ||
782 | tf->nsect = args[1]; | ||
783 | tfargs.tf_flags = IDE_TFLAG_OUT_FEATURE | | ||
784 | IDE_TFLAG_OUT_NSECT | IDE_TFLAG_IN_NSECT; | ||
785 | } | ||
815 | tf->command = args[0]; | 786 | tf->command = args[0]; |
787 | tfargs.data_phase = args[3] ? TASKFILE_IN : TASKFILE_NO_DATA; | ||
816 | 788 | ||
817 | if (args[3]) { | 789 | if (args[3]) { |
818 | argsize = 4 + (SECTOR_WORDS * 4 * args[3]); | 790 | tfargs.tf_flags |= IDE_TFLAG_IO_16BIT; |
819 | argbuf = kzalloc(argsize, GFP_KERNEL); | 791 | bufsize = SECTOR_WORDS * 4 * args[3]; |
820 | if (argbuf == NULL) | 792 | buf = kzalloc(bufsize, GFP_KERNEL); |
793 | if (buf == NULL) | ||
821 | return -ENOMEM; | 794 | return -ENOMEM; |
822 | } | 795 | } |
796 | |||
823 | if (set_transfer(drive, &tfargs)) { | 797 | if (set_transfer(drive, &tfargs)) { |
824 | xfer_rate = args[1]; | 798 | xfer_rate = args[1]; |
825 | if (ide_ata66_check(drive, &tfargs)) | 799 | if (ide_ata66_check(drive, &tfargs)) |
826 | goto abort; | 800 | goto abort; |
827 | } | 801 | } |
828 | 802 | ||
829 | err = ide_wait_cmd(drive, args[0], args[1], args[2], args[3], argbuf); | 803 | err = ide_raw_taskfile(drive, &tfargs, buf, args[3]); |
804 | |||
805 | args[0] = tf->status; | ||
806 | args[1] = tf->error; | ||
807 | args[2] = tf->nsect; | ||
830 | 808 | ||
831 | if (!err && xfer_rate) { | 809 | if (!err && xfer_rate) { |
832 | /* active-retuning-calls future */ | 810 | /* active-retuning-calls future */ |
@@ -834,10 +812,13 @@ int ide_cmd_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg) | |||
834 | ide_driveid_update(drive); | 812 | ide_driveid_update(drive); |
835 | } | 813 | } |
836 | abort: | 814 | abort: |
837 | if (copy_to_user((void __user *)arg, argbuf, argsize)) | 815 | if (copy_to_user((void __user *)arg, &args, 4)) |
838 | err = -EFAULT; | 816 | err = -EFAULT; |
839 | if (argsize > 4) | 817 | if (buf) { |
840 | kfree(argbuf); | 818 | if (copy_to_user((void __user *)(arg + 4), buf, bufsize)) |
819 | err = -EFAULT; | ||
820 | kfree(buf); | ||
821 | } | ||
841 | return err; | 822 | return err; |
842 | } | 823 | } |
843 | 824 | ||
@@ -854,7 +835,7 @@ int ide_task_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg) | |||
854 | memset(&task, 0, sizeof(task)); | 835 | memset(&task, 0, sizeof(task)); |
855 | memcpy(&task.tf_array[7], &args[1], 6); | 836 | memcpy(&task.tf_array[7], &args[1], 6); |
856 | task.tf.command = args[0]; | 837 | task.tf.command = args[0]; |
857 | task.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE; | 838 | task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; |
858 | 839 | ||
859 | err = ide_no_data_taskfile(drive, &task); | 840 | err = ide_no_data_taskfile(drive, &task); |
860 | 841 | ||
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c index c6d4f630e18a..97894abd9ebc 100644 --- a/drivers/ide/ide.c +++ b/drivers/ide/ide.c | |||
@@ -95,7 +95,7 @@ DEFINE_MUTEX(ide_cfg_mtx); | |||
95 | __cacheline_aligned_in_smp DEFINE_SPINLOCK(ide_lock); | 95 | __cacheline_aligned_in_smp DEFINE_SPINLOCK(ide_lock); |
96 | 96 | ||
97 | #ifdef CONFIG_IDEPCI_PCIBUS_ORDER | 97 | #ifdef CONFIG_IDEPCI_PCIBUS_ORDER |
98 | static int ide_scan_direction; /* THIS was formerly 2.2.x pci=reverse */ | 98 | int ide_scan_direction; /* THIS was formerly 2.2.x pci=reverse */ |
99 | #endif | 99 | #endif |
100 | 100 | ||
101 | int noautodma = 0; | 101 | int noautodma = 0; |
@@ -116,7 +116,7 @@ EXPORT_SYMBOL(ide_hwifs); | |||
116 | /* | 116 | /* |
117 | * Do not even *think* about calling this! | 117 | * Do not even *think* about calling this! |
118 | */ | 118 | */ |
119 | static void init_hwif_data(ide_hwif_t *hwif, unsigned int index) | 119 | void ide_init_port_data(ide_hwif_t *hwif, unsigned int index) |
120 | { | 120 | { |
121 | unsigned int unit; | 121 | unsigned int unit; |
122 | 122 | ||
@@ -159,6 +159,7 @@ static void init_hwif_data(ide_hwif_t *hwif, unsigned int index) | |||
159 | init_completion(&drive->gendev_rel_comp); | 159 | init_completion(&drive->gendev_rel_comp); |
160 | } | 160 | } |
161 | } | 161 | } |
162 | EXPORT_SYMBOL_GPL(ide_init_port_data); | ||
162 | 163 | ||
163 | static void init_hwif_default(ide_hwif_t *hwif, unsigned int index) | 164 | static void init_hwif_default(ide_hwif_t *hwif, unsigned int index) |
164 | { | 165 | { |
@@ -177,8 +178,6 @@ static void init_hwif_default(ide_hwif_t *hwif, unsigned int index) | |||
177 | #endif | 178 | #endif |
178 | } | 179 | } |
179 | 180 | ||
180 | extern void ide_arm_init(void); | ||
181 | |||
182 | /* | 181 | /* |
183 | * init_ide_data() sets reasonable default values into all fields | 182 | * init_ide_data() sets reasonable default values into all fields |
184 | * of all instances of the hwifs and drives, but only on the first call. | 183 | * of all instances of the hwifs and drives, but only on the first call. |
@@ -210,16 +209,13 @@ static void __init init_ide_data (void) | |||
210 | /* Initialise all interface structures */ | 209 | /* Initialise all interface structures */ |
211 | for (index = 0; index < MAX_HWIFS; ++index) { | 210 | for (index = 0; index < MAX_HWIFS; ++index) { |
212 | hwif = &ide_hwifs[index]; | 211 | hwif = &ide_hwifs[index]; |
213 | init_hwif_data(hwif, index); | 212 | ide_init_port_data(hwif, index); |
214 | init_hwif_default(hwif, index); | 213 | init_hwif_default(hwif, index); |
215 | #if !defined(CONFIG_PPC32) || !defined(CONFIG_PCI) | 214 | #if !defined(CONFIG_PPC32) || !defined(CONFIG_PCI) |
216 | hwif->irq = | 215 | hwif->irq = |
217 | ide_init_default_irq(hwif->io_ports[IDE_DATA_OFFSET]); | 216 | ide_init_default_irq(hwif->io_ports[IDE_DATA_OFFSET]); |
218 | #endif | 217 | #endif |
219 | } | 218 | } |
220 | #ifdef CONFIG_IDE_ARM | ||
221 | ide_arm_init(); | ||
222 | #endif | ||
223 | } | 219 | } |
224 | 220 | ||
225 | /** | 221 | /** |
@@ -414,8 +410,6 @@ static void ide_hwif_restore(ide_hwif_t *hwif, ide_hwif_t *tmp_hwif) | |||
414 | hwif->cds = tmp_hwif->cds; | 410 | hwif->cds = tmp_hwif->cds; |
415 | #endif | 411 | #endif |
416 | 412 | ||
417 | hwif->fixup = tmp_hwif->fixup; | ||
418 | |||
419 | hwif->set_pio_mode = tmp_hwif->set_pio_mode; | 413 | hwif->set_pio_mode = tmp_hwif->set_pio_mode; |
420 | hwif->set_dma_mode = tmp_hwif->set_dma_mode; | 414 | hwif->set_dma_mode = tmp_hwif->set_dma_mode; |
421 | hwif->mdma_filter = tmp_hwif->mdma_filter; | 415 | hwif->mdma_filter = tmp_hwif->mdma_filter; |
@@ -433,16 +427,13 @@ static void ide_hwif_restore(ide_hwif_t *hwif, ide_hwif_t *tmp_hwif) | |||
433 | hwif->atapi_input_bytes = tmp_hwif->atapi_input_bytes; | 427 | hwif->atapi_input_bytes = tmp_hwif->atapi_input_bytes; |
434 | hwif->atapi_output_bytes = tmp_hwif->atapi_output_bytes; | 428 | hwif->atapi_output_bytes = tmp_hwif->atapi_output_bytes; |
435 | 429 | ||
430 | hwif->dma_host_set = tmp_hwif->dma_host_set; | ||
436 | hwif->dma_setup = tmp_hwif->dma_setup; | 431 | hwif->dma_setup = tmp_hwif->dma_setup; |
437 | hwif->dma_exec_cmd = tmp_hwif->dma_exec_cmd; | 432 | hwif->dma_exec_cmd = tmp_hwif->dma_exec_cmd; |
438 | hwif->dma_start = tmp_hwif->dma_start; | 433 | hwif->dma_start = tmp_hwif->dma_start; |
439 | hwif->ide_dma_end = tmp_hwif->ide_dma_end; | 434 | hwif->ide_dma_end = tmp_hwif->ide_dma_end; |
440 | hwif->ide_dma_on = tmp_hwif->ide_dma_on; | ||
441 | hwif->dma_off_quietly = tmp_hwif->dma_off_quietly; | ||
442 | hwif->ide_dma_test_irq = tmp_hwif->ide_dma_test_irq; | 435 | hwif->ide_dma_test_irq = tmp_hwif->ide_dma_test_irq; |
443 | hwif->ide_dma_clear_irq = tmp_hwif->ide_dma_clear_irq; | 436 | hwif->ide_dma_clear_irq = tmp_hwif->ide_dma_clear_irq; |
444 | hwif->dma_host_on = tmp_hwif->dma_host_on; | ||
445 | hwif->dma_host_off = tmp_hwif->dma_host_off; | ||
446 | hwif->dma_lost_irq = tmp_hwif->dma_lost_irq; | 437 | hwif->dma_lost_irq = tmp_hwif->dma_lost_irq; |
447 | hwif->dma_timeout = tmp_hwif->dma_timeout; | 438 | hwif->dma_timeout = tmp_hwif->dma_timeout; |
448 | 439 | ||
@@ -614,7 +605,7 @@ void ide_unregister(unsigned int index) | |||
614 | tmp_hwif = *hwif; | 605 | tmp_hwif = *hwif; |
615 | 606 | ||
616 | /* restore hwif data to pristine status */ | 607 | /* restore hwif data to pristine status */ |
617 | init_hwif_data(hwif, index); | 608 | ide_init_port_data(hwif, index); |
618 | init_hwif_default(hwif, index); | 609 | init_hwif_default(hwif, index); |
619 | 610 | ||
620 | ide_hwif_restore(hwif, &tmp_hwif); | 611 | ide_hwif_restore(hwif, &tmp_hwif); |
@@ -680,24 +671,34 @@ void ide_setup_ports ( hw_regs_t *hw, | |||
680 | */ | 671 | */ |
681 | } | 672 | } |
682 | 673 | ||
674 | void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw) | ||
675 | { | ||
676 | memcpy(hwif->io_ports, hw->io_ports, sizeof(hwif->io_ports)); | ||
677 | hwif->irq = hw->irq; | ||
678 | hwif->noprobe = 0; | ||
679 | hwif->chipset = hw->chipset; | ||
680 | hwif->gendev.parent = hw->dev; | ||
681 | hwif->ack_intr = hw->ack_intr; | ||
682 | } | ||
683 | EXPORT_SYMBOL_GPL(ide_init_port_hw); | ||
684 | |||
683 | /** | 685 | /** |
684 | * ide_register_hw - register IDE interface | 686 | * ide_register_hw - register IDE interface |
685 | * @hw: hardware registers | 687 | * @hw: hardware registers |
686 | * @fixup: fixup function | 688 | * @quirkproc: quirkproc function |
687 | * @initializing: set while initializing built-in drivers | ||
688 | * @hwifp: pointer to returned hwif | 689 | * @hwifp: pointer to returned hwif |
689 | * | 690 | * |
690 | * Register an IDE interface, specifying exactly the registers etc. | 691 | * Register an IDE interface, specifying exactly the registers etc. |
691 | * Set init=1 iff calling before probes have taken place. | ||
692 | * | 692 | * |
693 | * Returns -1 on error. | 693 | * Returns -1 on error. |
694 | */ | 694 | */ |
695 | 695 | ||
696 | int ide_register_hw(hw_regs_t *hw, void (*fixup)(ide_hwif_t *), | 696 | int ide_register_hw(hw_regs_t *hw, void (*quirkproc)(ide_drive_t *), |
697 | int initializing, ide_hwif_t **hwifp) | 697 | ide_hwif_t **hwifp) |
698 | { | 698 | { |
699 | int index, retry = 1; | 699 | int index, retry = 1; |
700 | ide_hwif_t *hwif; | 700 | ide_hwif_t *hwif; |
701 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | ||
701 | 702 | ||
702 | do { | 703 | do { |
703 | for (index = 0; index < MAX_HWIFS; ++index) { | 704 | for (index = 0; index < MAX_HWIFS; ++index) { |
@@ -709,8 +710,7 @@ int ide_register_hw(hw_regs_t *hw, void (*fixup)(ide_hwif_t *), | |||
709 | hwif = &ide_hwifs[index]; | 710 | hwif = &ide_hwifs[index]; |
710 | if (hwif->hold) | 711 | if (hwif->hold) |
711 | continue; | 712 | continue; |
712 | if ((!hwif->present && !hwif->mate && !initializing) || | 713 | if (!hwif->present && hwif->mate == NULL) |
713 | (!hwif->io_ports[IDE_DATA_OFFSET] && initializing)) | ||
714 | goto found; | 714 | goto found; |
715 | } | 715 | } |
716 | for (index = 0; index < MAX_HWIFS; index++) | 716 | for (index = 0; index < MAX_HWIFS; index++) |
@@ -721,29 +721,23 @@ found: | |||
721 | if (hwif->present) | 721 | if (hwif->present) |
722 | ide_unregister(index); | 722 | ide_unregister(index); |
723 | else if (!hwif->hold) { | 723 | else if (!hwif->hold) { |
724 | init_hwif_data(hwif, index); | 724 | ide_init_port_data(hwif, index); |
725 | init_hwif_default(hwif, index); | 725 | init_hwif_default(hwif, index); |
726 | } | 726 | } |
727 | if (hwif->present) | 727 | if (hwif->present) |
728 | return -1; | 728 | return -1; |
729 | memcpy(hwif->io_ports, hw->io_ports, sizeof(hwif->io_ports)); | ||
730 | hwif->irq = hw->irq; | ||
731 | hwif->noprobe = 0; | ||
732 | hwif->fixup = fixup; | ||
733 | hwif->chipset = hw->chipset; | ||
734 | hwif->gendev.parent = hw->dev; | ||
735 | hwif->ack_intr = hw->ack_intr; | ||
736 | 729 | ||
737 | if (initializing == 0) { | 730 | ide_init_port_hw(hwif, hw); |
738 | u8 idx[4] = { index, 0xff, 0xff, 0xff }; | 731 | hwif->quirkproc = quirkproc; |
739 | 732 | ||
740 | ide_device_add(idx); | 733 | idx[0] = index; |
741 | } | 734 | |
735 | ide_device_add(idx); | ||
742 | 736 | ||
743 | if (hwifp) | 737 | if (hwifp) |
744 | *hwifp = hwif; | 738 | *hwifp = hwif; |
745 | 739 | ||
746 | return (initializing || hwif->present) ? index : -1; | 740 | return hwif->present ? index : -1; |
747 | } | 741 | } |
748 | 742 | ||
749 | EXPORT_SYMBOL(ide_register_hw); | 743 | EXPORT_SYMBOL(ide_register_hw); |
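
The hunks above change ide_register_hw() in two ways: the `initializing` flag is gone (ide_device_add() is now called unconditionally), and the per-hwif `fixup` hook gives way to a quirkproc that operates on an ide_drive_t and is stored in hwif->quirkproc. A minimal sketch of a caller under the new signature follows; the mycard_* names are invented for illustration, and only hw_regs_t, ide_init_hwif_ports() and ide_register_hw() come from the code above (ide-cs.c further down is the real in-tree caller touched by this patch).

/* Hypothetical caller of the reworked ide_register_hw(). */
static void mycard_quirkproc(ide_drive_t *drive)
{
	/* per-drive tweaks would go here; note the ide_drive_t argument,
	 * the old fixup hook took an ide_hwif_t instead */
}

static int mycard_attach(unsigned long io, unsigned long ctl, int irq,
			 struct device *dev)
{
	hw_regs_t hw;

	memset(&hw, 0, sizeof(hw));
	ide_init_hwif_ports(&hw, io, ctl, NULL);
	hw.irq = irq;
	hw.dev = dev;

	/* no "initializing" argument any more; returns the port index or -1 */
	return ide_register_hw(&hw, &mycard_quirkproc, NULL);
}
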
@@ -836,7 +830,7 @@ int set_using_dma(ide_drive_t *drive, int arg) | |||
836 | if (!drive->id || !(drive->id->capability & 1)) | 830 | if (!drive->id || !(drive->id->capability & 1)) |
837 | goto out; | 831 | goto out; |
838 | 832 | ||
839 | if (hwif->ide_dma_on == NULL) | 833 | if (hwif->dma_host_set == NULL) |
840 | goto out; | 834 | goto out; |
841 | 835 | ||
842 | err = -EBUSY; | 836 | err = -EBUSY; |
@@ -884,7 +878,10 @@ int set_pio_mode(ide_drive_t *drive, int arg) | |||
884 | 878 | ||
885 | if (drive->special.b.set_tune) | 879 | if (drive->special.b.set_tune) |
886 | return -EBUSY; | 880 | return -EBUSY; |
881 | |||
887 | ide_init_drive_cmd(&rq); | 882 | ide_init_drive_cmd(&rq); |
883 | rq.cmd_type = REQ_TYPE_ATA_TASKFILE; | ||
884 | |||
888 | drive->tune_req = (u8) arg; | 885 | drive->tune_req = (u8) arg; |
889 | drive->special.b.set_tune = 1; | 886 | drive->special.b.set_tune = 1; |
890 | (void) ide_do_drive_cmd(drive, &rq, ide_wait); | 887 | (void) ide_do_drive_cmd(drive, &rq, ide_wait); |
@@ -1066,7 +1063,7 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device | |||
1066 | ide_init_hwif_ports(&hw, (unsigned long) args[0], | 1063 | ide_init_hwif_ports(&hw, (unsigned long) args[0], |
1067 | (unsigned long) args[1], NULL); | 1064 | (unsigned long) args[1], NULL); |
1068 | hw.irq = args[2]; | 1065 | hw.irq = args[2]; |
1069 | if (ide_register_hw(&hw, NULL, 0, NULL) == -1) | 1066 | if (ide_register_hw(&hw, NULL, NULL) == -1) |
1070 | return -EIO; | 1067 | return -EIO; |
1071 | return 0; | 1068 | return 0; |
1072 | } | 1069 | } |
@@ -1227,26 +1224,12 @@ static int __init match_parm (char *s, const char *keywords[], int vals[], int m | |||
1227 | return 0; /* zero = nothing matched */ | 1224 | return 0; /* zero = nothing matched */ |
1228 | } | 1225 | } |
1229 | 1226 | ||
1230 | #ifdef CONFIG_BLK_DEV_ALI14XX | ||
1231 | extern int probe_ali14xx; | 1227 | extern int probe_ali14xx; |
1232 | extern int ali14xx_init(void); | ||
1233 | #endif | ||
1234 | #ifdef CONFIG_BLK_DEV_UMC8672 | ||
1235 | extern int probe_umc8672; | 1228 | extern int probe_umc8672; |
1236 | extern int umc8672_init(void); | ||
1237 | #endif | ||
1238 | #ifdef CONFIG_BLK_DEV_DTC2278 | ||
1239 | extern int probe_dtc2278; | 1229 | extern int probe_dtc2278; |
1240 | extern int dtc2278_init(void); | ||
1241 | #endif | ||
1242 | #ifdef CONFIG_BLK_DEV_HT6560B | ||
1243 | extern int probe_ht6560b; | 1230 | extern int probe_ht6560b; |
1244 | extern int ht6560b_init(void); | ||
1245 | #endif | ||
1246 | #ifdef CONFIG_BLK_DEV_QD65XX | ||
1247 | extern int probe_qd65xx; | 1231 | extern int probe_qd65xx; |
1248 | extern int qd65xx_init(void); | 1232 | extern int cmd640_vlb; |
1249 | #endif | ||
1250 | 1233 | ||
1251 | static int __initdata is_chipset_set[MAX_HWIFS]; | 1234 | static int __initdata is_chipset_set[MAX_HWIFS]; |
1252 | 1235 | ||
@@ -1323,7 +1306,7 @@ static int __init ide_setup(char *s) | |||
1323 | if (s[0] == 'h' && s[1] == 'd' && s[2] >= 'a' && s[2] <= max_drive) { | 1306 | if (s[0] == 'h' && s[1] == 'd' && s[2] >= 'a' && s[2] <= max_drive) { |
1324 | const char *hd_words[] = { | 1307 | const char *hd_words[] = { |
1325 | "none", "noprobe", "nowerr", "cdrom", "nodma", | 1308 | "none", "noprobe", "nowerr", "cdrom", "nodma", |
1326 | "autotune", "noautotune", "minus8", "swapdata", "bswap", | 1309 | "autotune", "noautotune", "-8", "-9", "-10", |
1327 | "noflush", "remap", "remap63", "scsi", NULL }; | 1310 | "noflush", "remap", "remap63", "scsi", NULL }; |
1328 | unit = s[2] - 'a'; | 1311 | unit = s[2] - 'a'; |
1329 | hw = unit / MAX_DRIVES; | 1312 | hw = unit / MAX_DRIVES; |
@@ -1359,10 +1342,6 @@ static int __init ide_setup(char *s) | |||
1359 | case -7: /* "noautotune" */ | 1342 | case -7: /* "noautotune" */ |
1360 | drive->autotune = IDE_TUNE_NOAUTO; | 1343 | drive->autotune = IDE_TUNE_NOAUTO; |
1361 | goto obsolete_option; | 1344 | goto obsolete_option; |
1362 | case -9: /* "swapdata" */ | ||
1363 | case -10: /* "bswap" */ | ||
1364 | drive->bswap = 1; | ||
1365 | goto done; | ||
1366 | case -11: /* noflush */ | 1345 | case -11: /* noflush */ |
1367 | drive->noflush = 1; | 1346 | drive->noflush = 1; |
1368 | goto done; | 1347 | goto done; |
@@ -1462,11 +1441,8 @@ static int __init ide_setup(char *s) | |||
1462 | #endif | 1441 | #endif |
1463 | #ifdef CONFIG_BLK_DEV_CMD640 | 1442 | #ifdef CONFIG_BLK_DEV_CMD640 |
1464 | case -14: /* "cmd640_vlb" */ | 1443 | case -14: /* "cmd640_vlb" */ |
1465 | { | ||
1466 | extern int cmd640_vlb; /* flag for cmd640.c */ | ||
1467 | cmd640_vlb = 1; | 1444 | cmd640_vlb = 1; |
1468 | goto done; | 1445 | goto done; |
1469 | } | ||
1470 | #endif | 1446 | #endif |
1471 | #ifdef CONFIG_BLK_DEV_HT6560B | 1447 | #ifdef CONFIG_BLK_DEV_HT6560B |
1472 | case -13: /* "ht6560b" */ | 1448 | case -13: /* "ht6560b" */ |
@@ -1556,79 +1532,6 @@ done: | |||
1556 | return 1; | 1532 | return 1; |
1557 | } | 1533 | } |
1558 | 1534 | ||
1559 | extern void __init pnpide_init(void); | ||
1560 | extern void __exit pnpide_exit(void); | ||
1561 | extern void __init h8300_ide_init(void); | ||
1562 | |||
1563 | /* | ||
1564 | * probe_for_hwifs() finds/initializes "known" IDE interfaces | ||
1565 | */ | ||
1566 | static void __init probe_for_hwifs (void) | ||
1567 | { | ||
1568 | #ifdef CONFIG_IDEPCI_PCIBUS_ORDER | ||
1569 | ide_scan_pcibus(ide_scan_direction); | ||
1570 | #endif | ||
1571 | |||
1572 | #ifdef CONFIG_ETRAX_IDE | ||
1573 | { | ||
1574 | extern void init_e100_ide(void); | ||
1575 | init_e100_ide(); | ||
1576 | } | ||
1577 | #endif /* CONFIG_ETRAX_IDE */ | ||
1578 | #ifdef CONFIG_BLK_DEV_CMD640 | ||
1579 | { | ||
1580 | extern void ide_probe_for_cmd640x(void); | ||
1581 | ide_probe_for_cmd640x(); | ||
1582 | } | ||
1583 | #endif /* CONFIG_BLK_DEV_CMD640 */ | ||
1584 | #ifdef CONFIG_BLK_DEV_IDE_PMAC | ||
1585 | { | ||
1586 | extern int pmac_ide_probe(void); | ||
1587 | (void)pmac_ide_probe(); | ||
1588 | } | ||
1589 | #endif /* CONFIG_BLK_DEV_IDE_PMAC */ | ||
1590 | #ifdef CONFIG_BLK_DEV_GAYLE | ||
1591 | { | ||
1592 | extern void gayle_init(void); | ||
1593 | gayle_init(); | ||
1594 | } | ||
1595 | #endif /* CONFIG_BLK_DEV_GAYLE */ | ||
1596 | #ifdef CONFIG_BLK_DEV_FALCON_IDE | ||
1597 | { | ||
1598 | extern void falconide_init(void); | ||
1599 | falconide_init(); | ||
1600 | } | ||
1601 | #endif /* CONFIG_BLK_DEV_FALCON_IDE */ | ||
1602 | #ifdef CONFIG_BLK_DEV_MAC_IDE | ||
1603 | { | ||
1604 | extern void macide_init(void); | ||
1605 | macide_init(); | ||
1606 | } | ||
1607 | #endif /* CONFIG_BLK_DEV_MAC_IDE */ | ||
1608 | #ifdef CONFIG_BLK_DEV_Q40IDE | ||
1609 | { | ||
1610 | extern void q40ide_init(void); | ||
1611 | q40ide_init(); | ||
1612 | } | ||
1613 | #endif /* CONFIG_BLK_DEV_Q40IDE */ | ||
1614 | #ifdef CONFIG_BLK_DEV_BUDDHA | ||
1615 | { | ||
1616 | extern void buddha_init(void); | ||
1617 | buddha_init(); | ||
1618 | } | ||
1619 | #endif /* CONFIG_BLK_DEV_BUDDHA */ | ||
1620 | #ifdef CONFIG_BLK_DEV_IDEPNP | ||
1621 | pnpide_init(); | ||
1622 | #endif | ||
1623 | #ifdef CONFIG_H8300 | ||
1624 | h8300_ide_init(); | ||
1625 | #endif | ||
1626 | } | ||
1627 | |||
1628 | /* | ||
1629 | * Probe module | ||
1630 | */ | ||
1631 | |||
1632 | EXPORT_SYMBOL(ide_lock); | 1535 | EXPORT_SYMBOL(ide_lock); |
1633 | 1536 | ||
1634 | static int ide_bus_match(struct device *dev, struct device_driver *drv) | 1537 | static int ide_bus_match(struct device *dev, struct device_driver *drv) |
@@ -1775,30 +1678,6 @@ static int __init ide_init(void) | |||
1775 | 1678 | ||
1776 | proc_ide_create(); | 1679 | proc_ide_create(); |
1777 | 1680 | ||
1778 | #ifdef CONFIG_BLK_DEV_ALI14XX | ||
1779 | if (probe_ali14xx) | ||
1780 | (void)ali14xx_init(); | ||
1781 | #endif | ||
1782 | #ifdef CONFIG_BLK_DEV_UMC8672 | ||
1783 | if (probe_umc8672) | ||
1784 | (void)umc8672_init(); | ||
1785 | #endif | ||
1786 | #ifdef CONFIG_BLK_DEV_DTC2278 | ||
1787 | if (probe_dtc2278) | ||
1788 | (void)dtc2278_init(); | ||
1789 | #endif | ||
1790 | #ifdef CONFIG_BLK_DEV_HT6560B | ||
1791 | if (probe_ht6560b) | ||
1792 | (void)ht6560b_init(); | ||
1793 | #endif | ||
1794 | #ifdef CONFIG_BLK_DEV_QD65XX | ||
1795 | if (probe_qd65xx) | ||
1796 | (void)qd65xx_init(); | ||
1797 | #endif | ||
1798 | |||
1799 | /* Probe for special PCI and other "known" interface chipsets. */ | ||
1800 | probe_for_hwifs(); | ||
1801 | |||
1802 | return 0; | 1681 | return 0; |
1803 | } | 1682 | } |
1804 | 1683 | ||
@@ -1834,10 +1713,6 @@ void __exit cleanup_module (void) | |||
1834 | for (index = 0; index < MAX_HWIFS; ++index) | 1713 | for (index = 0; index < MAX_HWIFS; ++index) |
1835 | ide_unregister(index); | 1714 | ide_unregister(index); |
1836 | 1715 | ||
1837 | #ifdef CONFIG_BLK_DEV_IDEPNP | ||
1838 | pnpide_exit(); | ||
1839 | #endif | ||
1840 | |||
1841 | proc_ide_destroy(); | 1716 | proc_ide_destroy(); |
1842 | 1717 | ||
1843 | bus_unregister(&ide_bus_type); | 1718 | bus_unregister(&ide_bus_type); |
diff --git a/drivers/ide/legacy/Makefile b/drivers/ide/legacy/Makefile index 409822349f10..7043ec7d1e05 100644 --- a/drivers/ide/legacy/Makefile +++ b/drivers/ide/legacy/Makefile | |||
@@ -1,15 +1,24 @@ | |||
1 | 1 | ||
2 | # link order is important here | ||
3 | |||
2 | obj-$(CONFIG_BLK_DEV_ALI14XX) += ali14xx.o | 4 | obj-$(CONFIG_BLK_DEV_ALI14XX) += ali14xx.o |
5 | obj-$(CONFIG_BLK_DEV_UMC8672) += umc8672.o | ||
3 | obj-$(CONFIG_BLK_DEV_DTC2278) += dtc2278.o | 6 | obj-$(CONFIG_BLK_DEV_DTC2278) += dtc2278.o |
4 | obj-$(CONFIG_BLK_DEV_HT6560B) += ht6560b.o | 7 | obj-$(CONFIG_BLK_DEV_HT6560B) += ht6560b.o |
5 | obj-$(CONFIG_BLK_DEV_QD65XX) += qd65xx.o | 8 | obj-$(CONFIG_BLK_DEV_QD65XX) += qd65xx.o |
6 | obj-$(CONFIG_BLK_DEV_UMC8672) += umc8672.o | ||
7 | 9 | ||
8 | obj-$(CONFIG_BLK_DEV_IDECS) += ide-cs.o | 10 | obj-$(CONFIG_BLK_DEV_GAYLE) += gayle.o |
11 | obj-$(CONFIG_BLK_DEV_FALCON_IDE) += falconide.o | ||
12 | obj-$(CONFIG_BLK_DEV_MAC_IDE) += macide.o | ||
13 | obj-$(CONFIG_BLK_DEV_Q40IDE) += q40ide.o | ||
14 | obj-$(CONFIG_BLK_DEV_BUDDHA) += buddha.o | ||
9 | 15 | ||
10 | obj-$(CONFIG_BLK_DEV_PLATFORM) += ide_platform.o | 16 | ifeq ($(CONFIG_BLK_DEV_IDECS), m) |
17 | obj-m += ide-cs.o | ||
18 | endif | ||
11 | 19 | ||
12 | # Last of all | 20 | ifeq ($(CONFIG_BLK_DEV_PLATFORM), m) |
13 | obj-$(CONFIG_BLK_DEV_HD) += hd.o | 21 | obj-m += ide_platform.o |
22 | endif | ||
14 | 23 | ||
15 | EXTRA_CFLAGS := -Idrivers/ide | 24 | EXTRA_CFLAGS := -Idrivers/ide |
diff --git a/drivers/ide/legacy/ali14xx.c b/drivers/ide/legacy/ali14xx.c index 38c3a6d63f30..5ec0be4cbad7 100644 --- a/drivers/ide/legacy/ali14xx.c +++ b/drivers/ide/legacy/ali14xx.c | |||
@@ -231,8 +231,7 @@ int probe_ali14xx = 0; | |||
231 | module_param_named(probe, probe_ali14xx, bool, 0); | 231 | module_param_named(probe, probe_ali14xx, bool, 0); |
232 | MODULE_PARM_DESC(probe, "probe for ALI M14xx chipsets"); | 232 | MODULE_PARM_DESC(probe, "probe for ALI M14xx chipsets"); |
233 | 233 | ||
234 | /* Can be called directly from ide.c. */ | 234 | static int __init ali14xx_init(void) |
235 | int __init ali14xx_init(void) | ||
236 | { | 235 | { |
237 | if (probe_ali14xx == 0) | 236 | if (probe_ali14xx == 0) |
238 | goto out; | 237 | goto out; |
@@ -248,9 +247,7 @@ out: | |||
248 | return -ENODEV; | 247 | return -ENODEV; |
249 | } | 248 | } |
250 | 249 | ||
251 | #ifdef MODULE | ||
252 | module_init(ali14xx_init); | 250 | module_init(ali14xx_init); |
253 | #endif | ||
254 | 251 | ||
255 | MODULE_AUTHOR("see local file"); | 252 | MODULE_AUTHOR("see local file"); |
256 | MODULE_DESCRIPTION("support of ALI 14XX IDE chipsets"); | 253 | MODULE_DESCRIPTION("support of ALI 14XX IDE chipsets"); |
diff --git a/drivers/ide/legacy/buddha.c b/drivers/ide/legacy/buddha.c index 4a0be251a05f..74d28e058f55 100644 --- a/drivers/ide/legacy/buddha.c +++ b/drivers/ide/legacy/buddha.c | |||
@@ -112,6 +112,7 @@ typedef enum BuddhaType_Enum { | |||
112 | BOARD_BUDDHA, BOARD_CATWEASEL, BOARD_XSURF | 112 | BOARD_BUDDHA, BOARD_CATWEASEL, BOARD_XSURF |
113 | } BuddhaType; | 113 | } BuddhaType; |
114 | 114 | ||
115 | static const char *buddha_board_name[] = { "Buddha", "Catweasel", "X-Surf" }; | ||
115 | 116 | ||
116 | /* | 117 | /* |
117 | * Check and acknowledge the interrupt status | 118 | * Check and acknowledge the interrupt status |
@@ -143,11 +144,11 @@ static int xsurf_ack_intr(ide_hwif_t *hwif) | |||
143 | * Probe for a Buddha or Catweasel IDE interface | 144 | * Probe for a Buddha or Catweasel IDE interface |
144 | */ | 145 | */ |
145 | 146 | ||
146 | void __init buddha_init(void) | 147 | static int __init buddha_init(void) |
147 | { | 148 | { |
148 | hw_regs_t hw; | 149 | hw_regs_t hw; |
149 | ide_hwif_t *hwif; | 150 | ide_hwif_t *hwif; |
150 | int i, index; | 151 | int i; |
151 | 152 | ||
152 | struct zorro_dev *z = NULL; | 153 | struct zorro_dev *z = NULL; |
153 | u_long buddha_board = 0; | 154 | u_long buddha_board = 0; |
@@ -156,6 +157,8 @@ void __init buddha_init(void) | |||
156 | 157 | ||
157 | while ((z = zorro_find_device(ZORRO_WILDCARD, z))) { | 158 | while ((z = zorro_find_device(ZORRO_WILDCARD, z))) { |
158 | unsigned long board; | 159 | unsigned long board; |
160 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | ||
161 | |||
159 | if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA) { | 162 | if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA) { |
160 | buddha_num_hwifs = BUDDHA_NUM_HWIFS; | 163 | buddha_num_hwifs = BUDDHA_NUM_HWIFS; |
161 | type=BOARD_BUDDHA; | 164 | type=BOARD_BUDDHA; |
@@ -195,7 +198,10 @@ fail_base2: | |||
195 | /* X-Surf doesn't have this. IRQs are always on */ | 198 | /* X-Surf doesn't have this. IRQs are always on */ |
196 | if (type != BOARD_XSURF) | 199 | if (type != BOARD_XSURF) |
197 | z_writeb(0, buddha_board+BUDDHA_IRQ_MR); | 200 | z_writeb(0, buddha_board+BUDDHA_IRQ_MR); |
198 | 201 | ||
202 | printk(KERN_INFO "ide: %s IDE controller\n", | ||
203 | buddha_board_name[type]); | ||
204 | |||
199 | for(i=0;i<buddha_num_hwifs;i++) { | 205 | for(i=0;i<buddha_num_hwifs;i++) { |
200 | if(type != BOARD_XSURF) { | 206 | if(type != BOARD_XSURF) { |
201 | ide_setup_ports(&hw, (buddha_board+buddha_bases[i]), | 207 | ide_setup_ports(&hw, (buddha_board+buddha_bases[i]), |
@@ -213,23 +219,23 @@ fail_base2: | |||
213 | IRQ_AMIGA_PORTS); | 219 | IRQ_AMIGA_PORTS); |
214 | } | 220 | } |
215 | 221 | ||
216 | index = ide_register_hw(&hw, NULL, 1, &hwif); | 222 | hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]); |
217 | if (index != -1) { | 223 | if (hwif) { |
224 | u8 index = hwif->index; | ||
225 | |||
226 | ide_init_port_data(hwif, index); | ||
227 | ide_init_port_hw(hwif, &hw); | ||
228 | |||
218 | hwif->mmio = 1; | 229 | hwif->mmio = 1; |
219 | printk("ide%d: ", index); | 230 | |
220 | switch(type) { | 231 | idx[i] = index; |
221 | case BOARD_BUDDHA: | 232 | } |
222 | printk("Buddha"); | ||
223 | break; | ||
224 | case BOARD_CATWEASEL: | ||
225 | printk("Catweasel"); | ||
226 | break; | ||
227 | case BOARD_XSURF: | ||
228 | printk("X-Surf"); | ||
229 | break; | ||
230 | } | ||
231 | printk(" IDE interface\n"); | ||
232 | } | ||
233 | } | 233 | } |
234 | |||
235 | ide_device_add(idx); | ||
234 | } | 236 | } |
237 | |||
238 | return 0; | ||
235 | } | 239 | } |
240 | |||
241 | module_init(buddha_init); | ||
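
buddha.c shows the registration pattern that the other board drivers in this patch (falconide, gayle, macide, q40ide) also adopt: instead of ide_register_hw(), the driver looks the port up with ide_find_port(), reinitializes it with ide_init_port_data() and ide_init_port_hw(), records the index in an idx[] array (0xff marks an unused slot) and finally hands the whole array to ide_device_add(). Boiled down to a skeleton, with the board-specific setup elided and myboard as a placeholder name:

static int __init myboard_init(void)
{
	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };	/* 0xff = empty slot */
	hw_regs_t hw;
	ide_hwif_t *hwif;

	memset(&hw, 0, sizeof(hw));
	/* fill hw.io_ports[], hw.irq and hw.ack_intr for the board here */

	hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
	if (hwif) {
		u8 index = hwif->index;

		ide_init_port_data(hwif, index);	/* reset to pristine defaults */
		ide_init_port_hw(hwif, &hw);		/* copy ports/irq/ack_intr over */
		hwif->mmio = 1;

		idx[0] = index;				/* queue it for probing */
	}

	ide_device_add(idx);	/* probe every collected port in one go */

	return 0;
}
module_init(myboard_init);
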
diff --git a/drivers/ide/legacy/dtc2278.c b/drivers/ide/legacy/dtc2278.c index 24a845d45bd2..13eee6da2806 100644 --- a/drivers/ide/legacy/dtc2278.c +++ b/drivers/ide/legacy/dtc2278.c | |||
@@ -150,8 +150,7 @@ int probe_dtc2278 = 0; | |||
150 | module_param_named(probe, probe_dtc2278, bool, 0); | 150 | module_param_named(probe, probe_dtc2278, bool, 0); |
151 | MODULE_PARM_DESC(probe, "probe for DTC2278xx chipsets"); | 151 | MODULE_PARM_DESC(probe, "probe for DTC2278xx chipsets"); |
152 | 152 | ||
153 | /* Can be called directly from ide.c. */ | 153 | static int __init dtc2278_init(void) |
154 | int __init dtc2278_init(void) | ||
155 | { | 154 | { |
156 | if (probe_dtc2278 == 0) | 155 | if (probe_dtc2278 == 0) |
157 | return -ENODEV; | 156 | return -ENODEV; |
@@ -163,9 +162,7 @@ int __init dtc2278_init(void) | |||
163 | return 0; | 162 | return 0; |
164 | } | 163 | } |
165 | 164 | ||
166 | #ifdef MODULE | ||
167 | module_init(dtc2278_init); | 165 | module_init(dtc2278_init); |
168 | #endif | ||
169 | 166 | ||
170 | MODULE_AUTHOR("See Local File"); | 167 | MODULE_AUTHOR("See Local File"); |
171 | MODULE_DESCRIPTION("support of DTC-2278 VLB IDE chipsets"); | 168 | MODULE_DESCRIPTION("support of DTC-2278 VLB IDE chipsets"); |
diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/legacy/falconide.c index 7d7936f1b900..2860956bdcb0 100644 --- a/drivers/ide/legacy/falconide.c +++ b/drivers/ide/legacy/falconide.c | |||
@@ -62,19 +62,31 @@ EXPORT_SYMBOL(falconide_intr_lock); | |||
62 | * Probe for a Falcon IDE interface | 62 | * Probe for a Falcon IDE interface |
63 | */ | 63 | */ |
64 | 64 | ||
65 | void __init falconide_init(void) | 65 | static int __init falconide_init(void) |
66 | { | 66 | { |
67 | if (MACH_IS_ATARI && ATARIHW_PRESENT(IDE)) { | 67 | if (MACH_IS_ATARI && ATARIHW_PRESENT(IDE)) { |
68 | hw_regs_t hw; | 68 | hw_regs_t hw; |
69 | int index; | 69 | |
70 | printk(KERN_INFO "ide: Falcon IDE controller\n"); | ||
70 | 71 | ||
71 | ide_setup_ports(&hw, ATA_HD_BASE, falconide_offsets, | 72 | ide_setup_ports(&hw, ATA_HD_BASE, falconide_offsets, |
72 | 0, 0, NULL, | 73 | 0, 0, NULL, |
73 | // falconide_iops, | 74 | // falconide_iops, |
74 | IRQ_MFP_IDE); | 75 | IRQ_MFP_IDE); |
75 | index = ide_register_hw(&hw, NULL, 1, NULL); | ||
76 | 76 | ||
77 | if (index != -1) | 77 | hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]); |
78 | printk("ide%d: Falcon IDE interface\n", index); | 78 | if (hwif) { |
79 | u8 index = hwif->index; | ||
80 | u8 idx[4] = { index, 0xff, 0xff, 0xff }; | ||
81 | |||
82 | ide_init_port_data(hwif, index); | ||
83 | ide_init_port_hw(hwif, &hw); | ||
84 | |||
85 | ide_device_add(idx); | ||
86 | } | ||
79 | } | 87 | } |
88 | |||
89 | return 0; | ||
80 | } | 90 | } |
91 | |||
92 | module_init(falconide_init); | ||
diff --git a/drivers/ide/legacy/gayle.c b/drivers/ide/legacy/gayle.c index 53331ee1e957..492fa047efc0 100644 --- a/drivers/ide/legacy/gayle.c +++ b/drivers/ide/legacy/gayle.c | |||
@@ -110,12 +110,13 @@ static int gayle_ack_intr_a1200(ide_hwif_t *hwif) | |||
110 | * Probe for a Gayle IDE interface (and optionally for an IDE doubler) | 110 | * Probe for a Gayle IDE interface (and optionally for an IDE doubler) |
111 | */ | 111 | */ |
112 | 112 | ||
113 | void __init gayle_init(void) | 113 | static int __init gayle_init(void) |
114 | { | 114 | { |
115 | int a4000, i; | 115 | int a4000, i; |
116 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | ||
116 | 117 | ||
117 | if (!MACH_IS_AMIGA) | 118 | if (!MACH_IS_AMIGA) |
118 | return; | 119 | return -ENODEV; |
119 | 120 | ||
120 | if ((a4000 = AMIGAHW_PRESENT(A4000_IDE)) || AMIGAHW_PRESENT(A1200_IDE)) | 121 | if ((a4000 = AMIGAHW_PRESENT(A4000_IDE)) || AMIGAHW_PRESENT(A1200_IDE)) |
121 | goto found; | 122 | goto found; |
@@ -125,15 +126,21 @@ void __init gayle_init(void) | |||
125 | NULL)) | 126 | NULL)) |
126 | goto found; | 127 | goto found; |
127 | #endif | 128 | #endif |
128 | return; | 129 | return -ENODEV; |
129 | 130 | ||
130 | found: | 131 | found: |
132 | printk(KERN_INFO "ide: Gayle IDE controller (A%d style%s)\n", | ||
133 | a4000 ? 4000 : 1200, | ||
134 | #ifdef CONFIG_BLK_DEV_IDEDOUBLER | ||
135 | ide_doubler ? ", IDE doubler" : | ||
136 | #endif | ||
137 | ""); | ||
138 | |||
131 | for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++) { | 139 | for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++) { |
132 | unsigned long base, ctrlport, irqport; | 140 | unsigned long base, ctrlport, irqport; |
133 | ide_ack_intr_t *ack_intr; | 141 | ide_ack_intr_t *ack_intr; |
134 | hw_regs_t hw; | 142 | hw_regs_t hw; |
135 | ide_hwif_t *hwif; | 143 | ide_hwif_t *hwif; |
136 | int index; | ||
137 | unsigned long phys_base, res_start, res_n; | 144 | unsigned long phys_base, res_start, res_n; |
138 | 145 | ||
139 | if (a4000) { | 146 | if (a4000) { |
@@ -165,21 +172,23 @@ found: | |||
165 | // &gayle_iops, | 172 | // &gayle_iops, |
166 | IRQ_AMIGA_PORTS); | 173 | IRQ_AMIGA_PORTS); |
167 | 174 | ||
168 | index = ide_register_hw(&hw, NULL, 1, &hwif); | 175 | hwif = ide_find_port(base); |
169 | if (index != -1) { | 176 | if (hwif) { |
177 | u8 index = hwif->index; | ||
178 | |||
179 | ide_init_port_data(hwif, index); | ||
180 | ide_init_port_hw(hwif, &hw); | ||
181 | |||
170 | hwif->mmio = 1; | 182 | hwif->mmio = 1; |
171 | switch (i) { | 183 | |
172 | case 0: | 184 | idx[i] = index; |
173 | printk("ide%d: Gayle IDE interface (A%d style)\n", index, | ||
174 | a4000 ? 4000 : 1200); | ||
175 | break; | ||
176 | #ifdef CONFIG_BLK_DEV_IDEDOUBLER | ||
177 | case 1: | ||
178 | printk("ide%d: IDE doubler\n", index); | ||
179 | break; | ||
180 | #endif /* CONFIG_BLK_DEV_IDEDOUBLER */ | ||
181 | } | ||
182 | } else | 185 | } else |
183 | release_mem_region(res_start, res_n); | 186 | release_mem_region(res_start, res_n); |
184 | } | 187 | } |
188 | |||
189 | ide_device_add(idx); | ||
190 | |||
191 | return 0; | ||
185 | } | 192 | } |
193 | |||
194 | module_init(gayle_init); | ||
diff --git a/drivers/ide/legacy/ht6560b.c b/drivers/ide/legacy/ht6560b.c index a4245d13f11b..8da5031a6d05 100644 --- a/drivers/ide/legacy/ht6560b.c +++ b/drivers/ide/legacy/ht6560b.c | |||
@@ -307,8 +307,7 @@ int probe_ht6560b = 0; | |||
307 | module_param_named(probe, probe_ht6560b, bool, 0); | 307 | module_param_named(probe, probe_ht6560b, bool, 0); |
308 | MODULE_PARM_DESC(probe, "probe for HT6560B chipset"); | 308 | MODULE_PARM_DESC(probe, "probe for HT6560B chipset"); |
309 | 309 | ||
310 | /* Can be called directly from ide.c. */ | 310 | static int __init ht6560b_init(void) |
311 | int __init ht6560b_init(void) | ||
312 | { | 311 | { |
313 | ide_hwif_t *hwif, *mate; | 312 | ide_hwif_t *hwif, *mate; |
314 | static u8 idx[4] = { 0, 1, 0xff, 0xff }; | 313 | static u8 idx[4] = { 0, 1, 0xff, 0xff }; |
@@ -369,9 +368,7 @@ release_region: | |||
369 | return -ENODEV; | 368 | return -ENODEV; |
370 | } | 369 | } |
371 | 370 | ||
372 | #ifdef MODULE | ||
373 | module_init(ht6560b_init); | 371 | module_init(ht6560b_init); |
374 | #endif | ||
375 | 372 | ||
376 | MODULE_AUTHOR("See Local File"); | 373 | MODULE_AUTHOR("See Local File"); |
377 | MODULE_DESCRIPTION("HT-6560B EIDE-controller support"); | 374 | MODULE_DESCRIPTION("HT-6560B EIDE-controller support"); |
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c index 03715c058664..f4ea15b32969 100644 --- a/drivers/ide/legacy/ide-cs.c +++ b/drivers/ide/legacy/ide-cs.c | |||
@@ -153,7 +153,7 @@ static int idecs_register(unsigned long io, unsigned long ctl, unsigned long irq | |||
153 | hw.irq = irq; | 153 | hw.irq = irq; |
154 | hw.chipset = ide_pci; | 154 | hw.chipset = ide_pci; |
155 | hw.dev = &handle->dev; | 155 | hw.dev = &handle->dev; |
156 | return ide_register_hw(&hw, &ide_undecoded_slave, 0, NULL); | 156 | return ide_register_hw(&hw, &ide_undecoded_slave, NULL); |
157 | } | 157 | } |
158 | 158 | ||
159 | /*====================================================================== | 159 | /*====================================================================== |
diff --git a/drivers/ide/legacy/ide_platform.c b/drivers/ide/legacy/ide_platform.c index 7bb79f53dac8..69a0fb0e564f 100644 --- a/drivers/ide/legacy/ide_platform.c +++ b/drivers/ide/legacy/ide_platform.c | |||
@@ -28,39 +28,27 @@ static struct { | |||
28 | int index; | 28 | int index; |
29 | } hwif_prop; | 29 | } hwif_prop; |
30 | 30 | ||
31 | static ide_hwif_t *__devinit plat_ide_locate_hwif(void __iomem *base, | 31 | static void __devinit plat_ide_setup_ports(hw_regs_t *hw, |
32 | void __iomem *ctrl, struct pata_platform_info *pdata, int irq, | 32 | void __iomem *base, |
33 | int mmio) | 33 | void __iomem *ctrl, |
34 | struct pata_platform_info *pdata, | ||
35 | int irq) | ||
34 | { | 36 | { |
35 | unsigned long port = (unsigned long)base; | 37 | unsigned long port = (unsigned long)base; |
36 | ide_hwif_t *hwif = ide_find_port(port); | ||
37 | int i; | 38 | int i; |
38 | 39 | ||
39 | if (hwif == NULL) | 40 | hw->io_ports[IDE_DATA_OFFSET] = port; |
40 | goto out; | ||
41 | |||
42 | hwif->io_ports[IDE_DATA_OFFSET] = port; | ||
43 | 41 | ||
44 | port += (1 << pdata->ioport_shift); | 42 | port += (1 << pdata->ioport_shift); |
45 | for (i = IDE_ERROR_OFFSET; i <= IDE_STATUS_OFFSET; | 43 | for (i = IDE_ERROR_OFFSET; i <= IDE_STATUS_OFFSET; |
46 | i++, port += (1 << pdata->ioport_shift)) | 44 | i++, port += (1 << pdata->ioport_shift)) |
47 | hwif->io_ports[i] = port; | 45 | hw->io_ports[i] = port; |
48 | |||
49 | hwif->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)ctrl; | ||
50 | 46 | ||
51 | hwif->irq = irq; | 47 | hw->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)ctrl; |
52 | 48 | ||
53 | hwif->chipset = ide_generic; | 49 | hw->irq = irq; |
54 | 50 | ||
55 | if (mmio) { | 51 | hw->chipset = ide_generic; |
56 | hwif->mmio = 1; | ||
57 | default_hwif_mmiops(hwif); | ||
58 | } | ||
59 | |||
60 | hwif_prop.hwif = hwif; | ||
61 | hwif_prop.index = hwif->index; | ||
62 | out: | ||
63 | return hwif; | ||
64 | } | 52 | } |
65 | 53 | ||
66 | static int __devinit plat_ide_probe(struct platform_device *pdev) | 54 | static int __devinit plat_ide_probe(struct platform_device *pdev) |
@@ -71,6 +59,7 @@ static int __devinit plat_ide_probe(struct platform_device *pdev) | |||
71 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | 59 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; |
72 | int ret = 0; | 60 | int ret = 0; |
73 | int mmio = 0; | 61 | int mmio = 0; |
62 | hw_regs_t hw; | ||
74 | 63 | ||
75 | pdata = pdev->dev.platform_data; | 64 | pdata = pdev->dev.platform_data; |
76 | 65 | ||
@@ -106,15 +95,27 @@ static int __devinit plat_ide_probe(struct platform_device *pdev) | |||
106 | res_alt->start, res_alt->end - res_alt->start + 1); | 95 | res_alt->start, res_alt->end - res_alt->start + 1); |
107 | } | 96 | } |
108 | 97 | ||
109 | hwif = plat_ide_locate_hwif(hwif_prop.plat_ide_mapbase, | 98 | hwif = ide_find_port((unsigned long)hwif_prop.plat_ide_mapbase); |
110 | hwif_prop.plat_ide_alt_mapbase, pdata, res_irq->start, mmio); | ||
111 | |||
112 | if (!hwif) { | 99 | if (!hwif) { |
113 | ret = -ENODEV; | 100 | ret = -ENODEV; |
114 | goto out; | 101 | goto out; |
115 | } | 102 | } |
116 | hwif->gendev.parent = &pdev->dev; | 103 | |
117 | hwif->noprobe = 0; | 104 | memset(&hw, 0, sizeof(hw)); |
105 | plat_ide_setup_ports(&hw, hwif_prop.plat_ide_mapbase, | ||
106 | hwif_prop.plat_ide_alt_mapbase, | ||
107 | pdata, res_irq->start); | ||
108 | hw.dev = &pdev->dev; | ||
109 | |||
110 | ide_init_port_hw(hwif, &hw); | ||
111 | |||
112 | if (mmio) { | ||
113 | hwif->mmio = 1; | ||
114 | default_hwif_mmiops(hwif); | ||
115 | } | ||
116 | |||
117 | hwif_prop.hwif = hwif; | ||
118 | hwif_prop.index = hwif->index; | ||
118 | 119 | ||
119 | idx[0] = hwif->index; | 120 | idx[0] = hwif->index; |
120 | 121 | ||
diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/legacy/macide.c index 5c6aa77c2370..782d4c76c0e5 100644 --- a/drivers/ide/legacy/macide.c +++ b/drivers/ide/legacy/macide.c | |||
@@ -77,15 +77,17 @@ int macide_ack_intr(ide_hwif_t* hwif) | |||
77 | return 0; | 77 | return 0; |
78 | } | 78 | } |
79 | 79 | ||
80 | static const char *mac_ide_name[] = | ||
81 | { "Quadra", "Powerbook", "Powerbook Baboon" }; | ||
82 | |||
80 | /* | 83 | /* |
81 | * Probe for a Macintosh IDE interface | 84 | * Probe for a Macintosh IDE interface |
82 | */ | 85 | */ |
83 | 86 | ||
84 | void __init macide_init(void) | 87 | static int __init macide_init(void) |
85 | { | 88 | { |
86 | hw_regs_t hw; | 89 | hw_regs_t hw; |
87 | ide_hwif_t *hwif; | 90 | ide_hwif_t *hwif; |
88 | int index = -1; | ||
89 | 91 | ||
90 | switch (macintosh_config->ide_type) { | 92 | switch (macintosh_config->ide_type) { |
91 | case MAC_IDE_QUADRA: | 93 | case MAC_IDE_QUADRA: |
@@ -93,48 +95,50 @@ void __init macide_init(void) | |||
93 | 0, 0, macide_ack_intr, | 95 | 0, 0, macide_ack_intr, |
94 | // quadra_ide_iops, | 96 | // quadra_ide_iops, |
95 | IRQ_NUBUS_F); | 97 | IRQ_NUBUS_F); |
96 | index = ide_register_hw(&hw, NULL, 1, &hwif); | ||
97 | break; | 98 | break; |
98 | case MAC_IDE_PB: | 99 | case MAC_IDE_PB: |
99 | ide_setup_ports(&hw, IDE_BASE, macide_offsets, | 100 | ide_setup_ports(&hw, IDE_BASE, macide_offsets, |
100 | 0, 0, macide_ack_intr, | 101 | 0, 0, macide_ack_intr, |
101 | // macide_pb_iops, | 102 | // macide_pb_iops, |
102 | IRQ_NUBUS_C); | 103 | IRQ_NUBUS_C); |
103 | index = ide_register_hw(&hw, NULL, 1, &hwif); | ||
104 | break; | 104 | break; |
105 | case MAC_IDE_BABOON: | 105 | case MAC_IDE_BABOON: |
106 | ide_setup_ports(&hw, BABOON_BASE, macide_offsets, | 106 | ide_setup_ports(&hw, BABOON_BASE, macide_offsets, |
107 | 0, 0, NULL, | 107 | 0, 0, NULL, |
108 | // macide_baboon_iops, | 108 | // macide_baboon_iops, |
109 | IRQ_BABOON_1); | 109 | IRQ_BABOON_1); |
110 | index = ide_register_hw(&hw, NULL, 1, &hwif); | 110 | break; |
111 | if (index == -1) break; | 111 | default: |
112 | if (macintosh_config->ident == MAC_MODEL_PB190) { | 112 | return -ENODEV; |
113 | } | ||
114 | |||
115 | printk(KERN_INFO "ide: Macintosh %s IDE controller\n", | ||
116 | mac_ide_name[macintosh_config->ide_type - 1]); | ||
113 | 117 | ||
118 | hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]); | ||
119 | if (hwif) { | ||
120 | u8 index = hwif->index; | ||
121 | u8 idx[4] = { index, 0xff, 0xff, 0xff }; | ||
122 | |||
123 | ide_init_port_data(hwif, index); | ||
124 | ide_init_port_hw(hwif, &hw); | ||
125 | |||
126 | if (macintosh_config->ide_type == MAC_IDE_BABOON && | ||
127 | macintosh_config->ident == MAC_MODEL_PB190) { | ||
114 | /* Fix breakage in ide-disk.c: drive capacity */ | 128 | /* Fix breakage in ide-disk.c: drive capacity */ |
115 | /* is not initialized for drives without a */ | 129 | /* is not initialized for drives without a */ |
116 | /* hardware ID, and we can't get that without */ | 130 | /* hardware ID, and we can't get that without */ |
117 | /* probing the drive which freezes a 190. */ | 131 | /* probing the drive which freezes a 190. */ |
118 | 132 | ide_drive_t *drive = &hwif->drives[0]; |
119 | ide_drive_t *drive = &ide_hwifs[index].drives[0]; | ||
120 | drive->capacity64 = drive->cyl*drive->head*drive->sect; | 133 | drive->capacity64 = drive->cyl*drive->head*drive->sect; |
121 | |||
122 | } | 134 | } |
123 | break; | ||
124 | |||
125 | default: | ||
126 | return; | ||
127 | } | ||
128 | 135 | ||
129 | if (index != -1) { | ||
130 | hwif->mmio = 1; | 136 | hwif->mmio = 1; |
131 | if (macintosh_config->ide_type == MAC_IDE_QUADRA) | 137 | |
132 | printk(KERN_INFO "ide%d: Macintosh Quadra IDE interface\n", index); | 138 | ide_device_add(idx); |
133 | else if (macintosh_config->ide_type == MAC_IDE_PB) | ||
134 | printk(KERN_INFO "ide%d: Macintosh Powerbook IDE interface\n", index); | ||
135 | else if (macintosh_config->ide_type == MAC_IDE_BABOON) | ||
136 | printk(KERN_INFO "ide%d: Macintosh Powerbook Baboon IDE interface\n", index); | ||
137 | else | ||
138 | printk(KERN_INFO "ide%d: Unknown Macintosh IDE interface\n", index); | ||
139 | } | 139 | } |
140 | |||
141 | return 0; | ||
140 | } | 142 | } |
143 | |||
144 | module_init(macide_init); | ||
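
The PB190 workaround keeps its logic but now pulls the drive out of the located hwif instead of indexing ide_hwifs[] with the old return value; the faked capacity is simply the CHS product. With a purely illustrative geometry the arithmetic looks like this (the real values come from the drive's setup, not from this snippet):

/* Hypothetical geometry, for scale only. */
drive->cyl  = 980;
drive->head = 10;
drive->sect = 17;
drive->capacity64 = drive->cyl * drive->head * drive->sect;
/* 980 * 10 * 17 = 166600 sectors, roughly 81 MiB at 512 bytes each */
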
diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/legacy/q40ide.c index 6ea46a6723e2..f5329730df99 100644 --- a/drivers/ide/legacy/q40ide.c +++ b/drivers/ide/legacy/q40ide.c | |||
@@ -111,15 +111,17 @@ static const char *q40_ide_names[Q40IDE_NUM_HWIFS]={ | |||
111 | * Probe for Q40 IDE interfaces | 111 | * Probe for Q40 IDE interfaces |
112 | */ | 112 | */ |
113 | 113 | ||
114 | void __init q40ide_init(void) | 114 | static int __init q40ide_init(void) |
115 | { | 115 | { |
116 | int i; | 116 | int i; |
117 | ide_hwif_t *hwif; | 117 | ide_hwif_t *hwif; |
118 | int index; | ||
119 | const char *name; | 118 | const char *name; |
119 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | ||
120 | 120 | ||
121 | if (!MACH_IS_Q40) | 121 | if (!MACH_IS_Q40) |
122 | return ; | 122 | return -ENODEV; |
123 | |||
124 | printk(KERN_INFO "ide: Q40 IDE controller\n"); | ||
123 | 125 | ||
124 | for (i = 0; i < Q40IDE_NUM_HWIFS; i++) { | 126 | for (i = 0; i < Q40IDE_NUM_HWIFS; i++) { |
125 | hw_regs_t hw; | 127 | hw_regs_t hw; |
@@ -141,10 +143,20 @@ void __init q40ide_init(void) | |||
141 | 0, NULL, | 143 | 0, NULL, |
142 | // m68kide_iops, | 144 | // m68kide_iops, |
143 | q40ide_default_irq(pcide_bases[i])); | 145 | q40ide_default_irq(pcide_bases[i])); |
144 | index = ide_register_hw(&hw, NULL, 1, &hwif); | 146 | |
145 | // **FIXME** | 147 | hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]); |
146 | if (index != -1) | 148 | if (hwif) { |
149 | ide_init_port_data(hwif, hwif->index); | ||
150 | ide_init_port_hw(hwif, &hw); | ||
147 | hwif->mmio = 1; | 151 | hwif->mmio = 1; |
152 | |||
153 | idx[i] = hwif->index; | ||
154 | } | ||
148 | } | 155 | } |
156 | |||
157 | ide_device_add(idx); | ||
158 | |||
159 | return 0; | ||
149 | } | 160 | } |
150 | 161 | ||
162 | module_init(q40ide_init); | ||
diff --git a/drivers/ide/legacy/qd65xx.c b/drivers/ide/legacy/qd65xx.c index 912e73853faa..2bac4c1a6532 100644 --- a/drivers/ide/legacy/qd65xx.c +++ b/drivers/ide/legacy/qd65xx.c | |||
@@ -478,8 +478,7 @@ int probe_qd65xx = 0; | |||
478 | module_param_named(probe, probe_qd65xx, bool, 0); | 478 | module_param_named(probe, probe_qd65xx, bool, 0); |
479 | MODULE_PARM_DESC(probe, "probe for QD65xx chipsets"); | 479 | MODULE_PARM_DESC(probe, "probe for QD65xx chipsets"); |
480 | 480 | ||
481 | /* Can be called directly from ide.c. */ | 481 | static int __init qd65xx_init(void) |
482 | int __init qd65xx_init(void) | ||
483 | { | 482 | { |
484 | if (probe_qd65xx == 0) | 483 | if (probe_qd65xx == 0) |
485 | return -ENODEV; | 484 | return -ENODEV; |
@@ -492,9 +491,7 @@ int __init qd65xx_init(void) | |||
492 | return 0; | 491 | return 0; |
493 | } | 492 | } |
494 | 493 | ||
495 | #ifdef MODULE | ||
496 | module_init(qd65xx_init); | 494 | module_init(qd65xx_init); |
497 | #endif | ||
498 | 495 | ||
499 | MODULE_AUTHOR("Samuel Thibault"); | 496 | MODULE_AUTHOR("Samuel Thibault"); |
500 | MODULE_DESCRIPTION("support of qd65xx vlb ide chipset"); | 497 | MODULE_DESCRIPTION("support of qd65xx vlb ide chipset"); |
diff --git a/drivers/ide/legacy/umc8672.c b/drivers/ide/legacy/umc8672.c index 79577b916874..a1ae1ae6699d 100644 --- a/drivers/ide/legacy/umc8672.c +++ b/drivers/ide/legacy/umc8672.c | |||
@@ -169,8 +169,7 @@ int probe_umc8672 = 0; | |||
169 | module_param_named(probe, probe_umc8672, bool, 0); | 169 | module_param_named(probe, probe_umc8672, bool, 0); |
170 | MODULE_PARM_DESC(probe, "probe for UMC8672 chipset"); | 170 | MODULE_PARM_DESC(probe, "probe for UMC8672 chipset"); |
171 | 171 | ||
172 | /* Can be called directly from ide.c. */ | 172 | static int __init umc8672_init(void) |
173 | int __init umc8672_init(void) | ||
174 | { | 173 | { |
175 | if (probe_umc8672 == 0) | 174 | if (probe_umc8672 == 0) |
176 | goto out; | 175 | goto out; |
@@ -181,9 +180,7 @@ out: | |||
181 | return -ENODEV; | 180 | return -ENODEV; |
182 | } | 181 | } |
183 | 182 | ||
184 | #ifdef MODULE | ||
185 | module_init(umc8672_init); | 183 | module_init(umc8672_init); |
186 | #endif | ||
187 | 184 | ||
188 | MODULE_AUTHOR("Wolfram Podien"); | 185 | MODULE_AUTHOR("Wolfram Podien"); |
189 | MODULE_DESCRIPTION("Support for UMC 8672 IDE chipset"); | 186 | MODULE_DESCRIPTION("Support for UMC 8672 IDE chipset"); |
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c index a4d0d4ca73d0..2d3e5115b834 100644 --- a/drivers/ide/mips/au1xxx-ide.c +++ b/drivers/ide/mips/au1xxx-ide.c | |||
@@ -395,26 +395,10 @@ static int auide_dma_test_irq(ide_drive_t *drive) | |||
395 | return 0; | 395 | return 0; |
396 | } | 396 | } |
397 | 397 | ||
398 | static void auide_dma_host_on(ide_drive_t *drive) | 398 | static void auide_dma_host_set(ide_drive_t *drive, int on) |
399 | { | 399 | { |
400 | } | 400 | } |
401 | 401 | ||
402 | static int auide_dma_on(ide_drive_t *drive) | ||
403 | { | ||
404 | drive->using_dma = 1; | ||
405 | |||
406 | return 0; | ||
407 | } | ||
408 | |||
409 | static void auide_dma_host_off(ide_drive_t *drive) | ||
410 | { | ||
411 | } | ||
412 | |||
413 | static void auide_dma_off_quietly(ide_drive_t *drive) | ||
414 | { | ||
415 | drive->using_dma = 0; | ||
416 | } | ||
417 | |||
418 | static void auide_dma_lost_irq(ide_drive_t *drive) | 402 | static void auide_dma_lost_irq(ide_drive_t *drive) |
419 | { | 403 | { |
420 | printk(KERN_ERR "%s: IRQ lost\n", drive->name); | 404 | printk(KERN_ERR "%s: IRQ lost\n", drive->name); |
@@ -641,12 +625,13 @@ static int au_ide_probe(struct device *dev) | |||
641 | /* FIXME: This might possibly break PCMCIA IDE devices */ | 625 | /* FIXME: This might possibly break PCMCIA IDE devices */ |
642 | 626 | ||
643 | hwif = &ide_hwifs[pdev->id]; | 627 | hwif = &ide_hwifs[pdev->id]; |
644 | hwif->irq = ahwif->irq; | ||
645 | hwif->chipset = ide_au1xxx; | ||
646 | 628 | ||
647 | memset(&hw, 0, sizeof(hw)); | 629 | memset(&hw, 0, sizeof(hw)); |
648 | auide_setup_ports(&hw, ahwif); | 630 | auide_setup_ports(&hw, ahwif); |
649 | memcpy(hwif->io_ports, hw.io_ports, sizeof(hwif->io_ports)); | 631 | hw.irq = ahwif->irq; |
632 | hw.chipset = ide_au1xxx; | ||
633 | |||
634 | ide_init_port_hw(hwif, &hw); | ||
650 | 635 | ||
651 | hwif->ultra_mask = 0x0; /* Disable Ultra DMA */ | 636 | hwif->ultra_mask = 0x0; /* Disable Ultra DMA */ |
652 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA | 637 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA |
@@ -660,7 +645,6 @@ static int au_ide_probe(struct device *dev) | |||
660 | hwif->pio_mask = ATA_PIO4; | 645 | hwif->pio_mask = ATA_PIO4; |
661 | hwif->host_flags = IDE_HFLAG_POST_SET_MODE; | 646 | hwif->host_flags = IDE_HFLAG_POST_SET_MODE; |
662 | 647 | ||
663 | hwif->noprobe = 0; | ||
664 | hwif->drives[0].unmask = 1; | 648 | hwif->drives[0].unmask = 1; |
665 | hwif->drives[1].unmask = 1; | 649 | hwif->drives[1].unmask = 1; |
666 | 650 | ||
@@ -682,29 +666,25 @@ static int au_ide_probe(struct device *dev) | |||
682 | hwif->set_dma_mode = &auide_set_dma_mode; | 666 | hwif->set_dma_mode = &auide_set_dma_mode; |
683 | 667 | ||
684 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA | 668 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA |
685 | hwif->dma_off_quietly = &auide_dma_off_quietly; | ||
686 | hwif->dma_timeout = &auide_dma_timeout; | 669 | hwif->dma_timeout = &auide_dma_timeout; |
687 | 670 | ||
688 | hwif->mdma_filter = &auide_mdma_filter; | 671 | hwif->mdma_filter = &auide_mdma_filter; |
689 | 672 | ||
673 | hwif->dma_host_set = &auide_dma_host_set; | ||
690 | hwif->dma_exec_cmd = &auide_dma_exec_cmd; | 674 | hwif->dma_exec_cmd = &auide_dma_exec_cmd; |
691 | hwif->dma_start = &auide_dma_start; | 675 | hwif->dma_start = &auide_dma_start; |
692 | hwif->ide_dma_end = &auide_dma_end; | 676 | hwif->ide_dma_end = &auide_dma_end; |
693 | hwif->dma_setup = &auide_dma_setup; | 677 | hwif->dma_setup = &auide_dma_setup; |
694 | hwif->ide_dma_test_irq = &auide_dma_test_irq; | 678 | hwif->ide_dma_test_irq = &auide_dma_test_irq; |
695 | hwif->dma_host_off = &auide_dma_host_off; | ||
696 | hwif->dma_host_on = &auide_dma_host_on; | ||
697 | hwif->dma_lost_irq = &auide_dma_lost_irq; | 679 | hwif->dma_lost_irq = &auide_dma_lost_irq; |
698 | hwif->ide_dma_on = &auide_dma_on; | 680 | #endif |
699 | #else /* !CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */ | ||
700 | hwif->channel = 0; | 681 | hwif->channel = 0; |
701 | hwif->hold = 1; | ||
702 | hwif->select_data = 0; /* no chipset-specific code */ | 682 | hwif->select_data = 0; /* no chipset-specific code */ |
703 | hwif->config_data = 0; /* no chipset-specific code */ | 683 | hwif->config_data = 0; /* no chipset-specific code */ |
704 | 684 | ||
705 | hwif->drives[0].autotune = 1; /* 1=autotune, 2=noautotune, 0=default */ | 685 | hwif->drives[0].autotune = 1; /* 1=autotune, 2=noautotune, 0=default */ |
706 | hwif->drives[1].autotune = 1; | 686 | hwif->drives[1].autotune = 1; |
707 | #endif | 687 | |
708 | hwif->drives[0].no_io_32bit = 1; | 688 | hwif->drives[0].no_io_32bit = 1; |
709 | hwif->drives[1].no_io_32bit = 1; | 689 | hwif->drives[1].no_io_32bit = 1; |
710 | 690 | ||
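
au1xxx, like atiixp further down and the ide.c hunk above that makes set_using_dma() test hwif->dma_host_set, drops the four old hooks (dma_host_on, dma_host_off, ide_dma_on, dma_off_quietly) in favour of a single dma_host_set(drive, on) method. For a controller with a per-drive DMA enable bit the hook typically reduces to something like the sketch below; MYCHIP_DMA_EN and the register layout are invented, and au1xxx itself needs no hardware action at all, hence its empty body in the hunk above:

/* Sketch only: the register and bit layout are hypothetical. */
static void mychip_dma_host_set(ide_drive_t *drive, int on)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 val = inb(hwif->dma_base + MYCHIP_DMA_EN);

	if (on)
		val |= 1 << (drive->dn & 1);	/* bit 0: master, bit 1: slave */
	else
		val &= ~(1 << (drive->dn & 1));

	outb(val, hwif->dma_base + MYCHIP_DMA_EN);
}

/* wired up in the host driver's port setup:
 *	hwif->dma_host_set = &mychip_dma_host_set;
 */
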
diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c index 521edd41b572..8b3959dfa2b7 100644 --- a/drivers/ide/mips/swarm.c +++ b/drivers/ide/mips/swarm.c | |||
@@ -117,6 +117,7 @@ static int __devinit swarm_ide_probe(struct device *dev) | |||
117 | default_hwif_mmiops(hwif); | 117 | default_hwif_mmiops(hwif); |
118 | /* Prevent resource map manipulation. */ | 118 | /* Prevent resource map manipulation. */ |
119 | hwif->mmio = 1; | 119 | hwif->mmio = 1; |
120 | hwif->chipset = ide_generic; | ||
120 | hwif->noprobe = 0; | 121 | hwif->noprobe = 0; |
121 | 122 | ||
122 | for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) | 123 | for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) |
diff --git a/drivers/ide/pci/Makefile b/drivers/ide/pci/Makefile index 95d1ea8f1f14..94803253e8af 100644 --- a/drivers/ide/pci/Makefile +++ b/drivers/ide/pci/Makefile | |||
@@ -36,4 +36,8 @@ obj-$(CONFIG_BLK_DEV_VIA82CXXX) += via82cxxx.o | |||
36 | # Must appear at the end of the block | 36 | # Must appear at the end of the block |
37 | obj-$(CONFIG_BLK_DEV_GENERIC) += generic.o | 37 | obj-$(CONFIG_BLK_DEV_GENERIC) += generic.o |
38 | 38 | ||
39 | ifeq ($(CONFIG_BLK_DEV_CMD640), m) | ||
40 | obj-m += cmd640.o | ||
41 | endif | ||
42 | |||
39 | EXTRA_CFLAGS := -Idrivers/ide | 43 | EXTRA_CFLAGS := -Idrivers/ide |
diff --git a/drivers/ide/pci/atiixp.c b/drivers/ide/pci/atiixp.c index 5ae26564fb72..491871984aaa 100644 --- a/drivers/ide/pci/atiixp.c +++ b/drivers/ide/pci/atiixp.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * linux/drivers/ide/pci/atiixp.c Version 0.03 Aug 3 2007 | 2 | * linux/drivers/ide/pci/atiixp.c Version 0.05 Nov 9 2007 |
3 | * | 3 | * |
4 | * Copyright (C) 2003 ATI Inc. <hyu@ati.com> | 4 | * Copyright (C) 2003 ATI Inc. <hyu@ati.com> |
5 | * Copyright (C) 2004,2007 Bartlomiej Zolnierkiewicz | 5 | * Copyright (C) 2004,2007 Bartlomiej Zolnierkiewicz |
@@ -43,47 +43,8 @@ static atiixp_ide_timing mdma_timing[] = { | |||
43 | { 0x02, 0x00 }, | 43 | { 0x02, 0x00 }, |
44 | }; | 44 | }; |
45 | 45 | ||
46 | static int save_mdma_mode[4]; | ||
47 | |||
48 | static DEFINE_SPINLOCK(atiixp_lock); | 46 | static DEFINE_SPINLOCK(atiixp_lock); |
49 | 47 | ||
50 | static void atiixp_dma_host_on(ide_drive_t *drive) | ||
51 | { | ||
52 | struct pci_dev *dev = drive->hwif->pci_dev; | ||
53 | unsigned long flags; | ||
54 | u16 tmp16; | ||
55 | |||
56 | spin_lock_irqsave(&atiixp_lock, flags); | ||
57 | |||
58 | pci_read_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, &tmp16); | ||
59 | if (save_mdma_mode[drive->dn]) | ||
60 | tmp16 &= ~(1 << drive->dn); | ||
61 | else | ||
62 | tmp16 |= (1 << drive->dn); | ||
63 | pci_write_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, tmp16); | ||
64 | |||
65 | spin_unlock_irqrestore(&atiixp_lock, flags); | ||
66 | |||
67 | ide_dma_host_on(drive); | ||
68 | } | ||
69 | |||
70 | static void atiixp_dma_host_off(ide_drive_t *drive) | ||
71 | { | ||
72 | struct pci_dev *dev = drive->hwif->pci_dev; | ||
73 | unsigned long flags; | ||
74 | u16 tmp16; | ||
75 | |||
76 | spin_lock_irqsave(&atiixp_lock, flags); | ||
77 | |||
78 | pci_read_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, &tmp16); | ||
79 | tmp16 &= ~(1 << drive->dn); | ||
80 | pci_write_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, tmp16); | ||
81 | |||
82 | spin_unlock_irqrestore(&atiixp_lock, flags); | ||
83 | |||
84 | ide_dma_host_off(drive); | ||
85 | } | ||
86 | |||
87 | /** | 48 | /** |
88 | * atiixp_set_pio_mode - set host controller for PIO mode | 49 | * atiixp_set_pio_mode - set host controller for PIO mode |
89 | * @drive: drive | 50 | * @drive: drive |
@@ -132,26 +93,33 @@ static void atiixp_set_dma_mode(ide_drive_t *drive, const u8 speed) | |||
132 | int timing_shift = (drive->dn & 2) ? 16 : 0 + (drive->dn & 1) ? 0 : 8; | 93 | int timing_shift = (drive->dn & 2) ? 16 : 0 + (drive->dn & 1) ? 0 : 8; |
133 | u32 tmp32; | 94 | u32 tmp32; |
134 | u16 tmp16; | 95 | u16 tmp16; |
96 | u16 udma_ctl = 0; | ||
135 | 97 | ||
136 | spin_lock_irqsave(&atiixp_lock, flags); | 98 | spin_lock_irqsave(&atiixp_lock, flags); |
137 | 99 | ||
138 | save_mdma_mode[drive->dn] = 0; | 100 | pci_read_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, &udma_ctl); |
101 | |||
139 | if (speed >= XFER_UDMA_0) { | 102 | if (speed >= XFER_UDMA_0) { |
140 | pci_read_config_word(dev, ATIIXP_IDE_UDMA_MODE, &tmp16); | 103 | pci_read_config_word(dev, ATIIXP_IDE_UDMA_MODE, &tmp16); |
141 | tmp16 &= ~(0x07 << (drive->dn * 4)); | 104 | tmp16 &= ~(0x07 << (drive->dn * 4)); |
142 | tmp16 |= ((speed & 0x07) << (drive->dn * 4)); | 105 | tmp16 |= ((speed & 0x07) << (drive->dn * 4)); |
143 | pci_write_config_word(dev, ATIIXP_IDE_UDMA_MODE, tmp16); | 106 | pci_write_config_word(dev, ATIIXP_IDE_UDMA_MODE, tmp16); |
144 | } else { | 107 | |
145 | if ((speed >= XFER_MW_DMA_0) && (speed <= XFER_MW_DMA_2)) { | 108 | udma_ctl |= (1 << drive->dn); |
146 | save_mdma_mode[drive->dn] = speed; | 109 | } else if (speed >= XFER_MW_DMA_0) { |
147 | pci_read_config_dword(dev, ATIIXP_IDE_MDMA_TIMING, &tmp32); | 110 | u8 i = speed & 0x03; |
148 | tmp32 &= ~(0xff << timing_shift); | 111 | |
149 | tmp32 |= (mdma_timing[speed & 0x03].recover_width << timing_shift) | | 112 | pci_read_config_dword(dev, ATIIXP_IDE_MDMA_TIMING, &tmp32); |
150 | (mdma_timing[speed & 0x03].command_width << (timing_shift + 4)); | 113 | tmp32 &= ~(0xff << timing_shift); |
151 | pci_write_config_dword(dev, ATIIXP_IDE_MDMA_TIMING, tmp32); | 114 | tmp32 |= (mdma_timing[i].recover_width << timing_shift) | |
152 | } | 115 | (mdma_timing[i].command_width << (timing_shift + 4)); |
116 | pci_write_config_dword(dev, ATIIXP_IDE_MDMA_TIMING, tmp32); | ||
117 | |||
118 | udma_ctl &= ~(1 << drive->dn); | ||
153 | } | 119 | } |
154 | 120 | ||
121 | pci_write_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, udma_ctl); | ||
122 | |||
155 | spin_unlock_irqrestore(&atiixp_lock, flags); | 123 | spin_unlock_irqrestore(&atiixp_lock, flags); |
156 | } | 124 | } |
157 | 125 | ||
@@ -181,9 +149,6 @@ static void __devinit init_hwif_atiixp(ide_hwif_t *hwif) | |||
181 | hwif->cbl = ATA_CBL_PATA80; | 149 | hwif->cbl = ATA_CBL_PATA80; |
182 | else | 150 | else |
183 | hwif->cbl = ATA_CBL_PATA40; | 151 | hwif->cbl = ATA_CBL_PATA40; |
184 | |||
185 | hwif->dma_host_on = &atiixp_dma_host_on; | ||
186 | hwif->dma_host_off = &atiixp_dma_host_off; | ||
187 | } | 152 | } |
188 | 153 | ||
189 | static const struct ide_port_info atiixp_pci_info[] __devinitdata = { | 154 | static const struct ide_port_info atiixp_pci_info[] __devinitdata = { |
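
With the dma_host_on/off pair removed, atiixp programs ATIIXP_IDE_UDMA_CONTROL directly from set_dma_mode(): the bit for drive->dn is set when an UltraDMA mode is chosen and cleared for multiword DMA, so the old save_mdma_mode[] state disappears. A short worked example of that register math, not new driver code; the XFER_* codes follow the standard ATA transfer-mode numbering (XFER_MW_DMA_0..2 are 0x20..0x22, XFER_UDMA_0..6 are 0x40..0x46). Selecting UDMA5 on the secondary master, i.e. drive->dn == 2:

/* Worked example only; tmp16 and udma_ctl are the locals from above. */
u8 speed = XFER_UDMA_5;				/* 0x45, so >= XFER_UDMA_0 */

tmp16 |= (speed & 0x07) << (drive->dn * 4);	/* mode number 5 into drive 2's field at bit 8 of UDMA_MODE */
udma_ctl |= 1 << drive->dn;			/* bit 2 of UDMA_CONTROL enables UDMA for that drive */
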
diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/pci/cmd640.c index 4aa48104e0c1..da3565e0071f 100644 --- a/drivers/ide/pci/cmd640.c +++ b/drivers/ide/pci/cmd640.c | |||
@@ -706,9 +706,9 @@ static int pci_conf2(void) | |||
706 | } | 706 | } |
707 | 707 | ||
708 | /* | 708 | /* |
709 | * Probe for a cmd640 chipset, and initialize it if found. Called from ide.c | 709 | * Probe for a cmd640 chipset, and initialize it if found. |
710 | */ | 710 | */ |
711 | int __init ide_probe_for_cmd640x (void) | 711 | static int __init cmd640x_init(void) |
712 | { | 712 | { |
713 | #ifdef CONFIG_BLK_DEV_CMD640_ENHANCED | 713 | #ifdef CONFIG_BLK_DEV_CMD640_ENHANCED |
714 | int second_port_toggled = 0; | 714 | int second_port_toggled = 0; |
@@ -717,6 +717,7 @@ int __init ide_probe_for_cmd640x (void) | |||
717 | const char *bus_type, *port2; | 717 | const char *bus_type, *port2; |
718 | unsigned int index; | 718 | unsigned int index; |
719 | u8 b, cfr; | 719 | u8 b, cfr; |
720 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | ||
720 | 721 | ||
721 | if (cmd640_vlb && probe_for_cmd640_vlb()) { | 722 | if (cmd640_vlb && probe_for_cmd640_vlb()) { |
722 | bus_type = "VLB"; | 723 | bus_type = "VLB"; |
@@ -769,6 +770,8 @@ int __init ide_probe_for_cmd640x (void) | |||
769 | cmd_hwif0->set_pio_mode = &cmd640_set_pio_mode; | 770 | cmd_hwif0->set_pio_mode = &cmd640_set_pio_mode; |
770 | #endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */ | 771 | #endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */ |
771 | 772 | ||
773 | idx[0] = cmd_hwif0->index; | ||
774 | |||
772 | /* | 775 | /* |
773 | * Ensure compatibility by always using the slowest timings | 776 | * Ensure compatibility by always using the slowest timings |
774 | * for access to the drive's command register block, | 777 | * for access to the drive's command register block, |
@@ -826,6 +829,8 @@ int __init ide_probe_for_cmd640x (void) | |||
826 | cmd_hwif1->pio_mask = ATA_PIO5; | 829 | cmd_hwif1->pio_mask = ATA_PIO5; |
827 | cmd_hwif1->set_pio_mode = &cmd640_set_pio_mode; | 830 | cmd_hwif1->set_pio_mode = &cmd640_set_pio_mode; |
828 | #endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */ | 831 | #endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */ |
832 | |||
833 | idx[1] = cmd_hwif1->index; | ||
829 | } | 834 | } |
830 | printk(KERN_INFO "%s: %sserialized, secondary interface %s\n", cmd_hwif1->name, | 835 | printk(KERN_INFO "%s: %sserialized, secondary interface %s\n", cmd_hwif1->name, |
831 | cmd_hwif0->serialized ? "" : "not ", port2); | 836 | cmd_hwif0->serialized ? "" : "not ", port2); |
@@ -872,6 +877,13 @@ int __init ide_probe_for_cmd640x (void) | |||
872 | #ifdef CMD640_DUMP_REGS | 877 | #ifdef CMD640_DUMP_REGS |
873 | cmd640_dump_regs(); | 878 | cmd640_dump_regs(); |
874 | #endif | 879 | #endif |
880 | |||
881 | ide_device_add(idx); | ||
882 | |||
875 | return 1; | 883 | return 1; |
876 | } | 884 | } |
877 | 885 | ||
886 | module_param_named(probe_vlb, cmd640_vlb, bool, 0); | ||
887 | MODULE_PARM_DESC(probe_vlb, "probe for VLB version of CMD640 chipset"); | ||
888 | |||
889 | module_init(cmd640x_init); | ||
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c index 0b1e9479f019..cd4eb9def151 100644 --- a/drivers/ide/pci/cmd64x.c +++ b/drivers/ide/pci/cmd64x.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * linux/drivers/ide/pci/cmd64x.c Version 1.52 Dec 24, 2007 | 2 | * linux/drivers/ide/pci/cmd64x.c Version 1.53 Dec 24, 2007 |
3 | * | 3 | * |
4 | * cmd64x.c: Enable interrupts at initialization time on Ultra/PCI machines. | 4 | * cmd64x.c: Enable interrupts at initialization time on Ultra/PCI machines. |
5 | * Due to massive hardware bugs, UltraDMA is only supported | 5 | * Due to massive hardware bugs, UltraDMA is only supported |
@@ -22,8 +22,6 @@ | |||
22 | 22 | ||
23 | #include <asm/io.h> | 23 | #include <asm/io.h> |
24 | 24 | ||
25 | #define DISPLAY_CMD64X_TIMINGS | ||
26 | |||
27 | #define CMD_DEBUG 0 | 25 | #define CMD_DEBUG 0 |
28 | 26 | ||
29 | #if CMD_DEBUG | 27 | #if CMD_DEBUG |
@@ -37,11 +35,6 @@ | |||
37 | */ | 35 | */ |
38 | #define CFR 0x50 | 36 | #define CFR 0x50 |
39 | #define CFR_INTR_CH0 0x04 | 37 | #define CFR_INTR_CH0 0x04 |
40 | #define CNTRL 0x51 | ||
41 | #define CNTRL_ENA_1ST 0x04 | ||
42 | #define CNTRL_ENA_2ND 0x08 | ||
43 | #define CNTRL_DIS_RA0 0x40 | ||
44 | #define CNTRL_DIS_RA1 0x80 | ||
45 | 38 | ||
46 | #define CMDTIM 0x52 | 39 | #define CMDTIM 0x52 |
47 | #define ARTTIM0 0x53 | 40 | #define ARTTIM0 0x53 |
@@ -60,108 +53,13 @@ | |||
60 | #define MRDMODE 0x71 | 53 | #define MRDMODE 0x71 |
61 | #define MRDMODE_INTR_CH0 0x04 | 54 | #define MRDMODE_INTR_CH0 0x04 |
62 | #define MRDMODE_INTR_CH1 0x08 | 55 | #define MRDMODE_INTR_CH1 0x08 |
63 | #define MRDMODE_BLK_CH0 0x10 | ||
64 | #define MRDMODE_BLK_CH1 0x20 | ||
65 | #define BMIDESR0 0x72 | ||
66 | #define UDIDETCR0 0x73 | 56 | #define UDIDETCR0 0x73 |
67 | #define DTPR0 0x74 | 57 | #define DTPR0 0x74 |
68 | #define BMIDECR1 0x78 | 58 | #define BMIDECR1 0x78 |
69 | #define BMIDECSR 0x79 | 59 | #define BMIDECSR 0x79 |
70 | #define BMIDESR1 0x7A | ||
71 | #define UDIDETCR1 0x7B | 60 | #define UDIDETCR1 0x7B |
72 | #define DTPR1 0x7C | 61 | #define DTPR1 0x7C |
73 | 62 | ||
74 | #if defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_IDE_PROC_FS) | ||
75 | #include <linux/stat.h> | ||
76 | #include <linux/proc_fs.h> | ||
77 | |||
78 | static u8 cmd64x_proc = 0; | ||
79 | |||
80 | #define CMD_MAX_DEVS 5 | ||
81 | |||
82 | static struct pci_dev *cmd_devs[CMD_MAX_DEVS]; | ||
83 | static int n_cmd_devs; | ||
84 | |||
85 | static char * print_cmd64x_get_info (char *buf, struct pci_dev *dev, int index) | ||
86 | { | ||
87 | char *p = buf; | ||
88 | u8 reg72 = 0, reg73 = 0; /* primary */ | ||
89 | u8 reg7a = 0, reg7b = 0; /* secondary */ | ||
90 | u8 reg50 = 1, reg51 = 1, reg57 = 0, reg71 = 0; /* extra */ | ||
91 | |||
92 | p += sprintf(p, "\nController: %d\n", index); | ||
93 | p += sprintf(p, "PCI-%x Chipset.\n", dev->device); | ||
94 | |||
95 | (void) pci_read_config_byte(dev, CFR, &reg50); | ||
96 | (void) pci_read_config_byte(dev, CNTRL, &reg51); | ||
97 | (void) pci_read_config_byte(dev, ARTTIM23, &reg57); | ||
98 | (void) pci_read_config_byte(dev, MRDMODE, &reg71); | ||
99 | (void) pci_read_config_byte(dev, BMIDESR0, &reg72); | ||
100 | (void) pci_read_config_byte(dev, UDIDETCR0, &reg73); | ||
101 | (void) pci_read_config_byte(dev, BMIDESR1, &reg7a); | ||
102 | (void) pci_read_config_byte(dev, UDIDETCR1, &reg7b); | ||
103 | |||
104 | /* PCI0643/6 originally didn't have the primary channel enable bit */ | ||
105 | if ((dev->device == PCI_DEVICE_ID_CMD_643) || | ||
106 | (dev->device == PCI_DEVICE_ID_CMD_646 && dev->revision < 3)) | ||
107 | reg51 |= CNTRL_ENA_1ST; | ||
108 | |||
109 | p += sprintf(p, "---------------- Primary Channel " | ||
110 | "---------------- Secondary Channel ------------\n"); | ||
111 | p += sprintf(p, " %s %s\n", | ||
112 | (reg51 & CNTRL_ENA_1ST) ? "enabled " : "disabled", | ||
113 | (reg51 & CNTRL_ENA_2ND) ? "enabled " : "disabled"); | ||
114 | p += sprintf(p, "---------------- drive0 --------- drive1 " | ||
115 | "-------- drive0 --------- drive1 ------\n"); | ||
116 | p += sprintf(p, "DMA enabled: %s %s" | ||
117 | " %s %s\n", | ||
118 | (reg72 & 0x20) ? "yes" : "no ", (reg72 & 0x40) ? "yes" : "no ", | ||
119 | (reg7a & 0x20) ? "yes" : "no ", (reg7a & 0x40) ? "yes" : "no "); | ||
120 | p += sprintf(p, "UltraDMA mode: %s (%c) %s (%c)", | ||
121 | ( reg73 & 0x01) ? " on" : "off", | ||
122 | ((reg73 & 0x30) == 0x30) ? ((reg73 & 0x04) ? '3' : '0') : | ||
123 | ((reg73 & 0x30) == 0x20) ? ((reg73 & 0x04) ? '3' : '1') : | ||
124 | ((reg73 & 0x30) == 0x10) ? ((reg73 & 0x04) ? '4' : '2') : | ||
125 | ((reg73 & 0x30) == 0x00) ? ((reg73 & 0x04) ? '5' : '2') : '?', | ||
126 | ( reg73 & 0x02) ? " on" : "off", | ||
127 | ((reg73 & 0xC0) == 0xC0) ? ((reg73 & 0x08) ? '3' : '0') : | ||
128 | ((reg73 & 0xC0) == 0x80) ? ((reg73 & 0x08) ? '3' : '1') : | ||
129 | ((reg73 & 0xC0) == 0x40) ? ((reg73 & 0x08) ? '4' : '2') : | ||
130 | ((reg73 & 0xC0) == 0x00) ? ((reg73 & 0x08) ? '5' : '2') : '?'); | ||
131 | p += sprintf(p, " %s (%c) %s (%c)\n", | ||
132 | ( reg7b & 0x01) ? " on" : "off", | ||
133 | ((reg7b & 0x30) == 0x30) ? ((reg7b & 0x04) ? '3' : '0') : | ||
134 | ((reg7b & 0x30) == 0x20) ? ((reg7b & 0x04) ? '3' : '1') : | ||
135 | ((reg7b & 0x30) == 0x10) ? ((reg7b & 0x04) ? '4' : '2') : | ||
136 | ((reg7b & 0x30) == 0x00) ? ((reg7b & 0x04) ? '5' : '2') : '?', | ||
137 | ( reg7b & 0x02) ? " on" : "off", | ||
138 | ((reg7b & 0xC0) == 0xC0) ? ((reg7b & 0x08) ? '3' : '0') : | ||
139 | ((reg7b & 0xC0) == 0x80) ? ((reg7b & 0x08) ? '3' : '1') : | ||
140 | ((reg7b & 0xC0) == 0x40) ? ((reg7b & 0x08) ? '4' : '2') : | ||
141 | ((reg7b & 0xC0) == 0x00) ? ((reg7b & 0x08) ? '5' : '2') : '?'); | ||
142 | p += sprintf(p, "Interrupt: %s, %s %s, %s\n", | ||
143 | (reg71 & MRDMODE_BLK_CH0 ) ? "blocked" : "enabled", | ||
144 | (reg50 & CFR_INTR_CH0 ) ? "pending" : "clear ", | ||
145 | (reg71 & MRDMODE_BLK_CH1 ) ? "blocked" : "enabled", | ||
146 | (reg57 & ARTTIM23_INTR_CH1) ? "pending" : "clear "); | ||
147 | |||
148 | return (char *)p; | ||
149 | } | ||
150 | |||
151 | static int cmd64x_get_info (char *buffer, char **addr, off_t offset, int count) | ||
152 | { | ||
153 | char *p = buffer; | ||
154 | int i; | ||
155 | |||
156 | for (i = 0; i < n_cmd_devs; i++) { | ||
157 | struct pci_dev *dev = cmd_devs[i]; | ||
158 | p = print_cmd64x_get_info(p, dev, i); | ||
159 | } | ||
160 | return p-buffer; /* => must be less than 4k! */ | ||
161 | } | ||
162 | |||
163 | #endif /* defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */ | ||
164 | |||
165 | static u8 quantize_timing(int timing, int quant) | 63 | static u8 quantize_timing(int timing, int quant) |
166 | { | 64 | { |
167 | return (timing + quant - 1) / quant; | 65 | return (timing + quant - 1) / quant; |
@@ -472,16 +370,6 @@ static unsigned int __devinit init_chipset_cmd64x(struct pci_dev *dev, const cha | |||
472 | mrdmode &= ~0x30; | 370 | mrdmode &= ~0x30; |
473 | (void) pci_write_config_byte(dev, MRDMODE, (mrdmode | 0x02)); | 371 | (void) pci_write_config_byte(dev, MRDMODE, (mrdmode | 0x02)); |
474 | 372 | ||
475 | #if defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_IDE_PROC_FS) | ||
476 | |||
477 | cmd_devs[n_cmd_devs++] = dev; | ||
478 | |||
479 | if (!cmd64x_proc) { | ||
480 | cmd64x_proc = 1; | ||
481 | ide_pci_create_host_proc("cmd64x", cmd64x_get_info); | ||
482 | } | ||
483 | #endif /* DISPLAY_CMD64X_TIMINGS && CONFIG_IDE_PROC_FS */ | ||
484 | |||
485 | return 0; | 373 | return 0; |
486 | } | 374 | } |
487 | 375 | ||
diff --git a/drivers/ide/pci/cs5520.c b/drivers/ide/pci/cs5520.c index d1a91bcb5b29..6ec00b8d7ec1 100644 --- a/drivers/ide/pci/cs5520.c +++ b/drivers/ide/pci/cs5520.c | |||
@@ -71,7 +71,6 @@ static void cs5520_set_pio_mode(ide_drive_t *drive, const u8 pio) | |||
71 | ide_hwif_t *hwif = HWIF(drive); | 71 | ide_hwif_t *hwif = HWIF(drive); |
72 | struct pci_dev *pdev = hwif->pci_dev; | 72 | struct pci_dev *pdev = hwif->pci_dev; |
73 | int controller = drive->dn > 1 ? 1 : 0; | 73 | int controller = drive->dn > 1 ? 1 : 0; |
74 | u8 reg; | ||
75 | 74 | ||
76 | /* FIXME: if DMA = 1 do we need to set the DMA bit here ? */ | 75 | /* FIXME: if DMA = 1 do we need to set the DMA bit here ? */ |
77 | 76 | ||
@@ -91,11 +90,6 @@ static void cs5520_set_pio_mode(ide_drive_t *drive, const u8 pio) | |||
91 | pci_write_config_byte(pdev, 0x66 + 4*controller + (drive->dn&1), | 90 | pci_write_config_byte(pdev, 0x66 + 4*controller + (drive->dn&1), |
92 | (cs5520_pio_clocks[pio].recovery << 4) | | 91 | (cs5520_pio_clocks[pio].recovery << 4) | |
93 | (cs5520_pio_clocks[pio].assert)); | 92 | (cs5520_pio_clocks[pio].assert)); |
94 | |||
95 | /* Set the DMA enable/disable flag */ | ||
96 | reg = inb(hwif->dma_base + 0x02 + 8*controller); | ||
97 | reg |= 1<<((drive->dn&1)+5); | ||
98 | outb(reg, hwif->dma_base + 0x02 + 8*controller); | ||
99 | } | 93 | } |
100 | 94 | ||
101 | static void cs5520_set_dma_mode(ide_drive_t *drive, const u8 speed) | 95 | static void cs5520_set_dma_mode(ide_drive_t *drive, const u8 speed) |
@@ -109,13 +103,14 @@ static void cs5520_set_dma_mode(ide_drive_t *drive, const u8 speed) | |||
109 | * We wrap the DMA activate to set the vdma flag. This is needed | 103 | * We wrap the DMA activate to set the vdma flag. This is needed |
110 | * so that the IDE DMA layer issues PIO not DMA commands over the | 104 | * so that the IDE DMA layer issues PIO not DMA commands over the |
111 | * DMA channel | 105 | * DMA channel |
106 | * | ||
107 | * ATAPI is harder so disable it for now using IDE_HFLAG_NO_ATAPI_DMA | ||
112 | */ | 108 | */ |
113 | 109 | ||
114 | static int cs5520_dma_on(ide_drive_t *drive) | 110 | static void cs5520_dma_host_set(ide_drive_t *drive, int on) |
115 | { | 111 | { |
116 | /* ATAPI is harder so leave it for now */ | 112 | drive->vdma = on; |
117 | drive->vdma = 1; | 113 | ide_dma_host_set(drive, on); |
118 | return 0; | ||
119 | } | 114 | } |
120 | 115 | ||
121 | static void __devinit init_hwif_cs5520(ide_hwif_t *hwif) | 116 | static void __devinit init_hwif_cs5520(ide_hwif_t *hwif) |
@@ -126,7 +121,7 @@ static void __devinit init_hwif_cs5520(ide_hwif_t *hwif) | |||
126 | if (hwif->dma_base == 0) | 121 | if (hwif->dma_base == 0) |
127 | return; | 122 | return; |
128 | 123 | ||
129 | hwif->ide_dma_on = &cs5520_dma_on; | 124 | hwif->dma_host_set = &cs5520_dma_host_set; |
130 | } | 125 | } |
131 | 126 | ||
132 | #define DECLARE_CS_DEV(name_str) \ | 127 | #define DECLARE_CS_DEV(name_str) \ |
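
The cs5520 hunk is the simplest example of the conversion applied to most drivers in this series: the per-driver ->ide_dma_on() / ->dma_off_quietly() pair becomes a single ->dma_host_set(drive, on) hook, and the generic ide_dma_host_set() takes over the bus-master enable bits the driver used to poke by hand. Annotated, the new hook amounts to the fragment below (code taken from the hunk, comments added here):

/*
 * Called by the IDE core with on=1 when DMA is (re)enabled for a drive
 * and on=0 when it is switched off.  cs5520 does "VDMA" (PIO commands
 * issued over the DMA engine), so the flag is mirrored into drive->vdma
 * before the generic helper programs the controller.
 */
static void cs5520_dma_host_set(ide_drive_t *drive, int on)
{
        drive->vdma = on;
        ide_dma_host_set(drive, on);
}

        /* in init_hwif_cs5520(): one hook replaces the old on/off pair */
        hwif->dma_host_set = &cs5520_dma_host_set;

The sc1200 hunk further down shows the caller's side of the same interface: once ide_set_dma_mode() succeeds and the drive is using DMA, it re-enables the host with hwif->dma_host_set(drive, 1).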
diff --git a/drivers/ide/pci/cy82c693.c b/drivers/ide/pci/cy82c693.c index 1cd4e9cb0521..3ec4c659a37d 100644 --- a/drivers/ide/pci/cy82c693.c +++ b/drivers/ide/pci/cy82c693.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * linux/drivers/ide/pci/cy82c693.c Version 0.42 Oct 23, 2007 | 2 | * linux/drivers/ide/pci/cy82c693.c Version 0.44 Nov 8, 2007 |
3 | * | 3 | * |
4 | * Copyright (C) 1998-2000 Andreas S. Krebs (akrebs@altavista.net), Maintainer | 4 | * Copyright (C) 1998-2000 Andreas S. Krebs (akrebs@altavista.net), Maintainer |
5 | * Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>, Integrator | 5 | * Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>, Integrator |
@@ -176,17 +176,12 @@ static void compute_clocks (u8 pio, pio_clocks_t *p_pclk) | |||
176 | * set DMA mode a specific channel for CY82C693 | 176 | * set DMA mode a specific channel for CY82C693 |
177 | */ | 177 | */ |
178 | 178 | ||
179 | static void cy82c693_dma_enable (ide_drive_t *drive, int mode, int single) | 179 | static void cy82c693_set_dma_mode(ide_drive_t *drive, const u8 mode) |
180 | { | 180 | { |
181 | u8 index = 0, data = 0; | 181 | ide_hwif_t *hwif = drive->hwif; |
182 | u8 single = (mode & 0x10) >> 4, index = 0, data = 0; | ||
182 | 183 | ||
183 | if (mode>2) /* make sure we set a valid mode */ | 184 | index = hwif->channel ? CY82_INDEX_CHANNEL1 : CY82_INDEX_CHANNEL0; |
184 | mode = 2; | ||
185 | |||
186 | if (mode > drive->id->tDMA) /* to be absolutly sure we have a valid mode */ | ||
187 | mode = drive->id->tDMA; | ||
188 | |||
189 | index = (HWIF(drive)->channel==0) ? CY82_INDEX_CHANNEL0 : CY82_INDEX_CHANNEL1; | ||
190 | 185 | ||
191 | #if CY82C693_DEBUG_LOGS | 186 | #if CY82C693_DEBUG_LOGS |
192 | /* for debug let's show the previous values */ | 187 | /* for debug let's show the previous values */ |
@@ -199,7 +194,7 @@ static void cy82c693_dma_enable (ide_drive_t *drive, int mode, int single) | |||
199 | (data&0x3), ((data>>2)&1)); | 194 | (data&0x3), ((data>>2)&1)); |
200 | #endif /* CY82C693_DEBUG_LOGS */ | 195 | #endif /* CY82C693_DEBUG_LOGS */ |
201 | 196 | ||
202 | data = (u8)mode|(u8)(single<<2); | 197 | data = (mode & 3) | (single << 2); |
203 | 198 | ||
204 | outb(index, CY82_INDEX_PORT); | 199 | outb(index, CY82_INDEX_PORT); |
205 | outb(data, CY82_DATA_PORT); | 200 | outb(data, CY82_DATA_PORT); |
@@ -207,7 +202,7 @@ static void cy82c693_dma_enable (ide_drive_t *drive, int mode, int single) | |||
207 | #if CY82C693_DEBUG_INFO | 202 | #if CY82C693_DEBUG_INFO |
208 | printk(KERN_INFO "%s (ch=%d, dev=%d): set DMA mode to %d (single=%d)\n", | 203 | printk(KERN_INFO "%s (ch=%d, dev=%d): set DMA mode to %d (single=%d)\n", |
209 | drive->name, HWIF(drive)->channel, drive->select.b.unit, | 204 | drive->name, HWIF(drive)->channel, drive->select.b.unit, |
210 | mode, single); | 205 | mode & 3, single); |
211 | #endif /* CY82C693_DEBUG_INFO */ | 206 | #endif /* CY82C693_DEBUG_INFO */ |
212 | 207 | ||
213 | /* | 208 | /* |
@@ -230,39 +225,6 @@ static void cy82c693_dma_enable (ide_drive_t *drive, int mode, int single) | |||
230 | #endif /* CY82C693_DEBUG_INFO */ | 225 | #endif /* CY82C693_DEBUG_INFO */ |
231 | } | 226 | } |
232 | 227 | ||
233 | /* | ||
234 | * used to set DMA mode for CY82C693 (single and multi modes) | ||
235 | */ | ||
236 | static int cy82c693_ide_dma_on (ide_drive_t *drive) | ||
237 | { | ||
238 | struct hd_driveid *id = drive->id; | ||
239 | |||
240 | #if CY82C693_DEBUG_INFO | ||
241 | printk (KERN_INFO "dma_on: %s\n", drive->name); | ||
242 | #endif /* CY82C693_DEBUG_INFO */ | ||
243 | |||
244 | if (id != NULL) { | ||
245 | /* Enable DMA on any drive that has DMA | ||
246 | * (multi or single) enabled | ||
247 | */ | ||
248 | if (id->field_valid & 2) { /* regular DMA */ | ||
249 | int mmode, smode; | ||
250 | |||
251 | mmode = id->dma_mword & (id->dma_mword >> 8); | ||
252 | smode = id->dma_1word & (id->dma_1word >> 8); | ||
253 | |||
254 | if (mmode != 0) { | ||
255 | /* enable multi */ | ||
256 | cy82c693_dma_enable(drive, (mmode >> 1), 0); | ||
257 | } else if (smode != 0) { | ||
258 | /* enable single */ | ||
259 | cy82c693_dma_enable(drive, (smode >> 1), 1); | ||
260 | } | ||
261 | } | ||
262 | } | ||
263 | return __ide_dma_on(drive); | ||
264 | } | ||
265 | |||
266 | static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio) | 228 | static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio) |
267 | { | 229 | { |
268 | ide_hwif_t *hwif = HWIF(drive); | 230 | ide_hwif_t *hwif = HWIF(drive); |
@@ -429,11 +391,7 @@ static unsigned int __devinit init_chipset_cy82c693(struct pci_dev *dev, const c | |||
429 | static void __devinit init_hwif_cy82c693(ide_hwif_t *hwif) | 391 | static void __devinit init_hwif_cy82c693(ide_hwif_t *hwif) |
430 | { | 392 | { |
431 | hwif->set_pio_mode = &cy82c693_set_pio_mode; | 393 | hwif->set_pio_mode = &cy82c693_set_pio_mode; |
432 | 394 | hwif->set_dma_mode = &cy82c693_set_dma_mode; | |
433 | if (hwif->dma_base == 0) | ||
434 | return; | ||
435 | |||
436 | hwif->ide_dma_on = &cy82c693_ide_dma_on; | ||
437 | } | 395 | } |
438 | 396 | ||
439 | static void __devinit init_iops_cy82c693(ide_hwif_t *hwif) | 397 | static void __devinit init_iops_cy82c693(ide_hwif_t *hwif) |
@@ -454,11 +412,11 @@ static const struct ide_port_info cy82c693_chipset __devinitdata = { | |||
454 | .init_iops = init_iops_cy82c693, | 412 | .init_iops = init_iops_cy82c693, |
455 | .init_hwif = init_hwif_cy82c693, | 413 | .init_hwif = init_hwif_cy82c693, |
456 | .chipset = ide_cy82c693, | 414 | .chipset = ide_cy82c693, |
457 | .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_TRUST_BIOS_FOR_DMA | | 415 | .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_CY82C693 | |
458 | IDE_HFLAG_BOOTABLE, | 416 | IDE_HFLAG_BOOTABLE, |
459 | .pio_mask = ATA_PIO4, | 417 | .pio_mask = ATA_PIO4, |
460 | .swdma_mask = ATA_SWDMA2_ONLY, | 418 | .swdma_mask = ATA_SWDMA2, |
461 | .mwdma_mask = ATA_MWDMA2_ONLY, | 419 | .mwdma_mask = ATA_MWDMA2, |
462 | }; | 420 | }; |
463 | 421 | ||
464 | static int __devinit cy82c693_init_one(struct pci_dev *dev, const struct pci_device_id *id) | 422 | static int __devinit cy82c693_init_one(struct pci_dev *dev, const struct pci_device_id *id) |
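
In the cy82c693 conversion the old ide_dma_on-time mode selection disappears and ->set_dma_mode() derives the chip's register value straight from the transfer code. The arithmetic relies on the conventional XFER numbering (single-word DMA modes at 0x10 to 0x12, multi-word at 0x20 to 0x22; that layout is an assumption stated here, not spelled out in the hunk): bit 4 distinguishes SWDMA from MWDMA and the low two bits give the mode within the class. A stand-alone sketch of that decode:

#include <stdio.h>

/* Assumed ATA transfer codes: XFER_SW_DMA_0 = 0x10, XFER_MW_DMA_0 = 0x20. */
static unsigned char cy82_data_byte(unsigned char mode)
{
        unsigned char single = (mode & 0x10) >> 4;      /* 1 for SWDMA, 0 for MWDMA */

        /* same expression as the driver: mode number plus the single/multi bit */
        return (mode & 3) | (single << 2);
}

int main(void)
{
        unsigned char modes[] = { 0x10, 0x11, 0x12, 0x20, 0x21, 0x22 };
        unsigned int i;

        for (i = 0; i < sizeof(modes); i++)
                printf("xfer 0x%02x -> data byte 0x%02x (%s-word DMA %d)\n",
                       modes[i], cy82_data_byte(modes[i]),
                       (modes[i] & 0x10) ? "single" : "multi", modes[i] & 3);
        return 0;
}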
diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/pci/delkin_cb.c index 83829081640a..26aa492071bb 100644 --- a/drivers/ide/pci/delkin_cb.c +++ b/drivers/ide/pci/delkin_cb.c | |||
@@ -80,7 +80,7 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id) | |||
80 | hw.irq = dev->irq; | 80 | hw.irq = dev->irq; |
81 | hw.chipset = ide_pci; /* this enables IRQ sharing */ | 81 | hw.chipset = ide_pci; /* this enables IRQ sharing */ |
82 | 82 | ||
83 | rc = ide_register_hw(&hw, &ide_undecoded_slave, 0, &hwif); | 83 | rc = ide_register_hw(&hw, &ide_undecoded_slave, &hwif); |
84 | if (rc < 0) { | 84 | if (rc < 0) { |
85 | printk(KERN_ERR "delkin_cb: ide_register_hw failed (%d)\n", rc); | 85 | printk(KERN_ERR "delkin_cb: ide_register_hw failed (%d)\n", rc); |
86 | pci_disable_device(dev); | 86 | pci_disable_device(dev); |
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c index 3777fb8c8043..12685939a813 100644 --- a/drivers/ide/pci/hpt366.c +++ b/drivers/ide/pci/hpt366.c | |||
@@ -725,15 +725,18 @@ static void hpt3xx_set_pio_mode(ide_drive_t *drive, const u8 pio) | |||
725 | hpt3xx_set_mode(drive, XFER_PIO_0 + pio); | 725 | hpt3xx_set_mode(drive, XFER_PIO_0 + pio); |
726 | } | 726 | } |
727 | 727 | ||
728 | static int hpt3xx_quirkproc(ide_drive_t *drive) | 728 | static void hpt3xx_quirkproc(ide_drive_t *drive) |
729 | { | 729 | { |
730 | struct hd_driveid *id = drive->id; | 730 | struct hd_driveid *id = drive->id; |
731 | const char **list = quirk_drives; | 731 | const char **list = quirk_drives; |
732 | 732 | ||
733 | while (*list) | 733 | while (*list) |
734 | if (strstr(id->model, *list++)) | 734 | if (strstr(id->model, *list++)) { |
735 | return 1; | 735 | drive->quirk_list = 1; |
736 | return 0; | 736 | return; |
737 | } | ||
738 | |||
739 | drive->quirk_list = 0; | ||
737 | } | 740 | } |
738 | 741 | ||
739 | static void hpt3xx_maskproc(ide_drive_t *drive, int mask) | 742 | static void hpt3xx_maskproc(ide_drive_t *drive, int mask) |
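
hpt366 shows the second recurring conversion in this series (pdc202xx_new and pdc202xx_old below get the identical treatment): ->quirkproc() used to return a quirk level for the caller to store, and now it writes the level into drive->quirk_list itself and returns void. A sketch of the new shape with an invented prefix and quirk table; only the drive->quirk_list convention and the strstr() match loop come from the hunks:

#include <linux/ide.h>
#include <linux/string.h>

/* Hypothetical list of model substrings that need quirk handling. */
static const char *example_quirk_drives[] = {
        "SOME BROKEN DRIVE",
        "ANOTHER BROKEN DRIVE",
        NULL
};

static void example_quirkproc(ide_drive_t *drive)
{
        const char **list, *model = drive->id->model;

        for (list = example_quirk_drives; *list != NULL; list++)
                if (strstr(model, *list) != NULL) {
                        drive->quirk_list = 2;  /* quirk level; 0 means none */
                        return;
                }

        drive->quirk_list = 0;
}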
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c index 99b7d763b6c7..e610a5340fdc 100644 --- a/drivers/ide/pci/it821x.c +++ b/drivers/ide/pci/it821x.c | |||
@@ -431,33 +431,29 @@ static u8 __devinit ata66_it821x(ide_hwif_t *hwif) | |||
431 | } | 431 | } |
432 | 432 | ||
433 | /** | 433 | /** |
434 | * it821x_fixup - post init callback | 434 | * it821x_quirkproc - post init callback |
435 | * @hwif: interface | 435 | * @drive: drive |
436 | * | 436 | * |
437 | * This callback is run after the drives have been probed but | 437 | * This callback is run after the drive has been probed but |
438 | * before anything gets attached. It allows drivers to do any | 438 | * before anything gets attached. It allows drivers to do any |
439 | * final tuning that is needed, or fixups to work around bugs. | 439 | * final tuning that is needed, or fixups to work around bugs. |
440 | */ | 440 | */ |
441 | 441 | ||
442 | static void __devinit it821x_fixups(ide_hwif_t *hwif) | 442 | static void __devinit it821x_quirkproc(ide_drive_t *drive) |
443 | { | 443 | { |
444 | struct it821x_dev *itdev = ide_get_hwifdata(hwif); | 444 | struct it821x_dev *itdev = ide_get_hwifdata(drive->hwif); |
445 | int i; | 445 | struct hd_driveid *id = drive->id; |
446 | u16 *idbits = (u16 *)drive->id; | ||
446 | 447 | ||
447 | if(!itdev->smart) { | 448 | if (!itdev->smart) { |
448 | /* | 449 | /* |
449 | * If we are in pass through mode then not much | 450 | * If we are in pass through mode then not much |
450 | * needs to be done, but we do bother to clear the | 451 | * needs to be done, but we do bother to clear the |
451 | * IRQ mask as we may well be in PIO (eg rev 0x10) | 452 | * IRQ mask as we may well be in PIO (eg rev 0x10) |
452 | * for now and we know unmasking is safe on this chipset. | 453 | * for now and we know unmasking is safe on this chipset. |
453 | */ | 454 | */ |
454 | for (i = 0; i < 2; i++) { | 455 | drive->unmask = 1; |
455 | ide_drive_t *drive = &hwif->drives[i]; | 456 | } else { |
456 | if(drive->present) | ||
457 | drive->unmask = 1; | ||
458 | } | ||
459 | return; | ||
460 | } | ||
461 | /* | 457 | /* |
462 | * Perform fixups on smart mode. We need to "lose" some | 458 | * Perform fixups on smart mode. We need to "lose" some |
463 | * capabilities the firmware lacks but does not filter, and | 459 | * capabilities the firmware lacks but does not filter, and |
@@ -465,16 +461,6 @@ static void __devinit it821x_fixups(ide_hwif_t *hwif) | |||
465 | * in RAID mode. | 461 | * in RAID mode. |
466 | */ | 462 | */ |
467 | 463 | ||
468 | for(i = 0; i < 2; i++) { | ||
469 | ide_drive_t *drive = &hwif->drives[i]; | ||
470 | struct hd_driveid *id; | ||
471 | u16 *idbits; | ||
472 | |||
473 | if(!drive->present) | ||
474 | continue; | ||
475 | id = drive->id; | ||
476 | idbits = (u16 *)drive->id; | ||
477 | |||
478 | /* Check for RAID v native */ | 464 | /* Check for RAID v native */ |
479 | if(strstr(id->model, "Integrated Technology Express")) { | 465 | if(strstr(id->model, "Integrated Technology Express")) { |
480 | /* In raid mode the ident block is slightly buggy | 466 | /* In raid mode the ident block is slightly buggy |
@@ -537,6 +523,8 @@ static void __devinit init_hwif_it821x(ide_hwif_t *hwif) | |||
537 | struct it821x_dev *idev = kzalloc(sizeof(struct it821x_dev), GFP_KERNEL); | 523 | struct it821x_dev *idev = kzalloc(sizeof(struct it821x_dev), GFP_KERNEL); |
538 | u8 conf; | 524 | u8 conf; |
539 | 525 | ||
526 | hwif->quirkproc = &it821x_quirkproc; | ||
527 | |||
540 | if (idev == NULL) { | 528 | if (idev == NULL) { |
541 | printk(KERN_ERR "it821x: out of memory, falling back to legacy behaviour.\n"); | 529 | printk(KERN_ERR "it821x: out of memory, falling back to legacy behaviour.\n"); |
542 | return; | 530 | return; |
@@ -633,7 +621,6 @@ static unsigned int __devinit init_chipset_it821x(struct pci_dev *dev, const cha | |||
633 | .name = name_str, \ | 621 | .name = name_str, \ |
634 | .init_chipset = init_chipset_it821x, \ | 622 | .init_chipset = init_chipset_it821x, \ |
635 | .init_hwif = init_hwif_it821x, \ | 623 | .init_hwif = init_hwif_it821x, \ |
636 | .fixup = it821x_fixups, \ | ||
637 | .host_flags = IDE_HFLAG_BOOTABLE, \ | 624 | .host_flags = IDE_HFLAG_BOOTABLE, \ |
638 | .pio_mask = ATA_PIO4, \ | 625 | .pio_mask = ATA_PIO4, \ |
639 | } | 626 | } |
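
The it821x hunk makes the related interface change visible: the old per-interface ->fixup(hwif), which had to loop over hwif->drives[] and check drive->present itself, becomes a per-drive ->quirkproc(drive) that the core invokes once for each probed drive. Stripped to the unmask handling only, the before/after shapes look roughly like this (simplified from the hunk; the smart-mode ident fixups are left out):

#include <linux/ide.h>

/* Old style: one callback per interface, iterating over its drives. */
static void example_fixup(ide_hwif_t *hwif)
{
        int i;

        for (i = 0; i < 2; i++) {
                ide_drive_t *drive = &hwif->drives[i];

                if (drive->present)
                        drive->unmask = 1;
        }
}

/* New style: the core does the iteration, the driver sees one drive. */
static void example_quirkproc(ide_drive_t *drive)
{
        drive->unmask = 1;
}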
diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c index ef4a99b99d1f..89d2363a1ebd 100644 --- a/drivers/ide/pci/pdc202xx_new.c +++ b/drivers/ide/pci/pdc202xx_new.c | |||
@@ -203,14 +203,17 @@ static u8 pdcnew_cable_detect(ide_hwif_t *hwif) | |||
203 | return ATA_CBL_PATA80; | 203 | return ATA_CBL_PATA80; |
204 | } | 204 | } |
205 | 205 | ||
206 | static int pdcnew_quirkproc(ide_drive_t *drive) | 206 | static void pdcnew_quirkproc(ide_drive_t *drive) |
207 | { | 207 | { |
208 | const char **list, *model = drive->id->model; | 208 | const char **list, *model = drive->id->model; |
209 | 209 | ||
210 | for (list = pdc_quirk_drives; *list != NULL; list++) | 210 | for (list = pdc_quirk_drives; *list != NULL; list++) |
211 | if (strstr(model, *list) != NULL) | 211 | if (strstr(model, *list) != NULL) { |
212 | return 2; | 212 | drive->quirk_list = 2; |
213 | return 0; | 213 | return; |
214 | } | ||
215 | |||
216 | drive->quirk_list = 0; | ||
214 | } | 217 | } |
215 | 218 | ||
216 | static void pdcnew_reset(ide_drive_t *drive) | 219 | static void pdcnew_reset(ide_drive_t *drive) |
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c index 67b2781e2213..3a1e081fe390 100644 --- a/drivers/ide/pci/pdc202xx_old.c +++ b/drivers/ide/pci/pdc202xx_old.c | |||
@@ -176,14 +176,17 @@ static void pdc_old_disable_66MHz_clock(ide_hwif_t *hwif) | |||
176 | outb(clock & ~(hwif->channel ? 0x08 : 0x02), clock_reg); | 176 | outb(clock & ~(hwif->channel ? 0x08 : 0x02), clock_reg); |
177 | } | 177 | } |
178 | 178 | ||
179 | static int pdc202xx_quirkproc (ide_drive_t *drive) | 179 | static void pdc202xx_quirkproc(ide_drive_t *drive) |
180 | { | 180 | { |
181 | const char **list, *model = drive->id->model; | 181 | const char **list, *model = drive->id->model; |
182 | 182 | ||
183 | for (list = pdc_quirk_drives; *list != NULL; list++) | 183 | for (list = pdc_quirk_drives; *list != NULL; list++) |
184 | if (strstr(model, *list) != NULL) | 184 | if (strstr(model, *list) != NULL) { |
185 | return 2; | 185 | drive->quirk_list = 2; |
186 | return 0; | 186 | return; |
187 | } | ||
188 | |||
189 | drive->quirk_list = 0; | ||
187 | } | 190 | } |
188 | 191 | ||
189 | static void pdc202xx_old_ide_dma_start(ide_drive_t *drive) | 192 | static void pdc202xx_old_ide_dma_start(ide_drive_t *drive) |
diff --git a/drivers/ide/pci/sc1200.c b/drivers/ide/pci/sc1200.c index fef20bd4aa78..32fdf53379f5 100644 --- a/drivers/ide/pci/sc1200.c +++ b/drivers/ide/pci/sc1200.c | |||
@@ -220,9 +220,9 @@ static void sc1200_set_pio_mode(ide_drive_t *drive, const u8 pio) | |||
220 | } | 220 | } |
221 | if (mode != -1) { | 221 | if (mode != -1) { |
222 | printk("SC1200: %s: changing (U)DMA mode\n", drive->name); | 222 | printk("SC1200: %s: changing (U)DMA mode\n", drive->name); |
223 | hwif->dma_off_quietly(drive); | 223 | ide_dma_off_quietly(drive); |
224 | if (ide_set_dma_mode(drive, mode) == 0) | 224 | if (ide_set_dma_mode(drive, mode) == 0 && drive->using_dma) |
225 | hwif->dma_host_on(drive); | 225 | hwif->dma_host_set(drive, 1); |
226 | return; | 226 | return; |
227 | } | 227 | } |
228 | 228 | ||
diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c index e9bd269547bb..877c09bf4829 100644 --- a/drivers/ide/pci/serverworks.c +++ b/drivers/ide/pci/serverworks.c | |||
@@ -164,25 +164,12 @@ static void svwks_set_dma_mode(ide_drive_t *drive, const u8 speed) | |||
164 | ultra_timing &= ~(0x0F << (4*unit)); | 164 | ultra_timing &= ~(0x0F << (4*unit)); |
165 | ultra_enable &= ~(0x01 << drive->dn); | 165 | ultra_enable &= ~(0x01 << drive->dn); |
166 | 166 | ||
167 | switch(speed) { | 167 | if (speed >= XFER_UDMA_0) { |
168 | case XFER_MW_DMA_2: | 168 | dma_timing |= dma_modes[2]; |
169 | case XFER_MW_DMA_1: | 169 | ultra_timing |= (udma_modes[speed - XFER_UDMA_0] << (4 * unit)); |
170 | case XFER_MW_DMA_0: | 170 | ultra_enable |= (0x01 << drive->dn); |
171 | dma_timing |= dma_modes[speed - XFER_MW_DMA_0]; | 171 | } else if (speed >= XFER_MW_DMA_0) |
172 | break; | 172 | dma_timing |= dma_modes[speed - XFER_MW_DMA_0]; |
173 | |||
174 | case XFER_UDMA_5: | ||
175 | case XFER_UDMA_4: | ||
176 | case XFER_UDMA_3: | ||
177 | case XFER_UDMA_2: | ||
178 | case XFER_UDMA_1: | ||
179 | case XFER_UDMA_0: | ||
180 | dma_timing |= dma_modes[2]; | ||
181 | ultra_timing |= ((udma_modes[speed - XFER_UDMA_0]) << (4*unit)); | ||
182 | ultra_enable |= (0x01 << drive->dn); | ||
183 | default: | ||
184 | break; | ||
185 | } | ||
186 | 173 | ||
187 | pci_write_config_byte(dev, drive_pci2[drive->dn], dma_timing); | 174 | pci_write_config_byte(dev, drive_pci2[drive->dn], dma_timing); |
188 | pci_write_config_byte(dev, (0x56|hwif->channel), ultra_timing); | 175 | pci_write_config_byte(dev, (0x56|hwif->channel), ultra_timing); |
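
The serverworks rewrite collapses the per-mode switch into two range tests. That works because UDMA transfer codes sort above MWDMA codes in the XFER numbering (XFER_UDMA_0 = 0x40, XFER_MW_DMA_0 = 0x20 is the assumption the sketch spells out), so a simple cascade classifies the requested speed. The timing-table contents below are placeholders, not the driver's real tables:

/* Assumed transfer-code layout; the real constants live in <linux/ide.h>. */
#define XFER_MW_DMA_0   0x20
#define XFER_UDMA_0     0x40

/* Placeholder timing tables, indexed by mode number within each class. */
static const unsigned char dma_modes[3]  = { 0, 0, 0 };            /* MWDMA0..2 */
static const unsigned char udma_modes[6] = { 0, 0, 0, 0, 0, 0 };   /* UDMA0..5  */

static void classify_speed(unsigned char speed, unsigned char *dma_timing,
                           unsigned char *ultra_timing, unsigned char *ultra_enable,
                           int unit, int dn)
{
        if (speed >= XFER_UDMA_0) {
                /* any UDMA mode also needs the fastest MWDMA command timing */
                *dma_timing   |= dma_modes[2];
                *ultra_timing |= udma_modes[speed - XFER_UDMA_0] << (4 * unit);
                *ultra_enable |= 0x01 << dn;
        } else if (speed >= XFER_MW_DMA_0) {
                *dma_timing   |= dma_modes[speed - XFER_MW_DMA_0];
        }
        /* PIO and SWDMA requests fall through and leave the DMA timings alone */
}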
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c index 7e9dade5648d..9e0be7d54980 100644 --- a/drivers/ide/pci/sgiioc4.c +++ b/drivers/ide/pci/sgiioc4.c | |||
@@ -277,21 +277,6 @@ sgiioc4_ide_dma_end(ide_drive_t * drive) | |||
277 | return dma_stat; | 277 | return dma_stat; |
278 | } | 278 | } |
279 | 279 | ||
280 | static int | ||
281 | sgiioc4_ide_dma_on(ide_drive_t * drive) | ||
282 | { | ||
283 | drive->using_dma = 1; | ||
284 | |||
285 | return 0; | ||
286 | } | ||
287 | |||
288 | static void sgiioc4_dma_off_quietly(ide_drive_t *drive) | ||
289 | { | ||
290 | drive->using_dma = 0; | ||
291 | |||
292 | drive->hwif->dma_host_off(drive); | ||
293 | } | ||
294 | |||
295 | static void sgiioc4_set_dma_mode(ide_drive_t *drive, const u8 speed) | 280 | static void sgiioc4_set_dma_mode(ide_drive_t *drive, const u8 speed) |
296 | { | 281 | { |
297 | } | 282 | } |
@@ -303,13 +288,10 @@ sgiioc4_ide_dma_test_irq(ide_drive_t * drive) | |||
303 | return sgiioc4_checkirq(HWIF(drive)); | 288 | return sgiioc4_checkirq(HWIF(drive)); |
304 | } | 289 | } |
305 | 290 | ||
306 | static void sgiioc4_dma_host_on(ide_drive_t * drive) | 291 | static void sgiioc4_dma_host_set(ide_drive_t *drive, int on) |
307 | { | ||
308 | } | ||
309 | |||
310 | static void sgiioc4_dma_host_off(ide_drive_t * drive) | ||
311 | { | 292 | { |
312 | sgiioc4_clearirq(drive); | 293 | if (!on) |
294 | sgiioc4_clearirq(drive); | ||
313 | } | 295 | } |
314 | 296 | ||
315 | static void | 297 | static void |
@@ -593,14 +575,11 @@ ide_init_sgiioc4(ide_hwif_t * hwif) | |||
593 | 575 | ||
594 | hwif->mwdma_mask = ATA_MWDMA2_ONLY; | 576 | hwif->mwdma_mask = ATA_MWDMA2_ONLY; |
595 | 577 | ||
578 | hwif->dma_host_set = &sgiioc4_dma_host_set; | ||
596 | hwif->dma_setup = &sgiioc4_ide_dma_setup; | 579 | hwif->dma_setup = &sgiioc4_ide_dma_setup; |
597 | hwif->dma_start = &sgiioc4_ide_dma_start; | 580 | hwif->dma_start = &sgiioc4_ide_dma_start; |
598 | hwif->ide_dma_end = &sgiioc4_ide_dma_end; | 581 | hwif->ide_dma_end = &sgiioc4_ide_dma_end; |
599 | hwif->ide_dma_on = &sgiioc4_ide_dma_on; | ||
600 | hwif->dma_off_quietly = &sgiioc4_dma_off_quietly; | ||
601 | hwif->ide_dma_test_irq = &sgiioc4_ide_dma_test_irq; | 582 | hwif->ide_dma_test_irq = &sgiioc4_ide_dma_test_irq; |
602 | hwif->dma_host_on = &sgiioc4_dma_host_on; | ||
603 | hwif->dma_host_off = &sgiioc4_dma_host_off; | ||
604 | hwif->dma_lost_irq = &sgiioc4_dma_lost_irq; | 583 | hwif->dma_lost_irq = &sgiioc4_dma_lost_irq; |
605 | hwif->dma_timeout = &ide_dma_timeout; | 584 | hwif->dma_timeout = &ide_dma_timeout; |
606 | } | 585 | } |
@@ -614,6 +593,7 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev) | |||
614 | ide_hwif_t *hwif; | 593 | ide_hwif_t *hwif; |
615 | int h; | 594 | int h; |
616 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | 595 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; |
596 | hw_regs_t hw; | ||
617 | 597 | ||
618 | /* | 598 | /* |
619 | * Find an empty HWIF; if none available, return -ENOMEM. | 599 | * Find an empty HWIF; if none available, return -ENOMEM. |
@@ -653,21 +633,16 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev) | |||
653 | return -ENOMEM; | 633 | return -ENOMEM; |
654 | } | 634 | } |
655 | 635 | ||
656 | if (hwif->io_ports[IDE_DATA_OFFSET] != cmd_base) { | 636 | /* Initialize the IO registers */ |
657 | hw_regs_t hw; | 637 | memset(&hw, 0, sizeof(hw)); |
658 | 638 | sgiioc4_init_hwif_ports(&hw, cmd_base, ctl, irqport); | |
659 | /* Initialize the IO registers */ | 639 | hw.irq = dev->irq; |
660 | memset(&hw, 0, sizeof(hw)); | 640 | hw.chipset = ide_pci; |
661 | sgiioc4_init_hwif_ports(&hw, cmd_base, ctl, irqport); | 641 | hw.dev = &dev->dev; |
662 | memcpy(hwif->io_ports, hw.io_ports, sizeof(hwif->io_ports)); | 642 | ide_init_port_hw(hwif, &hw); |
663 | hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET]; | ||
664 | } | ||
665 | 643 | ||
666 | hwif->irq = dev->irq; | ||
667 | hwif->chipset = ide_pci; | ||
668 | hwif->pci_dev = dev; | 644 | hwif->pci_dev = dev; |
669 | hwif->channel = 0; /* Single Channel chip */ | 645 | hwif->channel = 0; /* Single Channel chip */ |
670 | hwif->gendev.parent = &dev->dev;/* setup proper ancestral information */ | ||
671 | 646 | ||
672 | /* The IOC4 uses MMIO rather than Port IO. */ | 647 | /* The IOC4 uses MMIO rather than Port IO. */ |
673 | default_hwif_mmiops(hwif); | 648 | default_hwif_mmiops(hwif); |
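
Besides the dma_host_set conversion, sgiioc4 switches to the port-setup idiom used throughout the rest of this series: rather than copying io_ports[] and patching irq, chipset and gendev.parent on the hwif by hand, the driver fills a hw_regs_t on the stack and lets ide_init_port_hw() transfer it in one go. Condensed from the hunk (a fragment, not a complete function; sgiioc4_init_hwif_ports() is the driver's own port-layout helper):

        hw_regs_t hw;

        /* describe the interface once, in a throw-away structure ... */
        memset(&hw, 0, sizeof(hw));
        sgiioc4_init_hwif_ports(&hw, cmd_base, ctl, irqport);
        hw.irq     = dev->irq;
        hw.chipset = ide_pci;
        hw.dev     = &dev->dev;  /* parent device, previously set via gendev.parent */

        /* ... and let the core copy it into the hwif */
        ide_init_port_hw(hwif, &hw);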
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c index 7b45eaf5afd9..908f37b4e0ee 100644 --- a/drivers/ide/pci/siimage.c +++ b/drivers/ide/pci/siimage.c | |||
@@ -713,9 +713,6 @@ static int is_dev_seagate_sata(ide_drive_t *drive) | |||
713 | const char *s = &drive->id->model[0]; | 713 | const char *s = &drive->id->model[0]; |
714 | unsigned len; | 714 | unsigned len; |
715 | 715 | ||
716 | if (!drive->present) | ||
717 | return 0; | ||
718 | |||
719 | len = strnlen(s, sizeof(drive->id->model)); | 716 | len = strnlen(s, sizeof(drive->id->model)); |
720 | 717 | ||
721 | if ((len > 4) && (!memcmp(s, "ST", 2))) { | 718 | if ((len > 4) && (!memcmp(s, "ST", 2))) { |
@@ -730,18 +727,20 @@ static int is_dev_seagate_sata(ide_drive_t *drive) | |||
730 | } | 727 | } |
731 | 728 | ||
732 | /** | 729 | /** |
733 | * siimage_fixup - post probe fixups | 730 | * sil_quirkproc - post probe fixups |
734 | * @hwif: interface to fix up | 731 | * @drive: drive |
735 | * | 732 | * |
736 | * Called after drive probe we use this to decide whether the | 733 | * Called after drive probe we use this to decide whether the |
737 | * Seagate fixup must be applied. This used to be in init_iops but | 734 | * Seagate fixup must be applied. This used to be in init_iops but |
738 | * that can occur before we know what drives are present. | 735 | * that can occur before we know what drives are present. |
739 | */ | 736 | */ |
740 | 737 | ||
741 | static void __devinit siimage_fixup(ide_hwif_t *hwif) | 738 | static void __devinit sil_quirkproc(ide_drive_t *drive) |
742 | { | 739 | { |
740 | ide_hwif_t *hwif = drive->hwif; | ||
741 | |||
743 | /* Try and raise the rqsize */ | 742 | /* Try and raise the rqsize */ |
744 | if (!is_sata(hwif) || !is_dev_seagate_sata(&hwif->drives[0])) | 743 | if (!is_sata(hwif) || !is_dev_seagate_sata(drive)) |
745 | hwif->rqsize = 128; | 744 | hwif->rqsize = 128; |
746 | } | 745 | } |
747 | 746 | ||
@@ -804,6 +803,7 @@ static void __devinit init_hwif_siimage(ide_hwif_t *hwif) | |||
804 | 803 | ||
805 | hwif->set_pio_mode = &sil_set_pio_mode; | 804 | hwif->set_pio_mode = &sil_set_pio_mode; |
806 | hwif->set_dma_mode = &sil_set_dma_mode; | 805 | hwif->set_dma_mode = &sil_set_dma_mode; |
806 | hwif->quirkproc = &sil_quirkproc; | ||
807 | 807 | ||
808 | if (sata) { | 808 | if (sata) { |
809 | static int first = 1; | 809 | static int first = 1; |
@@ -842,7 +842,6 @@ static void __devinit init_hwif_siimage(ide_hwif_t *hwif) | |||
842 | .init_chipset = init_chipset_siimage, \ | 842 | .init_chipset = init_chipset_siimage, \ |
843 | .init_iops = init_iops_siimage, \ | 843 | .init_iops = init_iops_siimage, \ |
844 | .init_hwif = init_hwif_siimage, \ | 844 | .init_hwif = init_hwif_siimage, \ |
845 | .fixup = siimage_fixup, \ | ||
846 | .host_flags = IDE_HFLAG_BOOTABLE, \ | 845 | .host_flags = IDE_HFLAG_BOOTABLE, \ |
847 | .pio_mask = ATA_PIO4, \ | 846 | .pio_mask = ATA_PIO4, \ |
848 | .mwdma_mask = ATA_MWDMA2, \ | 847 | .mwdma_mask = ATA_MWDMA2, \ |
diff --git a/drivers/ide/pci/sl82c105.c b/drivers/ide/pci/sl82c105.c index 069f104fdcea..c7a125b66c29 100644 --- a/drivers/ide/pci/sl82c105.c +++ b/drivers/ide/pci/sl82c105.c | |||
@@ -13,6 +13,7 @@ | |||
13 | * -- Benjamin Herrenschmidt (01/11/03) benh@kernel.crashing.org | 13 | * -- Benjamin Herrenschmidt (01/11/03) benh@kernel.crashing.org |
14 | * | 14 | * |
15 | * Copyright (C) 2006-2007 MontaVista Software, Inc. <source@mvista.com> | 15 | * Copyright (C) 2006-2007 MontaVista Software, Inc. <source@mvista.com> |
16 | * Copyright (C) 2007 Bartlomiej Zolnierkiewicz | ||
16 | */ | 17 | */ |
17 | 18 | ||
18 | #include <linux/types.h> | 19 | #include <linux/types.h> |
@@ -90,14 +91,8 @@ static void sl82c105_set_pio_mode(ide_drive_t *drive, const u8 pio) | |||
90 | drive->drive_data &= 0xffff0000; | 91 | drive->drive_data &= 0xffff0000; |
91 | drive->drive_data |= drv_ctrl; | 92 | drive->drive_data |= drv_ctrl; |
92 | 93 | ||
93 | if (!drive->using_dma) { | 94 | pci_write_config_word(dev, reg, drv_ctrl); |
94 | /* | 95 | pci_read_config_word (dev, reg, &drv_ctrl); |
95 | * If we are actually using MW DMA, then we can not | ||
96 | * reprogram the interface drive control register. | ||
97 | */ | ||
98 | pci_write_config_word(dev, reg, drv_ctrl); | ||
99 | pci_read_config_word (dev, reg, &drv_ctrl); | ||
100 | } | ||
101 | 96 | ||
102 | printk(KERN_DEBUG "%s: selected %s (%dns) (%04X)\n", drive->name, | 97 | printk(KERN_DEBUG "%s: selected %s (%dns) (%04X)\n", drive->name, |
103 | ide_xfer_verbose(pio + XFER_PIO_0), | 98 | ide_xfer_verbose(pio + XFER_PIO_0), |
@@ -123,17 +118,6 @@ static void sl82c105_set_dma_mode(ide_drive_t *drive, const u8 speed) | |||
123 | */ | 118 | */ |
124 | drive->drive_data &= 0x0000ffff; | 119 | drive->drive_data &= 0x0000ffff; |
125 | drive->drive_data |= (unsigned long)drv_ctrl << 16; | 120 | drive->drive_data |= (unsigned long)drv_ctrl << 16; |
126 | |||
127 | /* | ||
128 | * If we are already using DMA, we just reprogram | ||
129 | * the drive control register. | ||
130 | */ | ||
131 | if (drive->using_dma) { | ||
132 | struct pci_dev *dev = HWIF(drive)->pci_dev; | ||
133 | int reg = 0x44 + drive->dn * 4; | ||
134 | |||
135 | pci_write_config_word(dev, reg, drv_ctrl); | ||
136 | } | ||
137 | } | 121 | } |
138 | 122 | ||
139 | /* | 123 | /* |
@@ -201,6 +185,11 @@ static void sl82c105_dma_start(ide_drive_t *drive) | |||
201 | { | 185 | { |
202 | ide_hwif_t *hwif = HWIF(drive); | 186 | ide_hwif_t *hwif = HWIF(drive); |
203 | struct pci_dev *dev = hwif->pci_dev; | 187 | struct pci_dev *dev = hwif->pci_dev; |
188 | int reg = 0x44 + drive->dn * 4; | ||
189 | |||
190 | DBG(("%s(drive:%s)\n", __FUNCTION__, drive->name)); | ||
191 | |||
192 | pci_write_config_word(dev, reg, drive->drive_data >> 16); | ||
204 | 193 | ||
205 | sl82c105_reset_host(dev); | 194 | sl82c105_reset_host(dev); |
206 | ide_dma_start(drive); | 195 | ide_dma_start(drive); |
@@ -214,64 +203,24 @@ static void sl82c105_dma_timeout(ide_drive_t *drive) | |||
214 | ide_dma_timeout(drive); | 203 | ide_dma_timeout(drive); |
215 | } | 204 | } |
216 | 205 | ||
217 | static int sl82c105_ide_dma_on(ide_drive_t *drive) | 206 | static int sl82c105_dma_end(ide_drive_t *drive) |
218 | { | ||
219 | struct pci_dev *dev = HWIF(drive)->pci_dev; | ||
220 | int rc, reg = 0x44 + drive->dn * 4; | ||
221 | |||
222 | DBG(("sl82c105_ide_dma_on(drive:%s)\n", drive->name)); | ||
223 | |||
224 | rc = __ide_dma_on(drive); | ||
225 | if (rc == 0) { | ||
226 | pci_write_config_word(dev, reg, drive->drive_data >> 16); | ||
227 | |||
228 | printk(KERN_INFO "%s: DMA enabled\n", drive->name); | ||
229 | } | ||
230 | return rc; | ||
231 | } | ||
232 | |||
233 | static void sl82c105_dma_off_quietly(ide_drive_t *drive) | ||
234 | { | 207 | { |
235 | struct pci_dev *dev = HWIF(drive)->pci_dev; | 208 | struct pci_dev *dev = HWIF(drive)->pci_dev; |
236 | int reg = 0x44 + drive->dn * 4; | 209 | int reg = 0x44 + drive->dn * 4; |
210 | int ret; | ||
237 | 211 | ||
238 | DBG(("sl82c105_dma_off_quietly(drive:%s)\n", drive->name)); | 212 | DBG(("%s(drive:%s)\n", __FUNCTION__, drive->name)); |
239 | 213 | ||
240 | pci_write_config_word(dev, reg, drive->drive_data); | 214 | ret = __ide_dma_end(drive); |
241 | 215 | ||
242 | ide_dma_off_quietly(drive); | 216 | pci_write_config_word(dev, reg, drive->drive_data); |
243 | } | ||
244 | 217 | ||
245 | /* | 218 | return ret; |
246 | * Ok, that is nasty, but we must make sure the DMA timings | ||
247 | * won't be used for a PIO access. The solution here is | ||
248 | * to make sure the 16 bits mode is diabled on the channel | ||
249 | * when DMA is enabled, thus causing the chip to use PIO0 | ||
250 | * timings for those operations. | ||
251 | */ | ||
252 | static void sl82c105_selectproc(ide_drive_t *drive) | ||
253 | { | ||
254 | ide_hwif_t *hwif = HWIF(drive); | ||
255 | struct pci_dev *dev = hwif->pci_dev; | ||
256 | u32 val, old, mask; | ||
257 | |||
258 | //DBG(("sl82c105_selectproc(drive:%s)\n", drive->name)); | ||
259 | |||
260 | mask = hwif->channel ? CTRL_P1F16 : CTRL_P0F16; | ||
261 | old = val = (u32)pci_get_drvdata(dev); | ||
262 | if (drive->using_dma) | ||
263 | val &= ~mask; | ||
264 | else | ||
265 | val |= mask; | ||
266 | if (old != val) { | ||
267 | pci_write_config_dword(dev, 0x40, val); | ||
268 | pci_set_drvdata(dev, (void *)val); | ||
269 | } | ||
270 | } | 219 | } |
271 | 220 | ||
272 | /* | 221 | /* |
273 | * ATA reset will clear the 16 bits mode in the control | 222 | * ATA reset will clear the 16 bits mode in the control |
274 | * register, we need to update our cache | 223 | * register, we need to reprogram it |
275 | */ | 224 | */ |
276 | static void sl82c105_resetproc(ide_drive_t *drive) | 225 | static void sl82c105_resetproc(ide_drive_t *drive) |
277 | { | 226 | { |
@@ -281,7 +230,8 @@ static void sl82c105_resetproc(ide_drive_t *drive) | |||
281 | DBG(("sl82c105_resetproc(drive:%s)\n", drive->name)); | 230 | DBG(("sl82c105_resetproc(drive:%s)\n", drive->name)); |
282 | 231 | ||
283 | pci_read_config_dword(dev, 0x40, &val); | 232 | pci_read_config_dword(dev, 0x40, &val); |
284 | pci_set_drvdata(dev, (void *)val); | 233 | val |= (CTRL_P1F16 | CTRL_P0F16); |
234 | pci_write_config_dword(dev, 0x40, val); | ||
285 | } | 235 | } |
286 | 236 | ||
287 | /* | 237 | /* |
@@ -334,7 +284,6 @@ static unsigned int __devinit init_chipset_sl82c105(struct pci_dev *dev, const c | |||
334 | pci_read_config_dword(dev, 0x40, &val); | 284 | pci_read_config_dword(dev, 0x40, &val); |
335 | val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16; | 285 | val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16; |
336 | pci_write_config_dword(dev, 0x40, val); | 286 | pci_write_config_dword(dev, 0x40, val); |
337 | pci_set_drvdata(dev, (void *)val); | ||
338 | 287 | ||
339 | return dev->irq; | 288 | return dev->irq; |
340 | } | 289 | } |
@@ -350,7 +299,6 @@ static void __devinit init_hwif_sl82c105(ide_hwif_t *hwif) | |||
350 | 299 | ||
351 | hwif->set_pio_mode = &sl82c105_set_pio_mode; | 300 | hwif->set_pio_mode = &sl82c105_set_pio_mode; |
352 | hwif->set_dma_mode = &sl82c105_set_dma_mode; | 301 | hwif->set_dma_mode = &sl82c105_set_dma_mode; |
353 | hwif->selectproc = &sl82c105_selectproc; | ||
354 | hwif->resetproc = &sl82c105_resetproc; | 302 | hwif->resetproc = &sl82c105_resetproc; |
355 | 303 | ||
356 | if (!hwif->dma_base) | 304 | if (!hwif->dma_base) |
@@ -369,10 +317,9 @@ static void __devinit init_hwif_sl82c105(ide_hwif_t *hwif) | |||
369 | 317 | ||
370 | hwif->mwdma_mask = ATA_MWDMA2; | 318 | hwif->mwdma_mask = ATA_MWDMA2; |
371 | 319 | ||
372 | hwif->ide_dma_on = &sl82c105_ide_dma_on; | ||
373 | hwif->dma_off_quietly = &sl82c105_dma_off_quietly; | ||
374 | hwif->dma_lost_irq = &sl82c105_dma_lost_irq; | 320 | hwif->dma_lost_irq = &sl82c105_dma_lost_irq; |
375 | hwif->dma_start = &sl82c105_dma_start; | 321 | hwif->dma_start = &sl82c105_dma_start; |
322 | hwif->ide_dma_end = &sl82c105_dma_end; | ||
376 | hwif->dma_timeout = &sl82c105_dma_timeout; | 323 | hwif->dma_timeout = &sl82c105_dma_timeout; |
377 | 324 | ||
378 | if (hwif->mate) | 325 | if (hwif->mate) |
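
The sl82c105 rework retires the selectproc trick of toggling the 16-bit mode on every drive selection; instead the cached per-drive timing word is switched exactly when it matters. drive->drive_data keeps the PIO timings in its low 16 bits and the MW DMA timings in the high 16 bits, so dma_start programs the DMA half and dma_end restores the PIO half. A stripped-down sketch of the pair as they read after the patch (DBG() calls and other details trimmed):

static void sl82c105_dma_start(ide_drive_t *drive)
{
        struct pci_dev *dev = HWIF(drive)->pci_dev;
        int reg = 0x44 + drive->dn * 4;         /* per-drive timing register */

        /* switch the drive to the MW DMA timings cached in the upper half */
        pci_write_config_word(dev, reg, drive->drive_data >> 16);

        sl82c105_reset_host(dev);
        ide_dma_start(drive);
}

static int sl82c105_dma_end(ide_drive_t *drive)
{
        struct pci_dev *dev = HWIF(drive)->pci_dev;
        int reg = 0x44 + drive->dn * 4;
        int ret = __ide_dma_end(drive);

        /* back to the PIO timings cached in the lower half */
        pci_write_config_word(dev, reg, drive->drive_data);

        return ret;
}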
diff --git a/drivers/ide/pci/trm290.c b/drivers/ide/pci/trm290.c index 0151d7fdfb8a..04cd893e1ab0 100644 --- a/drivers/ide/pci/trm290.c +++ b/drivers/ide/pci/trm290.c | |||
@@ -241,11 +241,7 @@ static int trm290_ide_dma_test_irq (ide_drive_t *drive) | |||
241 | return (status == 0x00ff); | 241 | return (status == 0x00ff); |
242 | } | 242 | } |
243 | 243 | ||
244 | static void trm290_dma_host_on(ide_drive_t *drive) | 244 | static void trm290_dma_host_set(ide_drive_t *drive, int on) |
245 | { | ||
246 | } | ||
247 | |||
248 | static void trm290_dma_host_off(ide_drive_t *drive) | ||
249 | { | 245 | { |
250 | } | 246 | } |
251 | 247 | ||
@@ -289,8 +285,7 @@ static void __devinit init_hwif_trm290(ide_hwif_t *hwif) | |||
289 | 285 | ||
290 | ide_setup_dma(hwif, (hwif->config_data + 4) ^ (hwif->channel ? 0x0080 : 0x0000), 3); | 286 | ide_setup_dma(hwif, (hwif->config_data + 4) ^ (hwif->channel ? 0x0080 : 0x0000), 3); |
291 | 287 | ||
292 | hwif->dma_host_off = &trm290_dma_host_off; | 288 | hwif->dma_host_set = &trm290_dma_host_set; |
293 | hwif->dma_host_on = &trm290_dma_host_on; | ||
294 | hwif->dma_setup = &trm290_dma_setup; | 289 | hwif->dma_setup = &trm290_dma_setup; |
295 | hwif->dma_exec_cmd = &trm290_dma_exec_cmd; | 290 | hwif->dma_exec_cmd = &trm290_dma_exec_cmd; |
296 | hwif->dma_start = &trm290_dma_start; | 291 | hwif->dma_start = &trm290_dma_start; |
diff --git a/drivers/ide/ppc/Makefile b/drivers/ide/ppc/Makefile new file mode 100644 index 000000000000..65af5848b28c --- /dev/null +++ b/drivers/ide/ppc/Makefile | |||
@@ -0,0 +1,3 @@ | |||
1 | |||
2 | obj-$(CONFIG_BLK_DEV_IDE_PMAC) += pmac.o | ||
3 | obj-$(CONFIG_BLK_DEV_MPC8xx_IDE) += mpc8xx.o | ||
diff --git a/drivers/ide/ppc/mpc8xx.c b/drivers/ide/ppc/mpc8xx.c index 5f0da35ab5ad..3fd5d45b5e0e 100644 --- a/drivers/ide/ppc/mpc8xx.c +++ b/drivers/ide/ppc/mpc8xx.c | |||
@@ -838,3 +838,21 @@ void m8xx_ide_init(void) | |||
838 | ppc_ide_md.default_io_base = m8xx_ide_default_io_base; | 838 | ppc_ide_md.default_io_base = m8xx_ide_default_io_base; |
839 | ppc_ide_md.ide_init_hwif = m8xx_ide_init_hwif_ports; | 839 | ppc_ide_md.ide_init_hwif = m8xx_ide_init_hwif_ports; |
840 | } | 840 | } |
841 | |||
842 | static int __init mpc8xx_ide_probe(void) | ||
843 | { | ||
844 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | ||
845 | |||
846 | #ifdef IDE0_BASE_OFFSET | ||
847 | idx[0] = 0; | ||
848 | #ifdef IDE1_BASE_OFFSET | ||
849 | idx[1] = 1; | ||
850 | #endif | ||
851 | #endif | ||
852 | |||
853 | ide_device_add(idx); | ||
854 | |||
855 | return 0; | ||
856 | } | ||
857 | |||
858 | module_init(mpc8xx_ide_probe); | ||
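
mpc8xx gains the same probe entry point as cmd640 above: collect the interfaces that were set up in a small index array and hand it to ide_device_add(). Judging by the initialisers used everywhere in this series, 0xff marks an unused slot; that reading is an inference, not something the hunk states. A sketch of the convention with hypothetical conditionals:

static int __init example_ide_probe(void)
{
        u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; /* 0xff = slot not used */

        idx[0] = 0;                     /* first interface was initialised */
        /* idx[1] = 1; */               /* second one, if the board has it */

        ide_device_add(idx);            /* probe and register the listed hwifs */

        return 0;
}
module_init(example_ide_probe);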
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c index 3dce80092fff..736d12c8e68a 100644 --- a/drivers/ide/ppc/pmac.c +++ b/drivers/ide/ppc/pmac.c | |||
@@ -1012,12 +1012,11 @@ pmac_ide_do_resume(ide_hwif_t *hwif) | |||
1012 | * rare machines unfortunately, but it's better this way. | 1012 | * rare machines unfortunately, but it's better this way. |
1013 | */ | 1013 | */ |
1014 | static int | 1014 | static int |
1015 | pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif) | 1015 | pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif, hw_regs_t *hw) |
1016 | { | 1016 | { |
1017 | struct device_node *np = pmif->node; | 1017 | struct device_node *np = pmif->node; |
1018 | const int *bidp; | 1018 | const int *bidp; |
1019 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; | 1019 | u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; |
1020 | hw_regs_t hw; | ||
1021 | 1020 | ||
1022 | pmif->cable_80 = 0; | 1021 | pmif->cable_80 = 0; |
1023 | pmif->broken_dma = pmif->broken_dma_warn = 0; | 1022 | pmif->broken_dma = pmif->broken_dma_warn = 0; |
@@ -1103,11 +1102,9 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif) | |||
1103 | /* Tell common code _not_ to mess with resources */ | 1102 | /* Tell common code _not_ to mess with resources */ |
1104 | hwif->mmio = 1; | 1103 | hwif->mmio = 1; |
1105 | hwif->hwif_data = pmif; | 1104 | hwif->hwif_data = pmif; |
1106 | memset(&hw, 0, sizeof(hw)); | 1105 | hw->chipset = ide_pmac; |
1107 | pmac_ide_init_hwif_ports(&hw, pmif->regbase, 0, &hwif->irq); | 1106 | ide_init_port_hw(hwif, hw); |
1108 | memcpy(hwif->io_ports, hw.io_ports, sizeof(hwif->io_ports)); | 1107 | hwif->noprobe = pmif->mediabay; |
1109 | hwif->chipset = ide_pmac; | ||
1110 | hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET] || pmif->mediabay; | ||
1111 | hwif->hold = pmif->mediabay; | 1108 | hwif->hold = pmif->mediabay; |
1112 | hwif->cbl = pmif->cable_80 ? ATA_CBL_PATA80 : ATA_CBL_PATA40; | 1109 | hwif->cbl = pmif->cable_80 ? ATA_CBL_PATA80 : ATA_CBL_PATA40; |
1113 | hwif->drives[0].unmask = 1; | 1110 | hwif->drives[0].unmask = 1; |
@@ -1136,8 +1133,6 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif) | |||
1136 | hwif->noprobe = 0; | 1133 | hwif->noprobe = 0; |
1137 | #endif /* CONFIG_PMAC_MEDIABAY */ | 1134 | #endif /* CONFIG_PMAC_MEDIABAY */ |
1138 | 1135 | ||
1139 | hwif->sg_max_nents = MAX_DCMDS; | ||
1140 | |||
1141 | #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC | 1136 | #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC |
1142 | /* has a DBDMA controller channel */ | 1137 | /* has a DBDMA controller channel */ |
1143 | if (pmif->dma_regs) | 1138 | if (pmif->dma_regs) |
@@ -1163,6 +1158,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match) | |||
1163 | ide_hwif_t *hwif; | 1158 | ide_hwif_t *hwif; |
1164 | pmac_ide_hwif_t *pmif; | 1159 | pmac_ide_hwif_t *pmif; |
1165 | int i, rc; | 1160 | int i, rc; |
1161 | hw_regs_t hw; | ||
1166 | 1162 | ||
1167 | i = 0; | 1163 | i = 0; |
1168 | while (i < MAX_HWIFS && (ide_hwifs[i].io_ports[IDE_DATA_OFFSET] != 0 | 1164 | while (i < MAX_HWIFS && (ide_hwifs[i].io_ports[IDE_DATA_OFFSET] != 0 |
@@ -1205,7 +1201,6 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match) | |||
1205 | regbase = (unsigned long) base; | 1201 | regbase = (unsigned long) base; |
1206 | 1202 | ||
1207 | hwif->pci_dev = mdev->bus->pdev; | 1203 | hwif->pci_dev = mdev->bus->pdev; |
1208 | hwif->gendev.parent = &mdev->ofdev.dev; | ||
1209 | 1204 | ||
1210 | pmif->mdev = mdev; | 1205 | pmif->mdev = mdev; |
1211 | pmif->node = mdev->ofdev.node; | 1206 | pmif->node = mdev->ofdev.node; |
@@ -1223,7 +1218,12 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match) | |||
1223 | #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */ | 1218 | #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */ |
1224 | dev_set_drvdata(&mdev->ofdev.dev, hwif); | 1219 | dev_set_drvdata(&mdev->ofdev.dev, hwif); |
1225 | 1220 | ||
1226 | rc = pmac_ide_setup_device(pmif, hwif); | 1221 | memset(&hw, 0, sizeof(hw)); |
1222 | pmac_ide_init_hwif_ports(&hw, pmif->regbase, 0, NULL); | ||
1223 | hw.irq = irq; | ||
1224 | hw.dev = &mdev->ofdev.dev; | ||
1225 | |||
1226 | rc = pmac_ide_setup_device(pmif, hwif, &hw); | ||
1227 | if (rc != 0) { | 1227 | if (rc != 0) { |
1228 | /* The inteface is released to the common IDE layer */ | 1228 | /* The inteface is released to the common IDE layer */ |
1229 | dev_set_drvdata(&mdev->ofdev.dev, NULL); | 1229 | dev_set_drvdata(&mdev->ofdev.dev, NULL); |
@@ -1282,6 +1282,7 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1282 | void __iomem *base; | 1282 | void __iomem *base; |
1283 | unsigned long rbase, rlen; | 1283 | unsigned long rbase, rlen; |
1284 | int i, rc; | 1284 | int i, rc; |
1285 | hw_regs_t hw; | ||
1285 | 1286 | ||
1286 | np = pci_device_to_OF_node(pdev); | 1287 | np = pci_device_to_OF_node(pdev); |
1287 | if (np == NULL) { | 1288 | if (np == NULL) { |
@@ -1315,7 +1316,6 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1315 | } | 1316 | } |
1316 | 1317 | ||
1317 | hwif->pci_dev = pdev; | 1318 | hwif->pci_dev = pdev; |
1318 | hwif->gendev.parent = &pdev->dev; | ||
1319 | pmif->mdev = NULL; | 1319 | pmif->mdev = NULL; |
1320 | pmif->node = np; | 1320 | pmif->node = np; |
1321 | 1321 | ||
@@ -1332,7 +1332,12 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1332 | 1332 | ||
1333 | pci_set_drvdata(pdev, hwif); | 1333 | pci_set_drvdata(pdev, hwif); |
1334 | 1334 | ||
1335 | rc = pmac_ide_setup_device(pmif, hwif); | 1335 | memset(&hw, 0, sizeof(hw)); |
1336 | pmac_ide_init_hwif_ports(&hw, pmif->regbase, 0, NULL); | ||
1337 | hw.irq = pdev->irq; | ||
1338 | hw.dev = &pdev->dev; | ||
1339 | |||
1340 | rc = pmac_ide_setup_device(pmif, hwif, &hw); | ||
1336 | if (rc != 0) { | 1341 | if (rc != 0) { |
1337 | /* The inteface is released to the common IDE layer */ | 1342 | /* The inteface is released to the common IDE layer */ |
1338 | pci_set_drvdata(pdev, NULL); | 1343 | pci_set_drvdata(pdev, NULL); |
@@ -1698,11 +1703,7 @@ pmac_ide_dma_test_irq (ide_drive_t *drive) | |||
1698 | return 1; | 1703 | return 1; |
1699 | } | 1704 | } |
1700 | 1705 | ||
1701 | static void pmac_ide_dma_host_off(ide_drive_t *drive) | 1706 | static void pmac_ide_dma_host_set(ide_drive_t *drive, int on) |
1702 | { | ||
1703 | } | ||
1704 | |||
1705 | static void pmac_ide_dma_host_on(ide_drive_t *drive) | ||
1706 | { | 1707 | { |
1707 | } | 1708 | } |
1708 | 1709 | ||
@@ -1748,15 +1749,14 @@ pmac_ide_setup_dma(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif) | |||
1748 | return; | 1749 | return; |
1749 | } | 1750 | } |
1750 | 1751 | ||
1751 | hwif->dma_off_quietly = &ide_dma_off_quietly; | 1752 | hwif->sg_max_nents = MAX_DCMDS; |
1752 | hwif->ide_dma_on = &__ide_dma_on; | 1753 | |
1754 | hwif->dma_host_set = &pmac_ide_dma_host_set; | ||
1753 | hwif->dma_setup = &pmac_ide_dma_setup; | 1755 | hwif->dma_setup = &pmac_ide_dma_setup; |
1754 | hwif->dma_exec_cmd = &pmac_ide_dma_exec_cmd; | 1756 | hwif->dma_exec_cmd = &pmac_ide_dma_exec_cmd; |
1755 | hwif->dma_start = &pmac_ide_dma_start; | 1757 | hwif->dma_start = &pmac_ide_dma_start; |
1756 | hwif->ide_dma_end = &pmac_ide_dma_end; | 1758 | hwif->ide_dma_end = &pmac_ide_dma_end; |
1757 | hwif->ide_dma_test_irq = &pmac_ide_dma_test_irq; | 1759 | hwif->ide_dma_test_irq = &pmac_ide_dma_test_irq; |
1758 | hwif->dma_host_off = &pmac_ide_dma_host_off; | ||
1759 | hwif->dma_host_on = &pmac_ide_dma_host_on; | ||
1760 | hwif->dma_timeout = &ide_dma_timeout; | 1760 | hwif->dma_timeout = &ide_dma_timeout; |
1761 | hwif->dma_lost_irq = &pmac_ide_dma_lost_irq; | 1761 | hwif->dma_lost_irq = &pmac_ide_dma_lost_irq; |
1762 | 1762 | ||
@@ -1786,3 +1786,5 @@ pmac_ide_setup_dma(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif) | |||
1786 | } | 1786 | } |
1787 | 1787 | ||
1788 | #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */ | 1788 | #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */ |
1789 | |||
1790 | module_init(pmac_ide_probe); | ||
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c index d2cd5a3d38f8..676c66e72881 100644 --- a/drivers/ide/setup-pci.c +++ b/drivers/ide/setup-pci.c | |||
@@ -165,13 +165,17 @@ static unsigned long ide_get_or_set_dma_base(const struct ide_port_info *d, ide_ | |||
165 | 165 | ||
166 | dma_base = pci_resource_start(dev, baridx); | 166 | dma_base = pci_resource_start(dev, baridx); |
167 | 167 | ||
168 | if (dma_base == 0) | 168 | if (dma_base == 0) { |
169 | printk(KERN_ERR "%s: DMA base is invalid\n", d->name); | 169 | printk(KERN_ERR "%s: DMA base is invalid\n", d->name); |
170 | return 0; | ||
171 | } | ||
170 | } | 172 | } |
171 | 173 | ||
172 | if ((d->host_flags & IDE_HFLAG_CS5520) == 0 && dma_base) { | 174 | if (hwif->channel) |
175 | dma_base += 8; | ||
176 | |||
177 | if ((d->host_flags & IDE_HFLAG_CS5520) == 0) { | ||
173 | u8 simplex_stat = 0; | 178 | u8 simplex_stat = 0; |
174 | dma_base += hwif->channel ? 8 : 0; | ||
175 | 179 | ||
176 | switch(dev->device) { | 180 | switch(dev->device) { |
177 | case PCI_DEVICE_ID_AL_M5219: | 181 | case PCI_DEVICE_ID_AL_M5219: |
@@ -359,6 +363,8 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev, const struct ide_port | |||
359 | unsigned long ctl = 0, base = 0; | 363 | unsigned long ctl = 0, base = 0; |
360 | ide_hwif_t *hwif; | 364 | ide_hwif_t *hwif; |
361 | u8 bootable = (d->host_flags & IDE_HFLAG_BOOTABLE) ? 1 : 0; | 365 | u8 bootable = (d->host_flags & IDE_HFLAG_BOOTABLE) ? 1 : 0; |
366 | u8 oldnoprobe = 0; | ||
367 | struct hw_regs_s hw; | ||
362 | 368 | ||
363 | if ((d->host_flags & IDE_HFLAG_ISA_PORTS) == 0) { | 369 | if ((d->host_flags & IDE_HFLAG_ISA_PORTS) == 0) { |
364 | /* Possibly we should fail if these checks report true */ | 370 | /* Possibly we should fail if these checks report true */ |
@@ -381,26 +387,25 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev, const struct ide_port | |||
381 | } | 387 | } |
382 | if ((hwif = ide_match_hwif(base, bootable, d->name)) == NULL) | 388 | if ((hwif = ide_match_hwif(base, bootable, d->name)) == NULL) |
383 | return NULL; /* no room in ide_hwifs[] */ | 389 | return NULL; /* no room in ide_hwifs[] */ |
384 | if (hwif->io_ports[IDE_DATA_OFFSET] != base || | 390 | |
385 | hwif->io_ports[IDE_CONTROL_OFFSET] != (ctl | 2)) { | 391 | memset(&hw, 0, sizeof(hw)); |
386 | hw_regs_t hw; | 392 | hw.irq = hwif->irq ? hwif->irq : irq; |
387 | 393 | hw.dev = &dev->dev; | |
388 | memset(&hw, 0, sizeof(hw)); | 394 | hw.chipset = d->chipset ? d->chipset : ide_pci; |
389 | #ifndef CONFIG_IDE_ARCH_OBSOLETE_INIT | 395 | ide_std_init_ports(&hw, base, ctl | 2); |
390 | ide_std_init_ports(&hw, base, ctl | 2); | 396 | |
391 | #else | 397 | if (hwif->io_ports[IDE_DATA_OFFSET] == base && |
392 | ide_init_hwif_ports(&hw, base, ctl | 2, NULL); | 398 | hwif->io_ports[IDE_CONTROL_OFFSET] == (ctl | 2)) |
393 | #endif | 399 | oldnoprobe = hwif->noprobe; |
394 | memcpy(hwif->io_ports, hw.io_ports, sizeof(hwif->io_ports)); | 400 | |
395 | hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET]; | 401 | ide_init_port_hw(hwif, &hw); |
396 | } | 402 | |
397 | hwif->chipset = d->chipset ? d->chipset : ide_pci; | 403 | hwif->noprobe = oldnoprobe; |
404 | |||
398 | hwif->pci_dev = dev; | 405 | hwif->pci_dev = dev; |
399 | hwif->cds = d; | 406 | hwif->cds = d; |
400 | hwif->channel = port; | 407 | hwif->channel = port; |
401 | 408 | ||
402 | if (!hwif->irq) | ||
403 | hwif->irq = irq; | ||
404 | if (mate) { | 409 | if (mate) { |
405 | hwif->mate = mate; | 410 | hwif->mate = mate; |
406 | mate->mate = hwif; | 411 | mate->mate = hwif; |
@@ -535,12 +540,8 @@ void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d, int | |||
535 | if ((hwif = ide_hwif_configure(dev, d, mate, port, pciirq)) == NULL) | 540 | if ((hwif = ide_hwif_configure(dev, d, mate, port, pciirq)) == NULL) |
536 | continue; | 541 | continue; |
537 | 542 | ||
538 | /* setup proper ancestral information */ | ||
539 | hwif->gendev.parent = &dev->dev; | ||
540 | |||
541 | *(idx + port) = hwif->index; | 543 | *(idx + port) = hwif->index; |
542 | 544 | ||
543 | |||
544 | if (d->init_iops) | 545 | if (d->init_iops) |
545 | d->init_iops(hwif); | 546 | d->init_iops(hwif); |
546 | 547 | ||
@@ -551,8 +552,6 @@ void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d, int | |||
551 | (d->host_flags & IDE_HFLAG_FORCE_LEGACY_IRQS)) | 552 | (d->host_flags & IDE_HFLAG_FORCE_LEGACY_IRQS)) |
552 | hwif->irq = port ? 15 : 14; | 553 | hwif->irq = port ? 15 : 14; |
553 | 554 | ||
554 | hwif->fixup = d->fixup; | ||
555 | |||
556 | hwif->host_flags = d->host_flags; | 555 | hwif->host_flags = d->host_flags; |
557 | hwif->pio_mask = d->pio_mask; | 556 | hwif->pio_mask = d->pio_mask; |
558 | 557 | ||
@@ -699,105 +698,3 @@ out: | |||
699 | } | 698 | } |
700 | 699 | ||
701 | EXPORT_SYMBOL_GPL(ide_setup_pci_devices); | 700 | EXPORT_SYMBOL_GPL(ide_setup_pci_devices); |
702 | |||
703 | #ifdef CONFIG_IDEPCI_PCIBUS_ORDER | ||
704 | /* | ||
705 | * Module interfaces | ||
706 | */ | ||
707 | |||
708 | static int pre_init = 1; /* Before first ordered IDE scan */ | ||
709 | static LIST_HEAD(ide_pci_drivers); | ||
710 | |||
711 | /* | ||
712 | * __ide_pci_register_driver - attach IDE driver | ||
713 | * @driver: pci driver | ||
714 | * @module: owner module of the driver | ||
715 | * | ||
716 | * Registers a driver with the IDE layer. The IDE layer arranges that | ||
717 | * boot time setup is done in the expected device order and then | ||
718 | * hands the controllers off to the core PCI code to do the rest of | ||
719 | * the work. | ||
720 | * | ||
721 | * Returns are the same as for pci_register_driver | ||
722 | */ | ||
723 | |||
724 | int __ide_pci_register_driver(struct pci_driver *driver, struct module *module, | ||
725 | const char *mod_name) | ||
726 | { | ||
727 | if (!pre_init) | ||
728 | return __pci_register_driver(driver, module, mod_name); | ||
729 | driver->driver.owner = module; | ||
730 | list_add_tail(&driver->node, &ide_pci_drivers); | ||
731 | return 0; | ||
732 | } | ||
733 | EXPORT_SYMBOL_GPL(__ide_pci_register_driver); | ||
734 | |||
735 | /** | ||
736 | * ide_scan_pcidev - find an IDE driver for a device | ||
737 | * @dev: PCI device to check | ||
738 | * | ||
739 | * Look for an IDE driver to handle the device we are considering. | ||
740 | * This is only used during boot up to get the ordering correct. After | ||
741 | * boot up the pci layer takes over the job. | ||
742 | */ | ||
743 | |||
744 | static int __init ide_scan_pcidev(struct pci_dev *dev) | ||
745 | { | ||
746 | struct list_head *l; | ||
747 | struct pci_driver *d; | ||
748 | |||
749 | list_for_each(l, &ide_pci_drivers) { | ||
750 | d = list_entry(l, struct pci_driver, node); | ||
751 | if (d->id_table) { | ||
752 | const struct pci_device_id *id = | ||
753 | pci_match_id(d->id_table, dev); | ||
754 | |||
755 | if (id != NULL && d->probe(dev, id) >= 0) { | ||
756 | dev->driver = d; | ||
757 | pci_dev_get(dev); | ||
758 | return 1; | ||
759 | } | ||
760 | } | ||
761 | } | ||
762 | return 0; | ||
763 | } | ||
764 | |||
765 | /** | ||
766 | * ide_scan_pcibus - perform the initial IDE driver scan | ||
767 | * @scan_direction: set for reverse order scanning | ||
768 | * | ||
769 | * Perform the initial bus rather than driver ordered scan of the | ||
770 | * PCI drivers. After this all IDE pci handling becomes standard | ||
771 | * module ordering not traditionally ordered. | ||
772 | */ | ||
773 | |||
774 | void __init ide_scan_pcibus (int scan_direction) | ||
775 | { | ||
776 | struct pci_dev *dev = NULL; | ||
777 | struct pci_driver *d; | ||
778 | struct list_head *l, *n; | ||
779 | |||
780 | pre_init = 0; | ||
781 | if (!scan_direction) | ||
782 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev))) | ||
783 | ide_scan_pcidev(dev); | ||
784 | else | ||
785 | while ((dev = pci_get_device_reverse(PCI_ANY_ID, PCI_ANY_ID, | ||
786 | dev))) | ||
787 | ide_scan_pcidev(dev); | ||
788 | |||
789 | /* | ||
790 | * Hand the drivers over to the PCI layer now we | ||
791 | * are post init. | ||
792 | */ | ||
793 | |||
794 | list_for_each_safe(l, n, &ide_pci_drivers) { | ||
795 | list_del(l); | ||
796 | d = list_entry(l, struct pci_driver, node); | ||
797 | if (__pci_register_driver(d, d->driver.owner, | ||
798 | d->driver.mod_name)) | ||
799 | printk(KERN_ERR "%s: failed to register %s driver\n", | ||
800 | __FUNCTION__, d->driver.mod_name); | ||
801 | } | ||
802 | } | ||
803 | #endif | ||
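The block removed above deferred IDE PCI drivers on a private list so that boot-time probing could run in bus order; its heart is a pci_get_device() walk over every device plus a pci_match_id() check against each queued driver. The same walk, reduced to a stand-alone sketch (example_pci_drivers and the function names are illustrative, not part of any current API):

    #include <linux/pci.h>
    #include <linux/list.h>
    #include <linux/init.h>

    /* Illustrative list of deferred drivers, as the removed code kept. */
    static LIST_HEAD(example_pci_drivers);

    static int __init example_scan_pcidev(struct pci_dev *dev)
    {
            struct pci_driver *d;

            list_for_each_entry(d, &example_pci_drivers, node) {
                    const struct pci_device_id *id;

                    if (!d->id_table)
                            continue;
                    id = pci_match_id(d->id_table, dev);
                    if (id && d->probe(dev, id) >= 0) {
                            dev->driver = d;
                            pci_dev_get(dev);   /* hold the matched device */
                            return 1;
                    }
            }
            return 0;
    }

    static void __init example_scan_pcibus(void)
    {
            struct pci_dev *dev = NULL;

            /* pci_get_device() iterates all PCI devices in bus order;
             * passing the previous device back in continues the walk. */
            while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)))
                    example_scan_pcidev(dev);
    }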
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c index 48d647abea46..eaba4a9b231e 100644 --- a/drivers/macintosh/mediabay.c +++ b/drivers/macintosh/mediabay.c | |||
@@ -563,7 +563,8 @@ static void media_bay_step(int i) | |||
563 | ide_init_hwif_ports(&hw, (unsigned long) bay->cd_base, (unsigned long) 0, NULL); | 563 | ide_init_hwif_ports(&hw, (unsigned long) bay->cd_base, (unsigned long) 0, NULL); |
564 | hw.irq = bay->cd_irq; | 564 | hw.irq = bay->cd_irq; |
565 | hw.chipset = ide_pmac; | 565 | hw.chipset = ide_pmac; |
566 | bay->cd_index = ide_register_hw(&hw, NULL, 0, NULL); | 566 | bay->cd_index = |
567 | ide_register_hw(&hw, NULL, NULL); | ||
567 | pmu_resume(); | 568 | pmu_resume(); |
568 | } | 569 | } |
569 | if (bay->cd_index == -1) { | 570 | if (bay->cd_index == -1) { |
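ide_register_hw() now takes three arguments instead of four; this caller keeps passing NULL for the optional ones and, as before, receives an interface index, with -1 meaning no interface was registered (hence the cd_index == -1 check that follows). A sketch mirroring the updated call, with bay_base and bay_irq as hypothetical stand-ins for the media-bay fields:

    #include <linux/ide.h>
    #include <linux/errno.h>
    #include <linux/string.h>

    /* Sketch of the three-argument ide_register_hw() call used above. */
    static int example_register_bay_ide(unsigned long bay_base, int bay_irq)
    {
            hw_regs_t hw;
            int index;

            memset(&hw, 0, sizeof(hw));
            ide_init_hwif_ports(&hw, bay_base, 0, NULL);
            hw.irq = bay_irq;
            hw.chipset = ide_pmac;

            index = ide_register_hw(&hw, NULL, NULL);
            if (index == -1)
                    return -ENODEV;         /* no interface registered */
            return index;
    }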
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c index 5452da1bb1a5..b66da74caa55 100644 --- a/drivers/macintosh/therm_windtunnel.c +++ b/drivers/macintosh/therm_windtunnel.c | |||
@@ -47,12 +47,10 @@ | |||
47 | 47 | ||
48 | #define LOG_TEMP 0 /* continously log temperature */ | 48 | #define LOG_TEMP 0 /* continously log temperature */ |
49 | 49 | ||
50 | #define I2C_DRIVERID_G4FAN 0x9001 /* fixme */ | ||
51 | |||
52 | static int do_probe( struct i2c_adapter *adapter, int addr, int kind); | 50 | static int do_probe( struct i2c_adapter *adapter, int addr, int kind); |
53 | 51 | ||
54 | /* scan 0x48-0x4f (DS1775) and 0x2c-2x2f (ADM1030) */ | 52 | /* scan 0x48-0x4f (DS1775) and 0x2c-2x2f (ADM1030) */ |
55 | static unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, | 53 | static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, |
56 | 0x4c, 0x4d, 0x4e, 0x4f, | 54 | 0x4c, 0x4d, 0x4e, 0x4f, |
57 | 0x2c, 0x2d, 0x2e, 0x2f, | 55 | 0x2c, 0x2d, 0x2e, 0x2f, |
58 | I2C_CLIENT_END }; | 56 | I2C_CLIENT_END }; |
@@ -357,7 +355,6 @@ static struct i2c_driver g4fan_driver = { | |||
357 | .driver = { | 355 | .driver = { |
358 | .name = "therm_windtunnel", | 356 | .name = "therm_windtunnel", |
359 | }, | 357 | }, |
360 | .id = I2C_DRIVERID_G4FAN, | ||
361 | .attach_adapter = do_attach, | 358 | .attach_adapter = do_attach, |
362 | .detach_client = do_detach, | 359 | .detach_client = do_detach, |
363 | }; | 360 | }; |
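The probe-address table is constified here; the rtc drivers further down get the same treatment. The idiom pairs a const array of 7-bit addresses, terminated by I2C_CLIENT_END, with I2C_CLIENT_INSMOD, which defines the addr_data object used by the legacy detection code. A minimal sketch with made-up addresses:

    #include <linux/i2c.h>

    /* Hypothetical probe list for a chip that may sit at 0x48 or 0x49. */
    static const unsigned short normal_i2c[] = {
            0x48, 0x49,
            I2C_CLIENT_END          /* mandatory terminator */
    };

    /* Expands to the addr_data structure used by legacy detection. */
    I2C_CLIENT_INSMOD;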
diff --git a/drivers/media/video/dpc7146.c b/drivers/media/video/dpc7146.c index 255dae303708..566e479e2629 100644 --- a/drivers/media/video/dpc7146.c +++ b/drivers/media/video/dpc7146.c | |||
@@ -87,11 +87,24 @@ struct dpc | |||
87 | int cur_input; /* current input */ | 87 | int cur_input; /* current input */ |
88 | }; | 88 | }; |
89 | 89 | ||
90 | static int dpc_check_clients(struct device *dev, void *data) | ||
91 | { | ||
92 | struct dpc* dpc = data; | ||
93 | struct i2c_client *client = i2c_verify_client(dev); | ||
94 | |||
95 | if( !client ) | ||
96 | return 0; | ||
97 | |||
98 | if( I2C_SAA7111A == client->addr ) | ||
99 | dpc->saa7111a = client; | ||
100 | |||
101 | return 0; | ||
102 | } | ||
103 | |||
90 | /* fixme: add vbi stuff here */ | 104 | /* fixme: add vbi stuff here */ |
91 | static int dpc_probe(struct saa7146_dev* dev) | 105 | static int dpc_probe(struct saa7146_dev* dev) |
92 | { | 106 | { |
93 | struct dpc* dpc = NULL; | 107 | struct dpc* dpc = NULL; |
94 | struct i2c_client *client; | ||
95 | 108 | ||
96 | dpc = kzalloc(sizeof(struct dpc), GFP_KERNEL); | 109 | dpc = kzalloc(sizeof(struct dpc), GFP_KERNEL); |
97 | if( NULL == dpc ) { | 110 | if( NULL == dpc ) { |
@@ -115,9 +128,7 @@ static int dpc_probe(struct saa7146_dev* dev) | |||
115 | } | 128 | } |
116 | 129 | ||
117 | /* loop through all i2c-devices on the bus and look who is there */ | 130 | /* loop through all i2c-devices on the bus and look who is there */ |
118 | list_for_each_entry(client, &dpc->i2c_adapter.clients, list) | 131 | device_for_each_child(&dpc->i2c_adapter.dev, dpc, dpc_check_clients); |
119 | if( I2C_SAA7111A == client->addr ) | ||
120 | dpc->saa7111a = client; | ||
121 | 132 | ||
122 | /* check if all devices are present */ | 133 | /* check if all devices are present */ |
123 | if( 0 == dpc->saa7111a ) { | 134 | if( 0 == dpc->saa7111a ) { |
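With the adapter's client list no longer open for direct traversal, the probe walks the adapter's child devices instead: device_for_each_child() invokes the callback for every child, and i2c_verify_client() filters out children that are not i2c clients. A stand-alone sketch of the pattern; struct my_state, MY_CHIP_ADDR and the function names are illustrative only:

    #include <linux/device.h>
    #include <linux/i2c.h>

    #define MY_CHIP_ADDR    0x4a            /* hypothetical slave address */

    struct my_state {
            struct i2c_client *chip;
    };

    static int my_check_client(struct device *dev, void *data)
    {
            struct my_state *st = data;
            struct i2c_client *client = i2c_verify_client(dev);

            if (!client)                    /* child is not an i2c client */
                    return 0;
            if (client->addr == MY_CHIP_ADDR)
                    st->chip = client;
            return 0;                       /* keep iterating */
    }

    static struct i2c_client *my_find_chip(struct i2c_adapter *adapter)
    {
            struct my_state st = { .chip = NULL };

            device_for_each_child(&adapter->dev, &st, my_check_client);
            return st.chip;                 /* NULL if nothing matched */
    }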
diff --git a/drivers/media/video/ks0127.c b/drivers/media/video/ks0127.c index b6cd21e6dab9..4895540be195 100644 --- a/drivers/media/video/ks0127.c +++ b/drivers/media/video/ks0127.c | |||
@@ -764,7 +764,6 @@ static struct i2c_client ks0127_client_tmpl = | |||
764 | .addr = 0, | 764 | .addr = 0, |
765 | .adapter = NULL, | 765 | .adapter = NULL, |
766 | .driver = &i2c_driver_ks0127, | 766 | .driver = &i2c_driver_ks0127, |
767 | .usage_count = 0 | ||
768 | }; | 767 | }; |
769 | 768 | ||
770 | static int ks0127_found_proc(struct i2c_adapter *adapter, int addr, int kind) | 769 | static int ks0127_found_proc(struct i2c_adapter *adapter, int addr, int kind) |
diff --git a/drivers/media/video/mxb.c b/drivers/media/video/mxb.c index 98ad3092a079..add6d0d680be 100644 --- a/drivers/media/video/mxb.c +++ b/drivers/media/video/mxb.c | |||
@@ -149,10 +149,33 @@ struct mxb | |||
149 | 149 | ||
150 | static struct saa7146_extension extension; | 150 | static struct saa7146_extension extension; |
151 | 151 | ||
152 | static int mxb_check_clients(struct device *dev, void *data) | ||
153 | { | ||
154 | struct mxb* mxb = data; | ||
155 | struct i2c_client *client = i2c_verify_client(dev); | ||
156 | |||
157 | if( !client ) | ||
158 | return 0; | ||
159 | |||
160 | if( I2C_ADDR_TEA6420_1 == client->addr ) | ||
161 | mxb->tea6420_1 = client; | ||
162 | if( I2C_ADDR_TEA6420_2 == client->addr ) | ||
163 | mxb->tea6420_2 = client; | ||
164 | if( I2C_TEA6415C_2 == client->addr ) | ||
165 | mxb->tea6415c = client; | ||
166 | if( I2C_ADDR_TDA9840 == client->addr ) | ||
167 | mxb->tda9840 = client; | ||
168 | if( I2C_SAA7111 == client->addr ) | ||
169 | mxb->saa7111a = client; | ||
170 | if( 0x60 == client->addr ) | ||
171 | mxb->tuner = client; | ||
172 | |||
173 | return 0; | ||
174 | } | ||
175 | |||
152 | static int mxb_probe(struct saa7146_dev* dev) | 176 | static int mxb_probe(struct saa7146_dev* dev) |
153 | { | 177 | { |
154 | struct mxb* mxb = NULL; | 178 | struct mxb* mxb = NULL; |
155 | struct i2c_client *client; | ||
156 | int result; | 179 | int result; |
157 | 180 | ||
158 | if ((result = request_module("saa7111")) < 0) { | 181 | if ((result = request_module("saa7111")) < 0) { |
@@ -195,20 +218,7 @@ static int mxb_probe(struct saa7146_dev* dev) | |||
195 | } | 218 | } |
196 | 219 | ||
197 | /* loop through all i2c-devices on the bus and look who is there */ | 220 | /* loop through all i2c-devices on the bus and look who is there */ |
198 | list_for_each_entry(client, &mxb->i2c_adapter.clients, list) { | 221 | device_for_each_child(&mxb->i2c_adapter.dev, mxb, mxb_check_clients); |
199 | if( I2C_ADDR_TEA6420_1 == client->addr ) | ||
200 | mxb->tea6420_1 = client; | ||
201 | if( I2C_ADDR_TEA6420_2 == client->addr ) | ||
202 | mxb->tea6420_2 = client; | ||
203 | if( I2C_TEA6415C_2 == client->addr ) | ||
204 | mxb->tea6415c = client; | ||
205 | if( I2C_ADDR_TDA9840 == client->addr ) | ||
206 | mxb->tda9840 = client; | ||
207 | if( I2C_SAA7111 == client->addr ) | ||
208 | mxb->saa7111a = client; | ||
209 | if( 0x60 == client->addr ) | ||
210 | mxb->tuner = client; | ||
211 | } | ||
212 | 222 | ||
213 | /* check if all devices are present */ | 223 | /* check if all devices are present */ |
214 | if( 0 == mxb->tea6420_1 || 0 == mxb->tea6420_2 || 0 == mxb->tea6415c | 224 | if( 0 == mxb->tea6420_1 || 0 == mxb->tea6420_2 || 0 == mxb->tea6415c |
diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c index 9a03dc82c6ca..5bb75294b5aa 100644 --- a/drivers/media/video/vino.c +++ b/drivers/media/video/vino.c | |||
@@ -2589,11 +2589,7 @@ static int vino_acquire_input(struct vino_channel_settings *vcs) | |||
2589 | /* First try D1 and then SAA7191 */ | 2589 | /* First try D1 and then SAA7191 */ |
2590 | if (vino_drvdata->camera.driver | 2590 | if (vino_drvdata->camera.driver |
2591 | && (vino_drvdata->camera.owner == VINO_NO_CHANNEL)) { | 2591 | && (vino_drvdata->camera.owner == VINO_NO_CHANNEL)) { |
2592 | if (i2c_use_client(vino_drvdata->camera.driver)) { | 2592 | i2c_use_client(vino_drvdata->camera.driver); |
2593 | ret = -ENODEV; | ||
2594 | goto out; | ||
2595 | } | ||
2596 | |||
2597 | vino_drvdata->camera.owner = vcs->channel; | 2593 | vino_drvdata->camera.owner = vcs->channel; |
2598 | vcs->input = VINO_INPUT_D1; | 2594 | vcs->input = VINO_INPUT_D1; |
2599 | vcs->data_norm = VINO_DATA_NORM_D1; | 2595 | vcs->data_norm = VINO_DATA_NORM_D1; |
@@ -2602,11 +2598,7 @@ static int vino_acquire_input(struct vino_channel_settings *vcs) | |||
2602 | int input, data_norm; | 2598 | int input, data_norm; |
2603 | int saa7191_input; | 2599 | int saa7191_input; |
2604 | 2600 | ||
2605 | if (i2c_use_client(vino_drvdata->decoder.driver)) { | 2601 | i2c_use_client(vino_drvdata->decoder.driver); |
2606 | ret = -ENODEV; | ||
2607 | goto out; | ||
2608 | } | ||
2609 | |||
2610 | input = VINO_INPUT_COMPOSITE; | 2602 | input = VINO_INPUT_COMPOSITE; |
2611 | 2603 | ||
2612 | saa7191_input = vino_get_saa7191_input(input); | 2604 | saa7191_input = vino_get_saa7191_input(input); |
@@ -2688,10 +2680,7 @@ static int vino_set_input(struct vino_channel_settings *vcs, int input) | |||
2688 | } | 2680 | } |
2689 | 2681 | ||
2690 | if (vino_drvdata->decoder.owner == VINO_NO_CHANNEL) { | 2682 | if (vino_drvdata->decoder.owner == VINO_NO_CHANNEL) { |
2691 | if (i2c_use_client(vino_drvdata->decoder.driver)) { | 2683 | i2c_use_client(vino_drvdata->decoder.driver); |
2692 | ret = -ENODEV; | ||
2693 | goto out; | ||
2694 | } | ||
2695 | vino_drvdata->decoder.owner = vcs->channel; | 2684 | vino_drvdata->decoder.owner = vcs->channel; |
2696 | } | 2685 | } |
2697 | 2686 | ||
@@ -2759,10 +2748,7 @@ static int vino_set_input(struct vino_channel_settings *vcs, int input) | |||
2759 | } | 2748 | } |
2760 | 2749 | ||
2761 | if (vino_drvdata->camera.owner == VINO_NO_CHANNEL) { | 2750 | if (vino_drvdata->camera.owner == VINO_NO_CHANNEL) { |
2762 | if (i2c_use_client(vino_drvdata->camera.driver)) { | 2751 | i2c_use_client(vino_drvdata->camera.driver); |
2763 | ret = -ENODEV; | ||
2764 | goto out; | ||
2765 | } | ||
2766 | vino_drvdata->camera.owner = vcs->channel; | 2752 | vino_drvdata->camera.owner = vcs->channel; |
2767 | } | 2753 | } |
2768 | 2754 | ||
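All four hunks above drop the error handling around i2c_use_client(): the call is now treated as unable to fail and merely keeps the sub-device's client around while a channel owns it. A compressed sketch of the new shape; struct sub_device and claim_sub_device() are hypothetical, the driver's real bookkeeping lives in vino_drvdata:

    #include <linux/i2c.h>

    /* Hypothetical ownership record for one sub-device (decoder/camera). */
    struct sub_device {
            struct i2c_client *client;
            int owner;                      /* owning channel, or -1 when free */
    };

    static void claim_sub_device(struct sub_device *sub, int channel)
    {
            /* Return value is no longer checked; the call just keeps the
             * client pinned while this channel uses it. */
            i2c_use_client(sub->client);
            sub->owner = channel;
    }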
diff --git a/drivers/mfd/ucb1x00-assabet.c b/drivers/mfd/ucb1x00-assabet.c index b7c8e7813865..61aeaf79640d 100644 --- a/drivers/mfd/ucb1x00-assabet.c +++ b/drivers/mfd/ucb1x00-assabet.c | |||
@@ -20,7 +20,7 @@ | |||
20 | #include "ucb1x00.h" | 20 | #include "ucb1x00.h" |
21 | 21 | ||
22 | #define UCB1X00_ATTR(name,input)\ | 22 | #define UCB1X00_ATTR(name,input)\ |
23 | static ssize_t name##_show(struct device *dev, struct device_attribute *attr, | 23 | static ssize_t name##_show(struct device *dev, struct device_attribute *attr, \ |
24 | char *buf) \ | 24 | char *buf) \ |
25 | { \ | 25 | { \ |
26 | struct ucb1x00 *ucb = classdev_to_ucb1x00(dev); \ | 26 | struct ucb1x00 *ucb = classdev_to_ucb1x00(dev); \ |
@@ -38,17 +38,17 @@ UCB1X00_ATTR(batt_temp, UCB_ADC_INP_AD2); | |||
38 | 38 | ||
39 | static int ucb1x00_assabet_add(struct ucb1x00_dev *dev) | 39 | static int ucb1x00_assabet_add(struct ucb1x00_dev *dev) |
40 | { | 40 | { |
41 | device_create_file(&dev->ucb->dev, &device_attr_vbatt); | 41 | device_create_file(&dev->ucb->dev, &dev_attr_vbatt); |
42 | device_create_file(&dev->ucb->dev, &device_attr_vcharger); | 42 | device_create_file(&dev->ucb->dev, &dev_attr_vcharger); |
43 | device_create_file(&dev->ucb->dev, &device_attr_batt_temp); | 43 | device_create_file(&dev->ucb->dev, &dev_attr_batt_temp); |
44 | return 0; | 44 | return 0; |
45 | } | 45 | } |
46 | 46 | ||
47 | static void ucb1x00_assabet_remove(struct ucb1x00_dev *dev) | 47 | static void ucb1x00_assabet_remove(struct ucb1x00_dev *dev) |
48 | { | 48 | { |
49 | device_remove_file(&dev->ucb->cdev, &device_attr_batt_temp); | 49 | device_remove_file(&dev->ucb->dev, &dev_attr_batt_temp); |
50 | device_remove_file(&dev->ucb->cdev, &device_attr_vcharger); | 50 | device_remove_file(&dev->ucb->dev, &dev_attr_vcharger); |
51 | device_remove_file(&dev->ucb->cdev, &device_attr_vbatt); | 51 | device_remove_file(&dev->ucb->dev, &dev_attr_vbatt); |
52 | } | 52 | } |
53 | 53 | ||
54 | static struct ucb1x00_driver ucb1x00_assabet_driver = { | 54 | static struct ucb1x00_driver ucb1x00_assabet_driver = { |
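The fix above is twofold: the attribute objects are really named dev_attr_<name> (that is what DEVICE_ATTR(), wrapped here by UCB1X00_ATTR, emits), and create and remove must act on the same struct device rather than a cdev member. A minimal stand-alone sketch of the convention with one hypothetical read-only attribute:

    #include <linux/device.h>
    #include <linux/kernel.h>
    #include <linux/stat.h>

    /* Hypothetical attribute; DEVICE_ATTR(vbatt, ...) defines dev_attr_vbatt. */
    static ssize_t vbatt_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
    {
            return sprintf(buf, "%d\n", 4200);      /* placeholder reading */
    }
    static DEVICE_ATTR(vbatt, S_IRUGO, vbatt_show, NULL);

    static int example_add(struct device *dev)
    {
            return device_create_file(dev, &dev_attr_vbatt);
    }

    static void example_remove(struct device *dev)
    {
            /* must name the same struct device used at create time */
            device_remove_file(dev, &dev_attr_vbatt);
    }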
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index 971e18b91f4a..c9dfeb15b487 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/mmc/card.h> | 25 | #include <linux/mmc/card.h> |
26 | #include <linux/clk.h> | 26 | #include <linux/clk.h> |
27 | #include <linux/scatterlist.h> | 27 | #include <linux/scatterlist.h> |
28 | #include <linux/i2c/tps65010.h> | ||
28 | 29 | ||
29 | #include <asm/io.h> | 30 | #include <asm/io.h> |
30 | #include <asm/irq.h> | 31 | #include <asm/irq.h> |
@@ -35,7 +36,6 @@ | |||
35 | #include <asm/arch/dma.h> | 36 | #include <asm/arch/dma.h> |
36 | #include <asm/arch/mux.h> | 37 | #include <asm/arch/mux.h> |
37 | #include <asm/arch/fpga.h> | 38 | #include <asm/arch/fpga.h> |
38 | #include <asm/arch/tps65010.h> | ||
39 | 39 | ||
40 | #define OMAP_MMC_REG_CMD 0x00 | 40 | #define OMAP_MMC_REG_CMD 0x00 |
41 | #define OMAP_MMC_REG_ARGL 0x04 | 41 | #define OMAP_MMC_REG_ARGL 0x04 |
diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c index dfef1637bfb8..e0900ca678ec 100644 --- a/drivers/rtc/rtc-ds1672.c +++ b/drivers/rtc/rtc-ds1672.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #define DRV_VERSION "0.3" | 16 | #define DRV_VERSION "0.3" |
17 | 17 | ||
18 | /* Addresses to scan: none. This chip cannot be detected. */ | 18 | /* Addresses to scan: none. This chip cannot be detected. */ |
19 | static unsigned short normal_i2c[] = { I2C_CLIENT_END }; | 19 | static const unsigned short normal_i2c[] = { I2C_CLIENT_END }; |
20 | 20 | ||
21 | /* Insmod parameters */ | 21 | /* Insmod parameters */ |
22 | I2C_CLIENT_INSMOD; | 22 | I2C_CLIENT_INSMOD; |
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c index 1c743641b73b..725b0c73c333 100644 --- a/drivers/rtc/rtc-isl1208.c +++ b/drivers/rtc/rtc-isl1208.c | |||
@@ -61,7 +61,7 @@ | |||
61 | /* i2c configuration */ | 61 | /* i2c configuration */ |
62 | #define ISL1208_I2C_ADDR 0xde | 62 | #define ISL1208_I2C_ADDR 0xde |
63 | 63 | ||
64 | static unsigned short normal_i2c[] = { | 64 | static const unsigned short normal_i2c[] = { |
65 | ISL1208_I2C_ADDR>>1, I2C_CLIENT_END | 65 | ISL1208_I2C_ADDR>>1, I2C_CLIENT_END |
66 | }; | 66 | }; |
67 | I2C_CLIENT_INSMOD; /* defines addr_data */ | 67 | I2C_CLIENT_INSMOD; /* defines addr_data */ |
diff --git a/drivers/rtc/rtc-max6900.c b/drivers/rtc/rtc-max6900.c index a1cd448639c9..7683412970c4 100644 --- a/drivers/rtc/rtc-max6900.c +++ b/drivers/rtc/rtc-max6900.c | |||
@@ -54,7 +54,7 @@ | |||
54 | 54 | ||
55 | #define MAX6900_I2C_ADDR 0xa0 | 55 | #define MAX6900_I2C_ADDR 0xa0 |
56 | 56 | ||
57 | static unsigned short normal_i2c[] = { | 57 | static const unsigned short normal_i2c[] = { |
58 | MAX6900_I2C_ADDR >> 1, | 58 | MAX6900_I2C_ADDR >> 1, |
59 | I2C_CLIENT_END | 59 | I2C_CLIENT_END |
60 | }; | 60 | }; |
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c index 0242d803ebe5..b3317fcc16c3 100644 --- a/drivers/rtc/rtc-pcf8563.c +++ b/drivers/rtc/rtc-pcf8563.c | |||
@@ -25,7 +25,7 @@ | |||
25 | * located at 0x51 will pass the validation routine due to | 25 | * located at 0x51 will pass the validation routine due to |
26 | * the way the registers are implemented. | 26 | * the way the registers are implemented. |
27 | */ | 27 | */ |
28 | static unsigned short normal_i2c[] = { I2C_CLIENT_END }; | 28 | static const unsigned short normal_i2c[] = { I2C_CLIENT_END }; |
29 | 29 | ||
30 | /* Module parameters */ | 30 | /* Module parameters */ |
31 | I2C_CLIENT_INSMOD; | 31 | I2C_CLIENT_INSMOD; |
diff --git a/drivers/rtc/rtc-pcf8583.c b/drivers/rtc/rtc-pcf8583.c index 556d0e7da35b..c973ba94c422 100644 --- a/drivers/rtc/rtc-pcf8583.c +++ b/drivers/rtc/rtc-pcf8583.c | |||
@@ -40,7 +40,7 @@ struct pcf8583 { | |||
40 | #define CTRL_ALARM 0x02 | 40 | #define CTRL_ALARM 0x02 |
41 | #define CTRL_TIMER 0x01 | 41 | #define CTRL_TIMER 0x01 |
42 | 42 | ||
43 | static unsigned short normal_i2c[] = { 0x50, I2C_CLIENT_END }; | 43 | static const unsigned short normal_i2c[] = { 0x50, I2C_CLIENT_END }; |
44 | 44 | ||
45 | /* Module parameters */ | 45 | /* Module parameters */ |
46 | I2C_CLIENT_INSMOD; | 46 | I2C_CLIENT_INSMOD; |
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c index b3fae357ca49..b90fb1866ce9 100644 --- a/drivers/rtc/rtc-x1205.c +++ b/drivers/rtc/rtc-x1205.c | |||
@@ -32,7 +32,7 @@ | |||
32 | * unknown chips, the user must explicitly set the probe parameter. | 32 | * unknown chips, the user must explicitly set the probe parameter. |
33 | */ | 33 | */ |
34 | 34 | ||
35 | static unsigned short normal_i2c[] = { I2C_CLIENT_END }; | 35 | static const unsigned short normal_i2c[] = { I2C_CLIENT_END }; |
36 | 36 | ||
37 | /* Insmod parameters */ | 37 | /* Insmod parameters */ |
38 | I2C_CLIENT_INSMOD; | 38 | I2C_CLIENT_INSMOD; |
diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile index be9f22d52fd8..0a89e080b389 100644 --- a/drivers/s390/block/Makefile +++ b/drivers/s390/block/Makefile | |||
@@ -2,8 +2,8 @@ | |||
2 | # S/390 block devices | 2 | # S/390 block devices |
3 | # | 3 | # |
4 | 4 | ||
5 | dasd_eckd_mod-objs := dasd_eckd.o dasd_3990_erp.o dasd_9343_erp.o | 5 | dasd_eckd_mod-objs := dasd_eckd.o dasd_3990_erp.o dasd_alias.o |
6 | dasd_fba_mod-objs := dasd_fba.o dasd_3370_erp.o dasd_9336_erp.o | 6 | dasd_fba_mod-objs := dasd_fba.o |
7 | dasd_diag_mod-objs := dasd_diag.o | 7 | dasd_diag_mod-objs := dasd_diag.o |
8 | dasd_mod-objs := dasd.o dasd_ioctl.o dasd_proc.o dasd_devmap.o \ | 8 | dasd_mod-objs := dasd.o dasd_ioctl.o dasd_proc.o dasd_devmap.o \ |
9 | dasd_genhd.o dasd_erp.o | 9 | dasd_genhd.o dasd_erp.o |
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index e6bfce690ca3..1db15f3e5d20 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -48,13 +48,15 @@ MODULE_LICENSE("GPL"); | |||
48 | /* | 48 | /* |
49 | * SECTION: prototypes for static functions of dasd.c | 49 | * SECTION: prototypes for static functions of dasd.c |
50 | */ | 50 | */ |
51 | static int dasd_alloc_queue(struct dasd_device * device); | 51 | static int dasd_alloc_queue(struct dasd_block *); |
52 | static void dasd_setup_queue(struct dasd_device * device); | 52 | static void dasd_setup_queue(struct dasd_block *); |
53 | static void dasd_free_queue(struct dasd_device * device); | 53 | static void dasd_free_queue(struct dasd_block *); |
54 | static void dasd_flush_request_queue(struct dasd_device *); | 54 | static void dasd_flush_request_queue(struct dasd_block *); |
55 | static int dasd_flush_ccw_queue(struct dasd_device *, int); | 55 | static int dasd_flush_block_queue(struct dasd_block *); |
56 | static void dasd_tasklet(struct dasd_device *); | 56 | static void dasd_device_tasklet(struct dasd_device *); |
57 | static void dasd_block_tasklet(struct dasd_block *); | ||
57 | static void do_kick_device(struct work_struct *); | 58 | static void do_kick_device(struct work_struct *); |
59 | static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *); | ||
58 | 60 | ||
59 | /* | 61 | /* |
60 | * SECTION: Operations on the device structure. | 62 | * SECTION: Operations on the device structure. |
@@ -65,26 +67,23 @@ static wait_queue_head_t dasd_flush_wq; | |||
65 | /* | 67 | /* |
66 | * Allocate memory for a new device structure. | 68 | * Allocate memory for a new device structure. |
67 | */ | 69 | */ |
68 | struct dasd_device * | 70 | struct dasd_device *dasd_alloc_device(void) |
69 | dasd_alloc_device(void) | ||
70 | { | 71 | { |
71 | struct dasd_device *device; | 72 | struct dasd_device *device; |
72 | 73 | ||
73 | device = kzalloc(sizeof (struct dasd_device), GFP_ATOMIC); | 74 | device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC); |
74 | if (device == NULL) | 75 | if (!device) |
75 | return ERR_PTR(-ENOMEM); | 76 | return ERR_PTR(-ENOMEM); |
76 | /* open_count = 0 means device online but not in use */ | ||
77 | atomic_set(&device->open_count, -1); | ||
78 | 77 | ||
79 | /* Get two pages for normal block device operations. */ | 78 | /* Get two pages for normal block device operations. */ |
80 | device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1); | 79 | device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1); |
81 | if (device->ccw_mem == NULL) { | 80 | if (!device->ccw_mem) { |
82 | kfree(device); | 81 | kfree(device); |
83 | return ERR_PTR(-ENOMEM); | 82 | return ERR_PTR(-ENOMEM); |
84 | } | 83 | } |
85 | /* Get one page for error recovery. */ | 84 | /* Get one page for error recovery. */ |
86 | device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA); | 85 | device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA); |
87 | if (device->erp_mem == NULL) { | 86 | if (!device->erp_mem) { |
88 | free_pages((unsigned long) device->ccw_mem, 1); | 87 | free_pages((unsigned long) device->ccw_mem, 1); |
89 | kfree(device); | 88 | kfree(device); |
90 | return ERR_PTR(-ENOMEM); | 89 | return ERR_PTR(-ENOMEM); |
@@ -93,10 +92,9 @@ dasd_alloc_device(void) | |||
93 | dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2); | 92 | dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2); |
94 | dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE); | 93 | dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE); |
95 | spin_lock_init(&device->mem_lock); | 94 | spin_lock_init(&device->mem_lock); |
96 | spin_lock_init(&device->request_queue_lock); | 95 | atomic_set(&device->tasklet_scheduled, 0); |
97 | atomic_set (&device->tasklet_scheduled, 0); | ||
98 | tasklet_init(&device->tasklet, | 96 | tasklet_init(&device->tasklet, |
99 | (void (*)(unsigned long)) dasd_tasklet, | 97 | (void (*)(unsigned long)) dasd_device_tasklet, |
100 | (unsigned long) device); | 98 | (unsigned long) device); |
101 | INIT_LIST_HEAD(&device->ccw_queue); | 99 | INIT_LIST_HEAD(&device->ccw_queue); |
102 | init_timer(&device->timer); | 100 | init_timer(&device->timer); |
@@ -110,8 +108,7 @@ dasd_alloc_device(void) | |||
110 | /* | 108 | /* |
111 | * Free memory of a device structure. | 109 | * Free memory of a device structure. |
112 | */ | 110 | */ |
113 | void | 111 | void dasd_free_device(struct dasd_device *device) |
114 | dasd_free_device(struct dasd_device *device) | ||
115 | { | 112 | { |
116 | kfree(device->private); | 113 | kfree(device->private); |
117 | free_page((unsigned long) device->erp_mem); | 114 | free_page((unsigned long) device->erp_mem); |
@@ -120,10 +117,42 @@ dasd_free_device(struct dasd_device *device) | |||
120 | } | 117 | } |
121 | 118 | ||
122 | /* | 119 | /* |
120 | * Allocate memory for a new device structure. | ||
121 | */ | ||
122 | struct dasd_block *dasd_alloc_block(void) | ||
123 | { | ||
124 | struct dasd_block *block; | ||
125 | |||
126 | block = kzalloc(sizeof(*block), GFP_ATOMIC); | ||
127 | if (!block) | ||
128 | return ERR_PTR(-ENOMEM); | ||
129 | /* open_count = 0 means device online but not in use */ | ||
130 | atomic_set(&block->open_count, -1); | ||
131 | |||
132 | spin_lock_init(&block->request_queue_lock); | ||
133 | atomic_set(&block->tasklet_scheduled, 0); | ||
134 | tasklet_init(&block->tasklet, | ||
135 | (void (*)(unsigned long)) dasd_block_tasklet, | ||
136 | (unsigned long) block); | ||
137 | INIT_LIST_HEAD(&block->ccw_queue); | ||
138 | spin_lock_init(&block->queue_lock); | ||
139 | init_timer(&block->timer); | ||
140 | |||
141 | return block; | ||
142 | } | ||
143 | |||
144 | /* | ||
145 | * Free memory of a device structure. | ||
146 | */ | ||
147 | void dasd_free_block(struct dasd_block *block) | ||
148 | { | ||
149 | kfree(block); | ||
150 | } | ||
151 | |||
152 | /* | ||
123 | * Make a new device known to the system. | 153 | * Make a new device known to the system. |
124 | */ | 154 | */ |
125 | static int | 155 | static int dasd_state_new_to_known(struct dasd_device *device) |
126 | dasd_state_new_to_known(struct dasd_device *device) | ||
127 | { | 156 | { |
128 | int rc; | 157 | int rc; |
129 | 158 | ||
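dasd_alloc_block() gathers the pieces that migrate out of struct dasd_device: the open count starting at -1 ("online but not in use"), a per-block tasklet, the block's ccw queue and lock, and a timer. The same allocation pattern, reduced to a self-contained sketch around a hypothetical structure (the real struct dasd_block carries more fields than shown here):

    #include <linux/slab.h>
    #include <linux/interrupt.h>
    #include <linux/timer.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/err.h>
    #include <asm/atomic.h>

    /* Hypothetical per-block state, modelled on dasd_alloc_block() above. */
    struct example_block {
            atomic_t open_count;
            struct tasklet_struct tasklet;
            struct list_head ccw_queue;
            spinlock_t queue_lock;
            struct timer_list timer;
    };

    static void example_block_tasklet(unsigned long data)
    {
            /* would drain ((struct example_block *) data)->ccw_queue */
    }

    static struct example_block *example_alloc_block(void)
    {
            struct example_block *block;

            block = kzalloc(sizeof(*block), GFP_ATOMIC);
            if (!block)
                    return ERR_PTR(-ENOMEM);
            /* open_count == 0 means online but not in use */
            atomic_set(&block->open_count, -1);
            tasklet_init(&block->tasklet, example_block_tasklet,
                         (unsigned long) block);
            INIT_LIST_HEAD(&block->ccw_queue);
            spin_lock_init(&block->queue_lock);
            init_timer(&block->timer);
            return block;
    }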
@@ -133,12 +162,13 @@ dasd_state_new_to_known(struct dasd_device *device) | |||
133 | */ | 162 | */ |
134 | dasd_get_device(device); | 163 | dasd_get_device(device); |
135 | 164 | ||
136 | rc = dasd_alloc_queue(device); | 165 | if (device->block) { |
137 | if (rc) { | 166 | rc = dasd_alloc_queue(device->block); |
138 | dasd_put_device(device); | 167 | if (rc) { |
139 | return rc; | 168 | dasd_put_device(device); |
169 | return rc; | ||
170 | } | ||
140 | } | 171 | } |
141 | |||
142 | device->state = DASD_STATE_KNOWN; | 172 | device->state = DASD_STATE_KNOWN; |
143 | return 0; | 173 | return 0; |
144 | } | 174 | } |
@@ -146,21 +176,24 @@ dasd_state_new_to_known(struct dasd_device *device) | |||
146 | /* | 176 | /* |
147 | * Let the system forget about a device. | 177 | * Let the system forget about a device. |
148 | */ | 178 | */ |
149 | static int | 179 | static int dasd_state_known_to_new(struct dasd_device *device) |
150 | dasd_state_known_to_new(struct dasd_device * device) | ||
151 | { | 180 | { |
152 | /* Disable extended error reporting for this device. */ | 181 | /* Disable extended error reporting for this device. */ |
153 | dasd_eer_disable(device); | 182 | dasd_eer_disable(device); |
154 | /* Forget the discipline information. */ | 183 | /* Forget the discipline information. */ |
155 | if (device->discipline) | 184 | if (device->discipline) { |
185 | if (device->discipline->uncheck_device) | ||
186 | device->discipline->uncheck_device(device); | ||
156 | module_put(device->discipline->owner); | 187 | module_put(device->discipline->owner); |
188 | } | ||
157 | device->discipline = NULL; | 189 | device->discipline = NULL; |
158 | if (device->base_discipline) | 190 | if (device->base_discipline) |
159 | module_put(device->base_discipline->owner); | 191 | module_put(device->base_discipline->owner); |
160 | device->base_discipline = NULL; | 192 | device->base_discipline = NULL; |
161 | device->state = DASD_STATE_NEW; | 193 | device->state = DASD_STATE_NEW; |
162 | 194 | ||
163 | dasd_free_queue(device); | 195 | if (device->block) |
196 | dasd_free_queue(device->block); | ||
164 | 197 | ||
165 | /* Give up reference we took in dasd_state_new_to_known. */ | 198 | /* Give up reference we took in dasd_state_new_to_known. */ |
166 | dasd_put_device(device); | 199 | dasd_put_device(device); |
@@ -170,19 +203,19 @@ dasd_state_known_to_new(struct dasd_device * device) | |||
170 | /* | 203 | /* |
171 | * Request the irq line for the device. | 204 | * Request the irq line for the device. |
172 | */ | 205 | */ |
173 | static int | 206 | static int dasd_state_known_to_basic(struct dasd_device *device) |
174 | dasd_state_known_to_basic(struct dasd_device * device) | ||
175 | { | 207 | { |
176 | int rc; | 208 | int rc; |
177 | 209 | ||
178 | /* Allocate and register gendisk structure. */ | 210 | /* Allocate and register gendisk structure. */ |
179 | rc = dasd_gendisk_alloc(device); | 211 | if (device->block) { |
180 | if (rc) | 212 | rc = dasd_gendisk_alloc(device->block); |
181 | return rc; | 213 | if (rc) |
182 | 214 | return rc; | |
215 | } | ||
183 | /* register 'device' debug area, used for all DBF_DEV_XXX calls */ | 216 | /* register 'device' debug area, used for all DBF_DEV_XXX calls */ |
184 | device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 2, | 217 | device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 1, |
185 | 8 * sizeof (long)); | 218 | 8 * sizeof(long)); |
186 | debug_register_view(device->debug_area, &debug_sprintf_view); | 219 | debug_register_view(device->debug_area, &debug_sprintf_view); |
187 | debug_set_level(device->debug_area, DBF_WARNING); | 220 | debug_set_level(device->debug_area, DBF_WARNING); |
188 | DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created"); | 221 | DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created"); |
@@ -194,16 +227,17 @@ dasd_state_known_to_basic(struct dasd_device * device) | |||
194 | /* | 227 | /* |
195 | * Release the irq line for the device. Terminate any running i/o. | 228 | * Release the irq line for the device. Terminate any running i/o. |
196 | */ | 229 | */ |
197 | static int | 230 | static int dasd_state_basic_to_known(struct dasd_device *device) |
198 | dasd_state_basic_to_known(struct dasd_device * device) | ||
199 | { | 231 | { |
200 | int rc; | 232 | int rc; |
201 | 233 | if (device->block) { | |
202 | dasd_gendisk_free(device); | 234 | dasd_gendisk_free(device->block); |
203 | rc = dasd_flush_ccw_queue(device, 1); | 235 | dasd_block_clear_timer(device->block); |
236 | } | ||
237 | rc = dasd_flush_device_queue(device); | ||
204 | if (rc) | 238 | if (rc) |
205 | return rc; | 239 | return rc; |
206 | dasd_clear_timer(device); | 240 | dasd_device_clear_timer(device); |
207 | 241 | ||
208 | DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device); | 242 | DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device); |
209 | if (device->debug_area != NULL) { | 243 | if (device->debug_area != NULL) { |
@@ -228,26 +262,32 @@ dasd_state_basic_to_known(struct dasd_device * device) | |||
228 | * In case the analysis returns an error, the device setup is stopped | 262 | * In case the analysis returns an error, the device setup is stopped |
229 | * (a fake disk was already added to allow formatting). | 263 | * (a fake disk was already added to allow formatting). |
230 | */ | 264 | */ |
231 | static int | 265 | static int dasd_state_basic_to_ready(struct dasd_device *device) |
232 | dasd_state_basic_to_ready(struct dasd_device * device) | ||
233 | { | 266 | { |
234 | int rc; | 267 | int rc; |
268 | struct dasd_block *block; | ||
235 | 269 | ||
236 | rc = 0; | 270 | rc = 0; |
237 | if (device->discipline->do_analysis != NULL) | 271 | block = device->block; |
238 | rc = device->discipline->do_analysis(device); | ||
239 | if (rc) { | ||
240 | if (rc != -EAGAIN) | ||
241 | device->state = DASD_STATE_UNFMT; | ||
242 | return rc; | ||
243 | } | ||
244 | /* make disk known with correct capacity */ | 272 | /* make disk known with correct capacity */ |
245 | dasd_setup_queue(device); | 273 | if (block) { |
246 | set_capacity(device->gdp, device->blocks << device->s2b_shift); | 274 | if (block->base->discipline->do_analysis != NULL) |
247 | device->state = DASD_STATE_READY; | 275 | rc = block->base->discipline->do_analysis(block); |
248 | rc = dasd_scan_partitions(device); | 276 | if (rc) { |
249 | if (rc) | 277 | if (rc != -EAGAIN) |
250 | device->state = DASD_STATE_BASIC; | 278 | device->state = DASD_STATE_UNFMT; |
279 | return rc; | ||
280 | } | ||
281 | dasd_setup_queue(block); | ||
282 | set_capacity(block->gdp, | ||
283 | block->blocks << block->s2b_shift); | ||
284 | device->state = DASD_STATE_READY; | ||
285 | rc = dasd_scan_partitions(block); | ||
286 | if (rc) | ||
287 | device->state = DASD_STATE_BASIC; | ||
288 | } else { | ||
289 | device->state = DASD_STATE_READY; | ||
290 | } | ||
251 | return rc; | 291 | return rc; |
252 | } | 292 | } |
253 | 293 | ||
@@ -256,28 +296,31 @@ dasd_state_basic_to_ready(struct dasd_device * device) | |||
256 | * Forget format information. Check if the target level is basic | 296 | * Forget format information. Check if the target level is basic |
257 | * and if it is create fake disk for formatting. | 297 | * and if it is create fake disk for formatting. |
258 | */ | 298 | */ |
259 | static int | 299 | static int dasd_state_ready_to_basic(struct dasd_device *device) |
260 | dasd_state_ready_to_basic(struct dasd_device * device) | ||
261 | { | 300 | { |
262 | int rc; | 301 | int rc; |
263 | 302 | ||
264 | rc = dasd_flush_ccw_queue(device, 0); | ||
265 | if (rc) | ||
266 | return rc; | ||
267 | dasd_destroy_partitions(device); | ||
268 | dasd_flush_request_queue(device); | ||
269 | device->blocks = 0; | ||
270 | device->bp_block = 0; | ||
271 | device->s2b_shift = 0; | ||
272 | device->state = DASD_STATE_BASIC; | 303 | device->state = DASD_STATE_BASIC; |
304 | if (device->block) { | ||
305 | struct dasd_block *block = device->block; | ||
306 | rc = dasd_flush_block_queue(block); | ||
307 | if (rc) { | ||
308 | device->state = DASD_STATE_READY; | ||
309 | return rc; | ||
310 | } | ||
311 | dasd_destroy_partitions(block); | ||
312 | dasd_flush_request_queue(block); | ||
313 | block->blocks = 0; | ||
314 | block->bp_block = 0; | ||
315 | block->s2b_shift = 0; | ||
316 | } | ||
273 | return 0; | 317 | return 0; |
274 | } | 318 | } |
275 | 319 | ||
276 | /* | 320 | /* |
277 | * Back to basic. | 321 | * Back to basic. |
278 | */ | 322 | */ |
279 | static int | 323 | static int dasd_state_unfmt_to_basic(struct dasd_device *device) |
280 | dasd_state_unfmt_to_basic(struct dasd_device * device) | ||
281 | { | 324 | { |
282 | device->state = DASD_STATE_BASIC; | 325 | device->state = DASD_STATE_BASIC; |
283 | return 0; | 326 | return 0; |
@@ -291,17 +334,31 @@ dasd_state_unfmt_to_basic(struct dasd_device * device) | |||
291 | static int | 334 | static int |
292 | dasd_state_ready_to_online(struct dasd_device * device) | 335 | dasd_state_ready_to_online(struct dasd_device * device) |
293 | { | 336 | { |
337 | int rc; | ||
338 | |||
339 | if (device->discipline->ready_to_online) { | ||
340 | rc = device->discipline->ready_to_online(device); | ||
341 | if (rc) | ||
342 | return rc; | ||
343 | } | ||
294 | device->state = DASD_STATE_ONLINE; | 344 | device->state = DASD_STATE_ONLINE; |
295 | dasd_schedule_bh(device); | 345 | if (device->block) |
346 | dasd_schedule_block_bh(device->block); | ||
296 | return 0; | 347 | return 0; |
297 | } | 348 | } |
298 | 349 | ||
299 | /* | 350 | /* |
300 | * Stop the requeueing of requests again. | 351 | * Stop the requeueing of requests again. |
301 | */ | 352 | */ |
302 | static int | 353 | static int dasd_state_online_to_ready(struct dasd_device *device) |
303 | dasd_state_online_to_ready(struct dasd_device * device) | ||
304 | { | 354 | { |
355 | int rc; | ||
356 | |||
357 | if (device->discipline->online_to_ready) { | ||
358 | rc = device->discipline->online_to_ready(device); | ||
359 | if (rc) | ||
360 | return rc; | ||
361 | } | ||
305 | device->state = DASD_STATE_READY; | 362 | device->state = DASD_STATE_READY; |
306 | return 0; | 363 | return 0; |
307 | } | 364 | } |
@@ -309,8 +366,7 @@ dasd_state_online_to_ready(struct dasd_device * device) | |||
309 | /* | 366 | /* |
310 | * Device startup state changes. | 367 | * Device startup state changes. |
311 | */ | 368 | */ |
312 | static int | 369 | static int dasd_increase_state(struct dasd_device *device) |
313 | dasd_increase_state(struct dasd_device *device) | ||
314 | { | 370 | { |
315 | int rc; | 371 | int rc; |
316 | 372 | ||
@@ -345,8 +401,7 @@ dasd_increase_state(struct dasd_device *device) | |||
345 | /* | 401 | /* |
346 | * Device shutdown state changes. | 402 | * Device shutdown state changes. |
347 | */ | 403 | */ |
348 | static int | 404 | static int dasd_decrease_state(struct dasd_device *device) |
349 | dasd_decrease_state(struct dasd_device *device) | ||
350 | { | 405 | { |
351 | int rc; | 406 | int rc; |
352 | 407 | ||
@@ -381,8 +436,7 @@ dasd_decrease_state(struct dasd_device *device) | |||
381 | /* | 436 | /* |
382 | * This is the main startup/shutdown routine. | 437 | * This is the main startup/shutdown routine. |
383 | */ | 438 | */ |
384 | static void | 439 | static void dasd_change_state(struct dasd_device *device) |
385 | dasd_change_state(struct dasd_device *device) | ||
386 | { | 440 | { |
387 | int rc; | 441 | int rc; |
388 | 442 | ||
@@ -409,17 +463,15 @@ dasd_change_state(struct dasd_device *device) | |||
409 | * dasd_kick_device will schedule a call do do_kick_device to the kernel | 463 | * dasd_kick_device will schedule a call do do_kick_device to the kernel |
410 | * event daemon. | 464 | * event daemon. |
411 | */ | 465 | */ |
412 | static void | 466 | static void do_kick_device(struct work_struct *work) |
413 | do_kick_device(struct work_struct *work) | ||
414 | { | 467 | { |
415 | struct dasd_device *device = container_of(work, struct dasd_device, kick_work); | 468 | struct dasd_device *device = container_of(work, struct dasd_device, kick_work); |
416 | dasd_change_state(device); | 469 | dasd_change_state(device); |
417 | dasd_schedule_bh(device); | 470 | dasd_schedule_device_bh(device); |
418 | dasd_put_device(device); | 471 | dasd_put_device(device); |
419 | } | 472 | } |
420 | 473 | ||
421 | void | 474 | void dasd_kick_device(struct dasd_device *device) |
422 | dasd_kick_device(struct dasd_device *device) | ||
423 | { | 475 | { |
424 | dasd_get_device(device); | 476 | dasd_get_device(device); |
425 | /* queue call to dasd_kick_device to the kernel event daemon. */ | 477 | /* queue call to dasd_kick_device to the kernel event daemon. */ |
@@ -429,8 +481,7 @@ dasd_kick_device(struct dasd_device *device) | |||
429 | /* | 481 | /* |
430 | * Set the target state for a device and starts the state change. | 482 | * Set the target state for a device and starts the state change. |
431 | */ | 483 | */ |
432 | void | 484 | void dasd_set_target_state(struct dasd_device *device, int target) |
433 | dasd_set_target_state(struct dasd_device *device, int target) | ||
434 | { | 485 | { |
435 | /* If we are in probeonly mode stop at DASD_STATE_READY. */ | 486 | /* If we are in probeonly mode stop at DASD_STATE_READY. */ |
436 | if (dasd_probeonly && target > DASD_STATE_READY) | 487 | if (dasd_probeonly && target > DASD_STATE_READY) |
@@ -447,14 +498,12 @@ dasd_set_target_state(struct dasd_device *device, int target) | |||
447 | /* | 498 | /* |
448 | * Enable devices with device numbers in [from..to]. | 499 | * Enable devices with device numbers in [from..to]. |
449 | */ | 500 | */ |
450 | static inline int | 501 | static inline int _wait_for_device(struct dasd_device *device) |
451 | _wait_for_device(struct dasd_device *device) | ||
452 | { | 502 | { |
453 | return (device->state == device->target); | 503 | return (device->state == device->target); |
454 | } | 504 | } |
455 | 505 | ||
456 | void | 506 | void dasd_enable_device(struct dasd_device *device) |
457 | dasd_enable_device(struct dasd_device *device) | ||
458 | { | 507 | { |
459 | dasd_set_target_state(device, DASD_STATE_ONLINE); | 508 | dasd_set_target_state(device, DASD_STATE_ONLINE); |
460 | if (device->state <= DASD_STATE_KNOWN) | 509 | if (device->state <= DASD_STATE_KNOWN) |
@@ -475,20 +524,20 @@ unsigned int dasd_profile_level = DASD_PROFILE_OFF; | |||
475 | /* | 524 | /* |
476 | * Increments counter in global and local profiling structures. | 525 | * Increments counter in global and local profiling structures. |
477 | */ | 526 | */ |
478 | #define dasd_profile_counter(value, counter, device) \ | 527 | #define dasd_profile_counter(value, counter, block) \ |
479 | { \ | 528 | { \ |
480 | int index; \ | 529 | int index; \ |
481 | for (index = 0; index < 31 && value >> (2+index); index++); \ | 530 | for (index = 0; index < 31 && value >> (2+index); index++); \ |
482 | dasd_global_profile.counter[index]++; \ | 531 | dasd_global_profile.counter[index]++; \ |
483 | device->profile.counter[index]++; \ | 532 | block->profile.counter[index]++; \ |
484 | } | 533 | } |
485 | 534 | ||
486 | /* | 535 | /* |
487 | * Add profiling information for cqr before execution. | 536 | * Add profiling information for cqr before execution. |
488 | */ | 537 | */ |
489 | static void | 538 | static void dasd_profile_start(struct dasd_block *block, |
490 | dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr, | 539 | struct dasd_ccw_req *cqr, |
491 | struct request *req) | 540 | struct request *req) |
492 | { | 541 | { |
493 | struct list_head *l; | 542 | struct list_head *l; |
494 | unsigned int counter; | 543 | unsigned int counter; |
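dasd_profile_counter(), visible in the hunk above, buckets a sample into a 32-slot logarithmic histogram: the index is the smallest i (capped at 31) for which value >> (2 + i) becomes zero, so bucket 0 holds 0..3, bucket 1 holds 4..7, and so on, and both the global and the per-block counter for that bucket are incremented. The bucket computation as a plain function, for clarity (the function name is illustrative):

    /* Logarithmic bucket index as computed by dasd_profile_counter():
     * smallest i in 0..31 with value < (1UL << (i + 2)). */
    static inline int example_profile_bucket(unsigned long value)
    {
            int index;

            for (index = 0; index < 31 && value >> (2 + index); index++)
                    ;
            return index;
    }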
@@ -498,19 +547,19 @@ dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr, | |||
498 | 547 | ||
499 | /* count the length of the chanq for statistics */ | 548 | /* count the length of the chanq for statistics */ |
500 | counter = 0; | 549 | counter = 0; |
501 | list_for_each(l, &device->ccw_queue) | 550 | list_for_each(l, &block->ccw_queue) |
502 | if (++counter >= 31) | 551 | if (++counter >= 31) |
503 | break; | 552 | break; |
504 | dasd_global_profile.dasd_io_nr_req[counter]++; | 553 | dasd_global_profile.dasd_io_nr_req[counter]++; |
505 | device->profile.dasd_io_nr_req[counter]++; | 554 | block->profile.dasd_io_nr_req[counter]++; |
506 | } | 555 | } |
507 | 556 | ||
508 | /* | 557 | /* |
509 | * Add profiling information for cqr after execution. | 558 | * Add profiling information for cqr after execution. |
510 | */ | 559 | */ |
511 | static void | 560 | static void dasd_profile_end(struct dasd_block *block, |
512 | dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr, | 561 | struct dasd_ccw_req *cqr, |
513 | struct request *req) | 562 | struct request *req) |
514 | { | 563 | { |
515 | long strtime, irqtime, endtime, tottime; /* in microseconds */ | 564 | long strtime, irqtime, endtime, tottime; /* in microseconds */ |
516 | long tottimeps, sectors; | 565 | long tottimeps, sectors; |
@@ -532,27 +581,27 @@ dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr, | |||
532 | 581 | ||
533 | if (!dasd_global_profile.dasd_io_reqs) | 582 | if (!dasd_global_profile.dasd_io_reqs) |
534 | memset(&dasd_global_profile, 0, | 583 | memset(&dasd_global_profile, 0, |
535 | sizeof (struct dasd_profile_info_t)); | 584 | sizeof(struct dasd_profile_info_t)); |
536 | dasd_global_profile.dasd_io_reqs++; | 585 | dasd_global_profile.dasd_io_reqs++; |
537 | dasd_global_profile.dasd_io_sects += sectors; | 586 | dasd_global_profile.dasd_io_sects += sectors; |
538 | 587 | ||
539 | if (!device->profile.dasd_io_reqs) | 588 | if (!block->profile.dasd_io_reqs) |
540 | memset(&device->profile, 0, | 589 | memset(&block->profile, 0, |
541 | sizeof (struct dasd_profile_info_t)); | 590 | sizeof(struct dasd_profile_info_t)); |
542 | device->profile.dasd_io_reqs++; | 591 | block->profile.dasd_io_reqs++; |
543 | device->profile.dasd_io_sects += sectors; | 592 | block->profile.dasd_io_sects += sectors; |
544 | 593 | ||
545 | dasd_profile_counter(sectors, dasd_io_secs, device); | 594 | dasd_profile_counter(sectors, dasd_io_secs, block); |
546 | dasd_profile_counter(tottime, dasd_io_times, device); | 595 | dasd_profile_counter(tottime, dasd_io_times, block); |
547 | dasd_profile_counter(tottimeps, dasd_io_timps, device); | 596 | dasd_profile_counter(tottimeps, dasd_io_timps, block); |
548 | dasd_profile_counter(strtime, dasd_io_time1, device); | 597 | dasd_profile_counter(strtime, dasd_io_time1, block); |
549 | dasd_profile_counter(irqtime, dasd_io_time2, device); | 598 | dasd_profile_counter(irqtime, dasd_io_time2, block); |
550 | dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, device); | 599 | dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block); |
551 | dasd_profile_counter(endtime, dasd_io_time3, device); | 600 | dasd_profile_counter(endtime, dasd_io_time3, block); |
552 | } | 601 | } |
553 | #else | 602 | #else |
554 | #define dasd_profile_start(device, cqr, req) do {} while (0) | 603 | #define dasd_profile_start(block, cqr, req) do {} while (0) |
555 | #define dasd_profile_end(device, cqr, req) do {} while (0) | 604 | #define dasd_profile_end(block, cqr, req) do {} while (0) |
556 | #endif /* CONFIG_DASD_PROFILE */ | 605 | #endif /* CONFIG_DASD_PROFILE */ |
557 | 606 | ||
558 | /* | 607 | /* |
@@ -562,9 +611,9 @@ dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr, | |||
562 | * memory and 2) dasd_smalloc_request uses the static ccw memory | 611 | * memory and 2) dasd_smalloc_request uses the static ccw memory |
563 | * that gets allocated for each device. | 612 | * that gets allocated for each device. |
564 | */ | 613 | */ |
565 | struct dasd_ccw_req * | 614 | struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength, |
566 | dasd_kmalloc_request(char *magic, int cplength, int datasize, | 615 | int datasize, |
567 | struct dasd_device * device) | 616 | struct dasd_device *device) |
568 | { | 617 | { |
569 | struct dasd_ccw_req *cqr; | 618 | struct dasd_ccw_req *cqr; |
570 | 619 | ||
@@ -600,9 +649,9 @@ dasd_kmalloc_request(char *magic, int cplength, int datasize, | |||
600 | return cqr; | 649 | return cqr; |
601 | } | 650 | } |
602 | 651 | ||
603 | struct dasd_ccw_req * | 652 | struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength, |
604 | dasd_smalloc_request(char *magic, int cplength, int datasize, | 653 | int datasize, |
605 | struct dasd_device * device) | 654 | struct dasd_device *device) |
606 | { | 655 | { |
607 | unsigned long flags; | 656 | unsigned long flags; |
608 | struct dasd_ccw_req *cqr; | 657 | struct dasd_ccw_req *cqr; |
@@ -649,8 +698,7 @@ dasd_smalloc_request(char *magic, int cplength, int datasize, | |||
649 | * idal lists that might have been created by dasd_set_cda and the | 698 | * idal lists that might have been created by dasd_set_cda and the |
650 | * struct dasd_ccw_req itself. | 699 | * struct dasd_ccw_req itself. |
651 | */ | 700 | */ |
652 | void | 701 | void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) |
653 | dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device) | ||
654 | { | 702 | { |
655 | #ifdef CONFIG_64BIT | 703 | #ifdef CONFIG_64BIT |
656 | struct ccw1 *ccw; | 704 | struct ccw1 *ccw; |
@@ -667,8 +715,7 @@ dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device) | |||
667 | dasd_put_device(device); | 715 | dasd_put_device(device); |
668 | } | 716 | } |
669 | 717 | ||
670 | void | 718 | void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) |
671 | dasd_sfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device) | ||
672 | { | 719 | { |
673 | unsigned long flags; | 720 | unsigned long flags; |
674 | 721 | ||
@@ -681,14 +728,13 @@ dasd_sfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device) | |||
681 | /* | 728 | /* |
682 | * Check discipline magic in cqr. | 729 | * Check discipline magic in cqr. |
683 | */ | 730 | */ |
684 | static inline int | 731 | static inline int dasd_check_cqr(struct dasd_ccw_req *cqr) |
685 | dasd_check_cqr(struct dasd_ccw_req *cqr) | ||
686 | { | 732 | { |
687 | struct dasd_device *device; | 733 | struct dasd_device *device; |
688 | 734 | ||
689 | if (cqr == NULL) | 735 | if (cqr == NULL) |
690 | return -EINVAL; | 736 | return -EINVAL; |
691 | device = cqr->device; | 737 | device = cqr->startdev; |
692 | if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) { | 738 | if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) { |
693 | DEV_MESSAGE(KERN_WARNING, device, | 739 | DEV_MESSAGE(KERN_WARNING, device, |
694 | " dasd_ccw_req 0x%08x magic doesn't match" | 740 | " dasd_ccw_req 0x%08x magic doesn't match" |
@@ -706,8 +752,7 @@ dasd_check_cqr(struct dasd_ccw_req *cqr) | |||
706 | * ccw_device_clear can fail if the i/o subsystem | 752 | * ccw_device_clear can fail if the i/o subsystem |
707 | * is in a bad mood. | 753 | * is in a bad mood. |
708 | */ | 754 | */ |
709 | int | 755 | int dasd_term_IO(struct dasd_ccw_req *cqr) |
710 | dasd_term_IO(struct dasd_ccw_req * cqr) | ||
711 | { | 756 | { |
712 | struct dasd_device *device; | 757 | struct dasd_device *device; |
713 | int retries, rc; | 758 | int retries, rc; |
@@ -717,13 +762,13 @@ dasd_term_IO(struct dasd_ccw_req * cqr) | |||
717 | if (rc) | 762 | if (rc) |
718 | return rc; | 763 | return rc; |
719 | retries = 0; | 764 | retries = 0; |
720 | device = (struct dasd_device *) cqr->device; | 765 | device = (struct dasd_device *) cqr->startdev; |
721 | while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) { | 766 | while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) { |
722 | rc = ccw_device_clear(device->cdev, (long) cqr); | 767 | rc = ccw_device_clear(device->cdev, (long) cqr); |
723 | switch (rc) { | 768 | switch (rc) { |
724 | case 0: /* termination successful */ | 769 | case 0: /* termination successful */ |
725 | cqr->retries--; | 770 | cqr->retries--; |
726 | cqr->status = DASD_CQR_CLEAR; | 771 | cqr->status = DASD_CQR_CLEAR_PENDING; |
727 | cqr->stopclk = get_clock(); | 772 | cqr->stopclk = get_clock(); |
728 | cqr->starttime = 0; | 773 | cqr->starttime = 0; |
729 | DBF_DEV_EVENT(DBF_DEBUG, device, | 774 | DBF_DEV_EVENT(DBF_DEBUG, device, |
@@ -753,7 +798,7 @@ dasd_term_IO(struct dasd_ccw_req * cqr) | |||
753 | } | 798 | } |
754 | retries++; | 799 | retries++; |
755 | } | 800 | } |
756 | dasd_schedule_bh(device); | 801 | dasd_schedule_device_bh(device); |
757 | return rc; | 802 | return rc; |
758 | } | 803 | } |
759 | 804 | ||
@@ -761,8 +806,7 @@ dasd_term_IO(struct dasd_ccw_req * cqr) | |||
761 | * Start the i/o. This start_IO can fail if the channel is really busy. | 806 | * Start the i/o. This start_IO can fail if the channel is really busy. |
762 | * In that case set up a timer to start the request later. | 807 | * In that case set up a timer to start the request later. |
763 | */ | 808 | */ |
764 | int | 809 | int dasd_start_IO(struct dasd_ccw_req *cqr) |
765 | dasd_start_IO(struct dasd_ccw_req * cqr) | ||
766 | { | 810 | { |
767 | struct dasd_device *device; | 811 | struct dasd_device *device; |
768 | int rc; | 812 | int rc; |
@@ -771,12 +815,12 @@ dasd_start_IO(struct dasd_ccw_req * cqr) | |||
771 | rc = dasd_check_cqr(cqr); | 815 | rc = dasd_check_cqr(cqr); |
772 | if (rc) | 816 | if (rc) |
773 | return rc; | 817 | return rc; |
774 | device = (struct dasd_device *) cqr->device; | 818 | device = (struct dasd_device *) cqr->startdev; |
775 | if (cqr->retries < 0) { | 819 | if (cqr->retries < 0) { |
776 | DEV_MESSAGE(KERN_DEBUG, device, | 820 | DEV_MESSAGE(KERN_DEBUG, device, |
777 | "start_IO: request %p (%02x/%i) - no retry left.", | 821 | "start_IO: request %p (%02x/%i) - no retry left.", |
778 | cqr, cqr->status, cqr->retries); | 822 | cqr, cqr->status, cqr->retries); |
779 | cqr->status = DASD_CQR_FAILED; | 823 | cqr->status = DASD_CQR_ERROR; |
780 | return -EIO; | 824 | return -EIO; |
781 | } | 825 | } |
782 | cqr->startclk = get_clock(); | 826 | cqr->startclk = get_clock(); |
@@ -833,8 +877,7 @@ dasd_start_IO(struct dasd_ccw_req * cqr) | |||
833 | * The head of the ccw queue will have status DASD_CQR_IN_IO for 1), | 877 | * The head of the ccw queue will have status DASD_CQR_IN_IO for 1), |
834 | * DASD_CQR_QUEUED for 2) and 3). | 878 | * DASD_CQR_QUEUED for 2) and 3). |
835 | */ | 879 | */ |
836 | static void | 880 | static void dasd_device_timeout(unsigned long ptr) |
837 | dasd_timeout_device(unsigned long ptr) | ||
838 | { | 881 | { |
839 | unsigned long flags; | 882 | unsigned long flags; |
840 | struct dasd_device *device; | 883 | struct dasd_device *device; |
@@ -844,14 +887,13 @@ dasd_timeout_device(unsigned long ptr) | |||
844 | /* re-activate request queue */ | 887 | /* re-activate request queue */ |
845 | device->stopped &= ~DASD_STOPPED_PENDING; | 888 | device->stopped &= ~DASD_STOPPED_PENDING; |
846 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | 889 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); |
847 | dasd_schedule_bh(device); | 890 | dasd_schedule_device_bh(device); |
848 | } | 891 | } |
849 | 892 | ||
850 | /* | 893 | /* |
851 | * Setup timeout for a device in jiffies. | 894 | * Setup timeout for a device in jiffies. |
852 | */ | 895 | */ |
853 | void | 896 | void dasd_device_set_timer(struct dasd_device *device, int expires) |
854 | dasd_set_timer(struct dasd_device *device, int expires) | ||
855 | { | 897 | { |
856 | if (expires == 0) { | 898 | if (expires == 0) { |
857 | if (timer_pending(&device->timer)) | 899 | if (timer_pending(&device->timer)) |
@@ -862,7 +904,7 @@ dasd_set_timer(struct dasd_device *device, int expires) | |||
862 | if (mod_timer(&device->timer, jiffies + expires)) | 904 | if (mod_timer(&device->timer, jiffies + expires)) |
863 | return; | 905 | return; |
864 | } | 906 | } |
865 | device->timer.function = dasd_timeout_device; | 907 | device->timer.function = dasd_device_timeout; |
866 | device->timer.data = (unsigned long) device; | 908 | device->timer.data = (unsigned long) device; |
867 | device->timer.expires = jiffies + expires; | 909 | device->timer.expires = jiffies + expires; |
868 | add_timer(&device->timer); | 910 | add_timer(&device->timer); |
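dasd_device_set_timer() follows the classic pre-hrtimer idiom shown in the two hunks above: expires == 0 cancels a pending timer, an already pending timer is re-armed with mod_timer(), and otherwise function, data and expires are filled in before add_timer() arms it. The same idiom as a self-contained sketch against the old struct timer_list API (the names are illustrative):

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static void example_timeout(unsigned long data)
    {
            /* device re-activation would happen here, keyed off 'data' */
    }

    /* Sketch of the set/clear idiom used by dasd_device_set_timer(). */
    static void example_set_timer(struct timer_list *timer,
                                  unsigned long data, int expires)
    {
            if (expires == 0) {
                    if (timer_pending(timer))
                            del_timer(timer);
                    return;
            }
            if (timer_pending(timer)) {
                    mod_timer(timer, jiffies + expires);
                    return;
            }
            timer->function = example_timeout;
            timer->data = data;
            timer->expires = jiffies + expires;
            add_timer(timer);
    }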
@@ -871,15 +913,14 @@ dasd_set_timer(struct dasd_device *device, int expires) | |||
871 | /* | 913 | /* |
872 | * Clear timeout for a device. | 914 | * Clear timeout for a device. |
873 | */ | 915 | */ |
874 | void | 916 | void dasd_device_clear_timer(struct dasd_device *device) |
875 | dasd_clear_timer(struct dasd_device *device) | ||
876 | { | 917 | { |
877 | if (timer_pending(&device->timer)) | 918 | if (timer_pending(&device->timer)) |
878 | del_timer(&device->timer); | 919 | del_timer(&device->timer); |
879 | } | 920 | } |
880 | 921 | ||
881 | static void | 922 | static void dasd_handle_killed_request(struct ccw_device *cdev, |
882 | dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm) | 923 | unsigned long intparm) |
883 | { | 924 | { |
884 | struct dasd_ccw_req *cqr; | 925 | struct dasd_ccw_req *cqr; |
885 | struct dasd_device *device; | 926 | struct dasd_device *device; |
@@ -893,7 +934,7 @@ dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm) | |||
893 | return; | 934 | return; |
894 | } | 935 | } |
895 | 936 | ||
896 | device = (struct dasd_device *) cqr->device; | 937 | device = (struct dasd_device *) cqr->startdev; |
897 | if (device == NULL || | 938 | if (device == NULL || |
898 | device != dasd_device_from_cdev_locked(cdev) || | 939 | device != dasd_device_from_cdev_locked(cdev) || |
899 | strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { | 940 | strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { |
@@ -905,46 +946,32 @@ dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm) | |||
905 | /* Schedule request to be retried. */ | 946 | /* Schedule request to be retried. */ |
906 | cqr->status = DASD_CQR_QUEUED; | 947 | cqr->status = DASD_CQR_QUEUED; |
907 | 948 | ||
908 | dasd_clear_timer(device); | 949 | dasd_device_clear_timer(device); |
909 | dasd_schedule_bh(device); | 950 | dasd_schedule_device_bh(device); |
910 | dasd_put_device(device); | 951 | dasd_put_device(device); |
911 | } | 952 | } |
912 | 953 | ||
913 | static void | 954 | void dasd_generic_handle_state_change(struct dasd_device *device) |
914 | dasd_handle_state_change_pending(struct dasd_device *device) | ||
915 | { | 955 | { |
916 | struct dasd_ccw_req *cqr; | ||
917 | struct list_head *l, *n; | ||
918 | |||
919 | /* First of all start sense subsystem status request. */ | 956 | /* First of all start sense subsystem status request. */ |
920 | dasd_eer_snss(device); | 957 | dasd_eer_snss(device); |
921 | 958 | ||
922 | device->stopped &= ~DASD_STOPPED_PENDING; | 959 | device->stopped &= ~DASD_STOPPED_PENDING; |
923 | 960 | dasd_schedule_device_bh(device); | |
924 | /* restart all 'running' IO on queue */ | 961 | if (device->block) |
925 | list_for_each_safe(l, n, &device->ccw_queue) { | 962 | dasd_schedule_block_bh(device->block); |
926 | cqr = list_entry(l, struct dasd_ccw_req, list); | ||
927 | if (cqr->status == DASD_CQR_IN_IO) { | ||
928 | cqr->status = DASD_CQR_QUEUED; | ||
929 | } | ||
930 | } | ||
931 | dasd_clear_timer(device); | ||
932 | dasd_schedule_bh(device); | ||
933 | } | 963 | } |
934 | 964 | ||
935 | /* | 965 | /* |
936 | * Interrupt handler for "normal" ssch-io based dasd devices. | 966 | * Interrupt handler for "normal" ssch-io based dasd devices. |
937 | */ | 967 | */ |
938 | void | 968 | void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, |
939 | dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, | 969 | struct irb *irb) |
940 | struct irb *irb) | ||
941 | { | 970 | { |
942 | struct dasd_ccw_req *cqr, *next; | 971 | struct dasd_ccw_req *cqr, *next; |
943 | struct dasd_device *device; | 972 | struct dasd_device *device; |
944 | unsigned long long now; | 973 | unsigned long long now; |
945 | int expires; | 974 | int expires; |
946 | dasd_era_t era; | ||
947 | char mask; | ||
948 | 975 | ||
949 | if (IS_ERR(irb)) { | 976 | if (IS_ERR(irb)) { |
950 | switch (PTR_ERR(irb)) { | 977 | switch (PTR_ERR(irb)) { |
@@ -969,29 +996,25 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
969 | cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat), | 996 | cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat), |
970 | (unsigned int) intparm); | 997 | (unsigned int) intparm); |
971 | 998 | ||
972 | /* first of all check for state change pending interrupt */ | 999 | /* check for unsolicited interrupts */ |
973 | mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; | 1000 | cqr = (struct dasd_ccw_req *) intparm; |
974 | if ((irb->scsw.dstat & mask) == mask) { | 1001 | if (!cqr || ((irb->scsw.cc == 1) && |
1002 | (irb->scsw.fctl & SCSW_FCTL_START_FUNC) && | ||
1003 | (irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) ) { | ||
1004 | if (cqr && cqr->status == DASD_CQR_IN_IO) | ||
1005 | cqr->status = DASD_CQR_QUEUED; | ||
975 | device = dasd_device_from_cdev_locked(cdev); | 1006 | device = dasd_device_from_cdev_locked(cdev); |
976 | if (!IS_ERR(device)) { | 1007 | if (!IS_ERR(device)) { |
977 | dasd_handle_state_change_pending(device); | 1008 | dasd_device_clear_timer(device); |
1009 | device->discipline->handle_unsolicited_interrupt(device, | ||
1010 | irb); | ||
978 | dasd_put_device(device); | 1011 | dasd_put_device(device); |
979 | } | 1012 | } |
980 | return; | 1013 | return; |
981 | } | 1014 | } |
982 | 1015 | ||
983 | cqr = (struct dasd_ccw_req *) intparm; | 1016 | device = (struct dasd_device *) cqr->startdev; |
984 | 1017 | if (!device || | |
985 | /* check for unsolicited interrupts */ | ||
986 | if (cqr == NULL) { | ||
987 | MESSAGE(KERN_DEBUG, | ||
988 | "unsolicited interrupt received: bus_id %s", | ||
989 | cdev->dev.bus_id); | ||
990 | return; | ||
991 | } | ||
992 | |||
993 | device = (struct dasd_device *) cqr->device; | ||
994 | if (device == NULL || | ||
995 | strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { | 1018 | strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { |
996 | MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s", | 1019 | MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s", |
997 | cdev->dev.bus_id); | 1020 | cdev->dev.bus_id); |
@@ -999,12 +1022,12 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
999 | } | 1022 | } |
1000 | 1023 | ||
1001 | /* Check for clear pending */ | 1024 | /* Check for clear pending */ |
1002 | if (cqr->status == DASD_CQR_CLEAR && | 1025 | if (cqr->status == DASD_CQR_CLEAR_PENDING && |
1003 | irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) { | 1026 | irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) { |
1004 | cqr->status = DASD_CQR_QUEUED; | 1027 | cqr->status = DASD_CQR_CLEARED; |
1005 | dasd_clear_timer(device); | 1028 | dasd_device_clear_timer(device); |
1006 | wake_up(&dasd_flush_wq); | 1029 | wake_up(&dasd_flush_wq); |
1007 | dasd_schedule_bh(device); | 1030 | dasd_schedule_device_bh(device); |
1008 | return; | 1031 | return; |
1009 | } | 1032 | } |
1010 | 1033 | ||
@@ -1017,277 +1040,170 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1017 | } | 1040 | } |
1018 | DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p", | 1041 | DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p", |
1019 | ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr); | 1042 | ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr); |
1020 | 1043 | next = NULL; | |
1021 | /* Find out the appropriate era_action. */ | ||
1022 | if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) | ||
1023 | era = dasd_era_fatal; | ||
1024 | else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && | ||
1025 | irb->scsw.cstat == 0 && | ||
1026 | !irb->esw.esw0.erw.cons) | ||
1027 | era = dasd_era_none; | ||
1028 | else if (irb->esw.esw0.erw.cons) | ||
1029 | era = device->discipline->examine_error(cqr, irb); | ||
1030 | else | ||
1031 | era = dasd_era_recover; | ||
1032 | |||
1033 | DBF_DEV_EVENT(DBF_DEBUG, device, "era_code %d", era); | ||
1034 | expires = 0; | 1044 | expires = 0; |
1035 | if (era == dasd_era_none) { | 1045 | if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && |
1036 | cqr->status = DASD_CQR_DONE; | 1046 | irb->scsw.cstat == 0 && !irb->esw.esw0.erw.cons) { |
1047 | /* request was completed successfully */ | ||
1048 | cqr->status = DASD_CQR_SUCCESS; | ||
1037 | cqr->stopclk = now; | 1049 | cqr->stopclk = now; |
1038 | /* Start first request on queue if possible -> fast_io. */ | 1050 | /* Start first request on queue if possible -> fast_io. */ |
1039 | if (cqr->list.next != &device->ccw_queue) { | 1051 | if (cqr->devlist.next != &device->ccw_queue) { |
1040 | next = list_entry(cqr->list.next, | 1052 | next = list_entry(cqr->devlist.next, |
1041 | struct dasd_ccw_req, list); | 1053 | struct dasd_ccw_req, devlist); |
1042 | if ((next->status == DASD_CQR_QUEUED) && | ||
1043 | (!device->stopped)) { | ||
1044 | if (device->discipline->start_IO(next) == 0) | ||
1045 | expires = next->expires; | ||
1046 | else | ||
1047 | DEV_MESSAGE(KERN_DEBUG, device, "%s", | ||
1048 | "Interrupt fastpath " | ||
1049 | "failed!"); | ||
1050 | } | ||
1051 | } | 1054 | } |
1052 | } else { /* error */ | 1055 | } else { /* error */ |
1053 | memcpy(&cqr->irb, irb, sizeof (struct irb)); | 1056 | memcpy(&cqr->irb, irb, sizeof(struct irb)); |
1054 | if (device->features & DASD_FEATURE_ERPLOG) { | 1057 | if (device->features & DASD_FEATURE_ERPLOG) { |
1055 | /* dump sense data */ | ||
1056 | dasd_log_sense(cqr, irb); | 1058 | dasd_log_sense(cqr, irb); |
1057 | } | 1059 | } |
1058 | switch (era) { | 1060 | /* If we have no sense data, or we just don't want complex ERP |
1059 | case dasd_era_fatal: | 1061 | * for this request, and we still have retries left, then just |
1060 | cqr->status = DASD_CQR_FAILED; | 1062 | * reset this request and retry it in the fastpath |
1061 | cqr->stopclk = now; | 1063 | */ |
1062 | break; | 1064 | if (!(cqr->irb.esw.esw0.erw.cons && |
1063 | case dasd_era_recover: | 1065 | test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) && |
1066 | cqr->retries > 0) { | ||
1067 | DEV_MESSAGE(KERN_DEBUG, device, | ||
1068 | "default ERP in fastpath (%i retries left)", | ||
1069 | cqr->retries); | ||
1070 | cqr->lpm = LPM_ANYPATH; | ||
1071 | cqr->status = DASD_CQR_QUEUED; | ||
1072 | next = cqr; | ||
1073 | } else | ||
1064 | cqr->status = DASD_CQR_ERROR; | 1074 | cqr->status = DASD_CQR_ERROR; |
1065 | break; | 1075 | } |
1066 | default: | 1076 | if (next && (next->status == DASD_CQR_QUEUED) && |
1067 | BUG(); | 1077 | (!device->stopped)) { |
1068 | } | 1078 | if (device->discipline->start_IO(next) == 0) |
1079 | expires = next->expires; | ||
1080 | else | ||
1081 | DEV_MESSAGE(KERN_DEBUG, device, "%s", | ||
1082 | "Interrupt fastpath " | ||
1083 | "failed!"); | ||
1069 | } | 1084 | } |
1070 | if (expires != 0) | 1085 | if (expires != 0) |
1071 | dasd_set_timer(device, expires); | 1086 | dasd_device_set_timer(device, expires); |
1072 | else | 1087 | else |
1073 | dasd_clear_timer(device); | 1088 | dasd_device_clear_timer(device); |
1074 | dasd_schedule_bh(device); | 1089 | dasd_schedule_device_bh(device); |
1075 | } | 1090 | } |
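A minimal, self-contained sketch of the fast-path decision taken in dasd_int_handler above: retry in the fast path unless the request carries sense data and asked for full ERP, or no retries are left. All toy_* names are invented for illustration and are not part of the driver.

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of the fast-path retry decision (illustrative only). */
    enum toy_status { TOY_QUEUED, TOY_ERROR };

    struct toy_cqr {
        bool has_sense;       /* models irb.esw.esw0.erw.cons != 0 */
        bool wants_full_erp;  /* models DASD_CQR_FLAGS_USE_ERP */
        int retries;
        enum toy_status status;
    };

    static void toy_handle_error(struct toy_cqr *cqr)
    {
        if (!(cqr->has_sense && cqr->wants_full_erp) && cqr->retries > 0)
            cqr->status = TOY_QUEUED;  /* requeue and retry right away */
        else
            cqr->status = TOY_ERROR;   /* hand over to the ERP path */
    }

    int main(void)
    {
        struct toy_cqr fast = { false, false, 2, TOY_ERROR };
        struct toy_cqr erp  = { true,  true,  2, TOY_ERROR };

        toy_handle_error(&fast);
        toy_handle_error(&erp);
        printf("fast: %s, erp: %s\n",
               fast.status == TOY_QUEUED ? "retried" : "ERP",
               erp.status == TOY_QUEUED ? "retried" : "ERP");
        return 0;
    }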
1076 | 1091 | ||
1077 | /* | 1092 | /* |
1078 | * posts the buffer_cache about a finalized request | 1093 | * If we have an error on a dasd_block layer request then we cancel |
1094 | * and return all further requests from the same dasd_block as well. | ||
1079 | */ | 1095 | */ |
1080 | static inline void | 1096 | static void __dasd_device_recovery(struct dasd_device *device, |
1081 | dasd_end_request(struct request *req, int uptodate) | 1097 | struct dasd_ccw_req *ref_cqr) |
1082 | { | 1098 | { |
1083 | if (end_that_request_first(req, uptodate, req->hard_nr_sectors)) | 1099 | struct list_head *l, *n; |
1084 | BUG(); | 1100 | struct dasd_ccw_req *cqr; |
1085 | add_disk_randomness(req->rq_disk); | ||
1086 | end_that_request_last(req, uptodate); | ||
1087 | } | ||
1088 | 1101 | ||
1089 | /* | 1102 | /* |
1090 | * Process finished error recovery ccw. | 1103 | * only requeue request that came from the dasd_block layer |
1091 | */ | 1104 | */ |
1092 | static inline void | 1105 | if (!ref_cqr->block) |
1093 | __dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr) | 1106 | return; |
1094 | { | ||
1095 | dasd_erp_fn_t erp_fn; | ||
1096 | 1107 | ||
1097 | if (cqr->status == DASD_CQR_DONE) | 1108 | list_for_each_safe(l, n, &device->ccw_queue) { |
1098 | DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); | 1109 | cqr = list_entry(l, struct dasd_ccw_req, devlist); |
1099 | else | 1110 | if (cqr->status == DASD_CQR_QUEUED && |
1100 | DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful"); | 1111 | ref_cqr->block == cqr->block) { |
1101 | erp_fn = device->discipline->erp_postaction(cqr); | 1112 | cqr->status = DASD_CQR_CLEARED; |
1102 | erp_fn(cqr); | 1113 | } |
1103 | } | 1114 | } |
1115 | }; | ||
1104 | 1116 | ||
1105 | /* | 1117 | /* |
1106 | * Process ccw request queue. | 1118 | * Remove those ccw requests from the queue that need to be returned |
1119 | * to the upper layer. | ||
1107 | */ | 1120 | */ |
1108 | static void | 1121 | static void __dasd_device_process_ccw_queue(struct dasd_device *device, |
1109 | __dasd_process_ccw_queue(struct dasd_device * device, | 1122 | struct list_head *final_queue) |
1110 | struct list_head *final_queue) | ||
1111 | { | 1123 | { |
1112 | struct list_head *l, *n; | 1124 | struct list_head *l, *n; |
1113 | struct dasd_ccw_req *cqr; | 1125 | struct dasd_ccw_req *cqr; |
1114 | dasd_erp_fn_t erp_fn; | ||
1115 | 1126 | ||
1116 | restart: | ||
1117 | /* Process request with final status. */ | 1127 | /* Process request with final status. */ |
1118 | list_for_each_safe(l, n, &device->ccw_queue) { | 1128 | list_for_each_safe(l, n, &device->ccw_queue) { |
1119 | cqr = list_entry(l, struct dasd_ccw_req, list); | 1129 | cqr = list_entry(l, struct dasd_ccw_req, devlist); |
1130 | |||
1120 | /* Stop list processing at the first non-final request. */ | 1131 | /* Stop list processing at the first non-final request. */ |
1121 | if (cqr->status != DASD_CQR_DONE && | 1132 | if (cqr->status == DASD_CQR_QUEUED || |
1122 | cqr->status != DASD_CQR_FAILED && | 1133 | cqr->status == DASD_CQR_IN_IO || |
1123 | cqr->status != DASD_CQR_ERROR) | 1134 | cqr->status == DASD_CQR_CLEAR_PENDING) |
1124 | break; | 1135 | break; |
1125 | /* Process requests with DASD_CQR_ERROR */ | ||
1126 | if (cqr->status == DASD_CQR_ERROR) { | 1136 | if (cqr->status == DASD_CQR_ERROR) { |
1127 | if (cqr->irb.scsw.fctl & SCSW_FCTL_HALT_FUNC) { | 1137 | __dasd_device_recovery(device, cqr); |
1128 | cqr->status = DASD_CQR_FAILED; | ||
1129 | cqr->stopclk = get_clock(); | ||
1130 | } else { | ||
1131 | if (cqr->irb.esw.esw0.erw.cons && | ||
1132 | test_bit(DASD_CQR_FLAGS_USE_ERP, | ||
1133 | &cqr->flags)) { | ||
1134 | erp_fn = device->discipline-> | ||
1135 | erp_action(cqr); | ||
1136 | erp_fn(cqr); | ||
1137 | } else | ||
1138 | dasd_default_erp_action(cqr); | ||
1139 | } | ||
1140 | goto restart; | ||
1141 | } | ||
1142 | |||
1143 | /* First of all call extended error reporting. */ | ||
1144 | if (dasd_eer_enabled(device) && | ||
1145 | cqr->status == DASD_CQR_FAILED) { | ||
1146 | dasd_eer_write(device, cqr, DASD_EER_FATALERROR); | ||
1147 | |||
1148 | /* restart request */ | ||
1149 | cqr->status = DASD_CQR_QUEUED; | ||
1150 | cqr->retries = 255; | ||
1151 | device->stopped |= DASD_STOPPED_QUIESCE; | ||
1152 | goto restart; | ||
1153 | } | 1138 | } |
1154 | |||
1155 | /* Process finished ERP request. */ | ||
1156 | if (cqr->refers) { | ||
1157 | __dasd_process_erp(device, cqr); | ||
1158 | goto restart; | ||
1159 | } | ||
1160 | |||
1161 | /* Rechain finished requests to final queue */ | 1139 | /* Rechain finished requests to final queue */ |
1162 | cqr->endclk = get_clock(); | 1140 | list_move_tail(&cqr->devlist, final_queue); |
1163 | list_move_tail(&cqr->list, final_queue); | ||
1164 | } | 1141 | } |
1165 | } | 1142 | } |
1166 | 1143 | ||
1167 | static void | ||
1168 | dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data) | ||
1169 | { | ||
1170 | struct request *req; | ||
1171 | struct dasd_device *device; | ||
1172 | int status; | ||
1173 | |||
1174 | req = (struct request *) data; | ||
1175 | device = cqr->device; | ||
1176 | dasd_profile_end(device, cqr, req); | ||
1177 | status = cqr->device->discipline->free_cp(cqr,req); | ||
1178 | spin_lock_irq(&device->request_queue_lock); | ||
1179 | dasd_end_request(req, status); | ||
1180 | spin_unlock_irq(&device->request_queue_lock); | ||
1181 | } | ||
1182 | |||
1183 | |||
1184 | /* | 1144 | /* |
1185 | * Fetch requests from the block device queue. | 1145 | * the cqrs from the final queue are returned to the upper layer |
1146 | * by setting a dasd_block state and calling the callback function | ||
1186 | */ | 1147 | */ |
1187 | static void | 1148 | static void __dasd_device_process_final_queue(struct dasd_device *device, |
1188 | __dasd_process_blk_queue(struct dasd_device * device) | 1149 | struct list_head *final_queue) |
1189 | { | 1150 | { |
1190 | struct request_queue *queue; | 1151 | struct list_head *l, *n; |
1191 | struct request *req; | ||
1192 | struct dasd_ccw_req *cqr; | 1152 | struct dasd_ccw_req *cqr; |
1193 | int nr_queued; | ||
1194 | |||
1195 | queue = device->request_queue; | ||
1196 | /* No queue ? Then there is nothing to do. */ | ||
1197 | if (queue == NULL) | ||
1198 | return; | ||
1199 | |||
1200 | /* | ||
1201 | * We requeue request from the block device queue to the ccw | ||
1202 | * queue only in two states. In state DASD_STATE_READY the | ||
1203 | * partition detection is done and we need to requeue requests | ||
1204 | * for that. State DASD_STATE_ONLINE is normal block device | ||
1205 | * operation. | ||
1206 | */ | ||
1207 | if (device->state != DASD_STATE_READY && | ||
1208 | device->state != DASD_STATE_ONLINE) | ||
1209 | return; | ||
1210 | nr_queued = 0; | ||
1211 | /* Now we try to fetch requests from the request queue */ | ||
1212 | list_for_each_entry(cqr, &device->ccw_queue, list) | ||
1213 | if (cqr->status == DASD_CQR_QUEUED) | ||
1214 | nr_queued++; | ||
1215 | while (!blk_queue_plugged(queue) && | ||
1216 | elv_next_request(queue) && | ||
1217 | nr_queued < DASD_CHANQ_MAX_SIZE) { | ||
1218 | req = elv_next_request(queue); | ||
1219 | 1153 | ||
1220 | if (device->features & DASD_FEATURE_READONLY && | 1154 | list_for_each_safe(l, n, final_queue) { |
1221 | rq_data_dir(req) == WRITE) { | 1155 | cqr = list_entry(l, struct dasd_ccw_req, devlist); |
1222 | DBF_DEV_EVENT(DBF_ERR, device, | 1156 | list_del_init(&cqr->devlist); |
1223 | "Rejecting write request %p", | 1157 | if (cqr->block) |
1224 | req); | 1158 | spin_lock_bh(&cqr->block->queue_lock); |
1225 | blkdev_dequeue_request(req); | 1159 | switch (cqr->status) { |
1226 | dasd_end_request(req, 0); | 1160 | case DASD_CQR_SUCCESS: |
1227 | continue; | 1161 | cqr->status = DASD_CQR_DONE; |
1228 | } | 1162 | break; |
1229 | if (device->stopped & DASD_STOPPED_DC_EIO) { | 1163 | case DASD_CQR_ERROR: |
1230 | blkdev_dequeue_request(req); | 1164 | cqr->status = DASD_CQR_NEED_ERP; |
1231 | dasd_end_request(req, 0); | 1165 | break; |
1232 | continue; | 1166 | case DASD_CQR_CLEARED: |
1233 | } | 1167 | cqr->status = DASD_CQR_TERMINATED; |
1234 | cqr = device->discipline->build_cp(device, req); | 1168 | break; |
1235 | if (IS_ERR(cqr)) { | 1169 | default: |
1236 | if (PTR_ERR(cqr) == -ENOMEM) | 1170 | DEV_MESSAGE(KERN_ERR, device, |
1237 | break; /* terminate request queue loop */ | 1171 | "wrong cqr status in __dasd_process_final_queue " |
1238 | if (PTR_ERR(cqr) == -EAGAIN) { | 1172 | "for cqr %p, status %x", |
1239 | /* | 1173 | cqr, cqr->status); |
1240 | * The current request cannot be built right | ||
1241 | * now, we have to try later. If this request | ||
1242 | * is the head-of-queue we stop the device | ||
1243 | * for 1/2 second. | ||
1244 | */ | ||
1245 | if (!list_empty(&device->ccw_queue)) | ||
1246 | break; | ||
1247 | device->stopped |= DASD_STOPPED_PENDING; | ||
1248 | dasd_set_timer(device, HZ/2); | ||
1249 | break; | ||
1250 | } | ||
1251 | DBF_DEV_EVENT(DBF_ERR, device, | ||
1252 | "CCW creation failed (rc=%ld) " | ||
1253 | "on request %p", | ||
1254 | PTR_ERR(cqr), req); | ||
1255 | blkdev_dequeue_request(req); | ||
1256 | dasd_end_request(req, 0); | ||
1257 | continue; | ||
1258 | } | 1175 | } |
1259 | cqr->callback = dasd_end_request_cb; | 1176 | if (cqr->block) |
1260 | cqr->callback_data = (void *) req; | 1177 | spin_unlock_bh(&cqr->block->queue_lock); |
1261 | cqr->status = DASD_CQR_QUEUED; | 1178 | if (cqr->callback != NULL) |
1262 | blkdev_dequeue_request(req); | 1179 | (cqr->callback)(cqr, cqr->callback_data); |
1263 | list_add_tail(&cqr->list, &device->ccw_queue); | ||
1264 | dasd_profile_start(device, cqr, req); | ||
1265 | nr_queued++; | ||
1266 | } | 1180 | } |
1267 | } | 1181 | } |
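The switch in __dasd_device_process_final_queue above translates device-level completion states into the states the block layer consumes. A standalone sketch of just that mapping, with simplified enum values rather than the driver's DASD_CQR_* definitions:

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-ins for the cqr states; not the kernel definitions. */
    enum cqr_state {
        CQR_SUCCESS, CQR_ERROR, CQR_CLEARED,     /* set by the device layer */
        CQR_DONE, CQR_NEED_ERP, CQR_TERMINATED   /* seen by the block layer */
    };

    /* Mirrors the mapping above; any other input state is a bug (BUG()). */
    static enum cqr_state finalize_state(enum cqr_state s)
    {
        switch (s) {
        case CQR_SUCCESS: return CQR_DONE;
        case CQR_ERROR:   return CQR_NEED_ERP;
        case CQR_CLEARED: return CQR_TERMINATED;
        default:
            fprintf(stderr, "unexpected state %d\n", s);
            abort();
        }
    }

    int main(void)
    {
        printf("%d %d %d\n", finalize_state(CQR_SUCCESS),
               finalize_state(CQR_ERROR), finalize_state(CQR_CLEARED));
        return 0;
    }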
1268 | 1182 | ||
1183 | |||
1184 | |||
1269 | /* | 1185 | /* |
1270 | * Take a look at the first request on the ccw queue and check | 1186 | * Take a look at the first request on the ccw queue and check |
1271 | * if it reached its expire time. If so, terminate the IO. | 1187 | * if it reached its expire time. If so, terminate the IO. |
1272 | */ | 1188 | */ |
1273 | static void | 1189 | static void __dasd_device_check_expire(struct dasd_device *device) |
1274 | __dasd_check_expire(struct dasd_device * device) | ||
1275 | { | 1190 | { |
1276 | struct dasd_ccw_req *cqr; | 1191 | struct dasd_ccw_req *cqr; |
1277 | 1192 | ||
1278 | if (list_empty(&device->ccw_queue)) | 1193 | if (list_empty(&device->ccw_queue)) |
1279 | return; | 1194 | return; |
1280 | cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); | 1195 | cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); |
1281 | if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) && | 1196 | if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) && |
1282 | (time_after_eq(jiffies, cqr->expires + cqr->starttime))) { | 1197 | (time_after_eq(jiffies, cqr->expires + cqr->starttime))) { |
1283 | if (device->discipline->term_IO(cqr) != 0) { | 1198 | if (device->discipline->term_IO(cqr) != 0) { |
1284 | /* Hmpf, try again in 5 sec */ | 1199 | /* Hmpf, try again in 5 sec */ |
1285 | dasd_set_timer(device, 5*HZ); | ||
1286 | DEV_MESSAGE(KERN_ERR, device, | 1200 | DEV_MESSAGE(KERN_ERR, device, |
1287 | "internal error - timeout (%is) expired " | 1201 | "internal error - timeout (%is) expired " |
1288 | "for cqr %p, termination failed, " | 1202 | "for cqr %p, termination failed, " |
1289 | "retrying in 5s", | 1203 | "retrying in 5s", |
1290 | (cqr->expires/HZ), cqr); | 1204 | (cqr->expires/HZ), cqr); |
1205 | cqr->expires += 5*HZ; | ||
1206 | dasd_device_set_timer(device, 5*HZ); | ||
1291 | } else { | 1207 | } else { |
1292 | DEV_MESSAGE(KERN_ERR, device, | 1208 | DEV_MESSAGE(KERN_ERR, device, |
1293 | "internal error - timeout (%is) expired " | 1209 | "internal error - timeout (%is) expired " |
@@ -1301,77 +1217,53 @@ __dasd_check_expire(struct dasd_device * device) | |||
1301 | * Take a look at the first request on the ccw queue and check | 1217 | * Take a look at the first request on the ccw queue and check |
1302 | * if it needs to be started. | 1218 | * if it needs to be started. |
1303 | */ | 1219 | */ |
1304 | static void | 1220 | static void __dasd_device_start_head(struct dasd_device *device) |
1305 | __dasd_start_head(struct dasd_device * device) | ||
1306 | { | 1221 | { |
1307 | struct dasd_ccw_req *cqr; | 1222 | struct dasd_ccw_req *cqr; |
1308 | int rc; | 1223 | int rc; |
1309 | 1224 | ||
1310 | if (list_empty(&device->ccw_queue)) | 1225 | if (list_empty(&device->ccw_queue)) |
1311 | return; | 1226 | return; |
1312 | cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); | 1227 | cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); |
1313 | if (cqr->status != DASD_CQR_QUEUED) | 1228 | if (cqr->status != DASD_CQR_QUEUED) |
1314 | return; | 1229 | return; |
1315 | /* Non-temporary stop condition will trigger fail fast */ | 1230 | /* when device is stopped, return request to previous layer */ |
1316 | if (device->stopped & ~DASD_STOPPED_PENDING && | 1231 | if (device->stopped) { |
1317 | test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && | 1232 | cqr->status = DASD_CQR_CLEARED; |
1318 | (!dasd_eer_enabled(device))) { | 1233 | dasd_schedule_device_bh(device); |
1319 | cqr->status = DASD_CQR_FAILED; | ||
1320 | dasd_schedule_bh(device); | ||
1321 | return; | 1234 | return; |
1322 | } | 1235 | } |
1323 | /* Don't try to start requests if device is stopped */ | ||
1324 | if (device->stopped) | ||
1325 | return; | ||
1326 | 1236 | ||
1327 | rc = device->discipline->start_IO(cqr); | 1237 | rc = device->discipline->start_IO(cqr); |
1328 | if (rc == 0) | 1238 | if (rc == 0) |
1329 | dasd_set_timer(device, cqr->expires); | 1239 | dasd_device_set_timer(device, cqr->expires); |
1330 | else if (rc == -EACCES) { | 1240 | else if (rc == -EACCES) { |
1331 | dasd_schedule_bh(device); | 1241 | dasd_schedule_device_bh(device); |
1332 | } else | 1242 | } else |
1333 | /* Hmpf, try again in 1/2 sec */ | 1243 | /* Hmpf, try again in 1/2 sec */ |
1334 | dasd_set_timer(device, 50); | 1244 | dasd_device_set_timer(device, 50); |
1335 | } | ||
1336 | |||
1337 | static inline int | ||
1338 | _wait_for_clear(struct dasd_ccw_req *cqr) | ||
1339 | { | ||
1340 | return (cqr->status == DASD_CQR_QUEUED); | ||
1341 | } | 1245 | } |
1342 | 1246 | ||
1343 | /* | 1247 | /* |
1344 | * Remove all requests from the ccw queue (all = '1') or only block device | 1248 | * Go through all requests on the dasd_device request queue, |
1345 | * requests in case all = '0'. | 1249 | * terminate them on the cdev if necessary, and return them to the |
1346 | * Take care of the erp-chain (chained via cqr->refers) and remove either | 1250 | * submitting layer via callback. |
1347 | * the whole erp-chain or none of the erp-requests. | 1251 | * Note: |
1348 | * If a request is currently running, term_IO is called and the request | 1252 | * Make sure that all 'submitting layers' still exist when |
1349 | * is re-queued. Prior to removing the terminated request we need to wait | 1253 | * this function is called!. In other words, when 'device' is a base |
1350 | * for the clear-interrupt. | 1253 | * this function is called! In other words, when 'device' is a base |
1351 | * In case termination is not possible we stop processing and just finish | 1254 | * device then all block layer requests must already have been removed |
1352 | * the already moved requests. | ||
1353 | */ | 1256 | */ |
1354 | static int | 1257 | int dasd_flush_device_queue(struct dasd_device *device) |
1355 | dasd_flush_ccw_queue(struct dasd_device * device, int all) | ||
1356 | { | 1258 | { |
1357 | struct dasd_ccw_req *cqr, *orig, *n; | 1259 | struct dasd_ccw_req *cqr, *n; |
1358 | int rc, i; | 1260 | int rc; |
1359 | |||
1360 | struct list_head flush_queue; | 1261 | struct list_head flush_queue; |
1361 | 1262 | ||
1362 | INIT_LIST_HEAD(&flush_queue); | 1263 | INIT_LIST_HEAD(&flush_queue); |
1363 | spin_lock_irq(get_ccwdev_lock(device->cdev)); | 1264 | spin_lock_irq(get_ccwdev_lock(device->cdev)); |
1364 | rc = 0; | 1265 | rc = 0; |
1365 | restart: | 1266 | list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { |
1366 | list_for_each_entry_safe(cqr, n, &device->ccw_queue, list) { | ||
1367 | /* get original request of erp request-chain */ | ||
1368 | for (orig = cqr; orig->refers != NULL; orig = orig->refers); | ||
1369 | |||
1370 | /* Flush all request or only block device requests? */ | ||
1371 | if (all == 0 && cqr->callback != dasd_end_request_cb && | ||
1372 | orig->callback != dasd_end_request_cb) { | ||
1373 | continue; | ||
1374 | } | ||
1375 | /* Check status and move request to flush_queue */ | 1267 | /* Check status and move request to flush_queue */ |
1376 | switch (cqr->status) { | 1268 | switch (cqr->status) { |
1377 | case DASD_CQR_IN_IO: | 1269 | case DASD_CQR_IN_IO: |
@@ -1387,90 +1279,60 @@ restart: | |||
1387 | } | 1279 | } |
1388 | break; | 1280 | break; |
1389 | case DASD_CQR_QUEUED: | 1281 | case DASD_CQR_QUEUED: |
1390 | case DASD_CQR_ERROR: | ||
1391 | /* set request to FAILED */ | ||
1392 | cqr->stopclk = get_clock(); | 1282 | cqr->stopclk = get_clock(); |
1393 | cqr->status = DASD_CQR_FAILED; | 1283 | cqr->status = DASD_CQR_CLEARED; |
1394 | break; | 1284 | break; |
1395 | default: /* do not touch the others */ | 1285 | default: /* no need to modify the others */ |
1396 | break; | 1286 | break; |
1397 | } | 1287 | } |
1398 | /* Rechain request (including erp chain) */ | 1288 | list_move_tail(&cqr->devlist, &flush_queue); |
1399 | for (i = 0; cqr != NULL; cqr = cqr->refers, i++) { | ||
1400 | cqr->endclk = get_clock(); | ||
1401 | list_move_tail(&cqr->list, &flush_queue); | ||
1402 | } | ||
1403 | if (i > 1) | ||
1404 | /* moved more than one request - need to restart */ | ||
1405 | goto restart; | ||
1406 | } | 1289 | } |
1407 | |||
1408 | finished: | 1290 | finished: |
1409 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | 1291 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); |
1410 | /* Now call the callback function of flushed requests */ | 1292 | /* |
1411 | restart_cb: | 1293 | * After this point all requests must be in state CLEAR_PENDING, |
1412 | list_for_each_entry_safe(cqr, n, &flush_queue, list) { | 1294 | * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become |
1413 | if (cqr->status == DASD_CQR_CLEAR) { | 1295 | * one of the others. |
1414 | /* wait for clear interrupt! */ | 1296 | */ |
1415 | wait_event(dasd_flush_wq, _wait_for_clear(cqr)); | 1297 | list_for_each_entry_safe(cqr, n, &flush_queue, devlist) |
1416 | cqr->status = DASD_CQR_FAILED; | 1298 | wait_event(dasd_flush_wq, |
1417 | } | 1299 | (cqr->status != DASD_CQR_CLEAR_PENDING)); |
1418 | /* Process finished ERP request. */ | 1300 | /* |
1419 | if (cqr->refers) { | 1301 | * Now set each request back to TERMINATED, DONE or NEED_ERP |
1420 | __dasd_process_erp(device, cqr); | 1302 | * and call the callback function of flushed requests |
1421 | /* restart list_for_xx loop since dasd_process_erp | 1303 | */ |
1422 | * might remove multiple elements */ | 1304 | __dasd_device_process_final_queue(device, &flush_queue); |
1423 | goto restart_cb; | ||
1424 | } | ||
1425 | /* call the callback function */ | ||
1426 | cqr->endclk = get_clock(); | ||
1427 | if (cqr->callback != NULL) | ||
1428 | (cqr->callback)(cqr, cqr->callback_data); | ||
1429 | } | ||
1430 | return rc; | 1305 | return rc; |
1431 | } | 1306 | } |
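dasd_flush_device_queue above depends on a handshake with the interrupt handler: a terminated request sits in CLEAR_PENDING until the clear interrupt moves it on and wakes dasd_flush_wq. A small userspace model of that handshake, with a pthread condition variable standing in for the kernel wait queue (all names invented; build with -lpthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    enum state { IN_IO, CLEAR_PENDING, CLEARED };

    static enum state st = IN_IO;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t flush_wq = PTHREAD_COND_INITIALIZER;

    /* Plays the clear interrupt that arrives some time after term_IO(). */
    static void *clear_interrupt(void *arg)
    {
        (void) arg;
        usleep(10000);
        pthread_mutex_lock(&lock);
        st = CLEARED;                        /* CLEAR_PENDING -> CLEARED */
        pthread_cond_broadcast(&flush_wq);   /* like wake_up(&dasd_flush_wq) */
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t irq;

        st = CLEAR_PENDING;                  /* a successful term_IO() */
        pthread_create(&irq, NULL, clear_interrupt, NULL);

        pthread_mutex_lock(&lock);
        while (st == CLEAR_PENDING)          /* wait_event(dasd_flush_wq, ...) */
            pthread_cond_wait(&flush_wq, &lock);
        pthread_mutex_unlock(&lock);

        printf("request left CLEAR_PENDING, state=%d\n", st);
        pthread_join(irq, NULL);
        return 0;
    }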
1432 | 1307 | ||
1433 | /* | 1308 | /* |
1434 | * Acquire the device lock and process queues for the device. | 1309 | * Acquire the device lock and process queues for the device. |
1435 | */ | 1310 | */ |
1436 | static void | 1311 | static void dasd_device_tasklet(struct dasd_device *device) |
1437 | dasd_tasklet(struct dasd_device * device) | ||
1438 | { | 1312 | { |
1439 | struct list_head final_queue; | 1313 | struct list_head final_queue; |
1440 | struct list_head *l, *n; | ||
1441 | struct dasd_ccw_req *cqr; | ||
1442 | 1314 | ||
1443 | atomic_set (&device->tasklet_scheduled, 0); | 1315 | atomic_set (&device->tasklet_scheduled, 0); |
1444 | INIT_LIST_HEAD(&final_queue); | 1316 | INIT_LIST_HEAD(&final_queue); |
1445 | spin_lock_irq(get_ccwdev_lock(device->cdev)); | 1317 | spin_lock_irq(get_ccwdev_lock(device->cdev)); |
1446 | /* Check expire time of first request on the ccw queue. */ | 1318 | /* Check expire time of first request on the ccw queue. */ |
1447 | __dasd_check_expire(device); | 1319 | __dasd_device_check_expire(device); |
1448 | /* Finish off requests on ccw queue */ | 1320 | /* find final requests on ccw queue */ |
1449 | __dasd_process_ccw_queue(device, &final_queue); | 1321 | __dasd_device_process_ccw_queue(device, &final_queue); |
1450 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | 1322 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); |
1451 | /* Now call the callback function of requests with final status */ | 1323 | /* Now call the callback function of requests with final status */ |
1452 | list_for_each_safe(l, n, &final_queue) { | 1324 | __dasd_device_process_final_queue(device, &final_queue); |
1453 | cqr = list_entry(l, struct dasd_ccw_req, list); | 1325 | spin_lock_irq(get_ccwdev_lock(device->cdev)); |
1454 | list_del_init(&cqr->list); | ||
1455 | if (cqr->callback != NULL) | ||
1456 | (cqr->callback)(cqr, cqr->callback_data); | ||
1457 | } | ||
1458 | spin_lock_irq(&device->request_queue_lock); | ||
1459 | spin_lock(get_ccwdev_lock(device->cdev)); | ||
1460 | /* Get new request from the block device request queue */ | ||
1461 | __dasd_process_blk_queue(device); | ||
1462 | /* Now check if the head of the ccw queue needs to be started. */ | 1326 | /* Now check if the head of the ccw queue needs to be started. */ |
1463 | __dasd_start_head(device); | 1327 | __dasd_device_start_head(device); |
1464 | spin_unlock(get_ccwdev_lock(device->cdev)); | 1328 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); |
1465 | spin_unlock_irq(&device->request_queue_lock); | ||
1466 | dasd_put_device(device); | 1329 | dasd_put_device(device); |
1467 | } | 1330 | } |
1468 | 1331 | ||
1469 | /* | 1332 | /* |
1470 | * Schedules a call to dasd_tasklet over the device tasklet. | 1333 | * Schedules a call to dasd_tasklet over the device tasklet. |
1471 | */ | 1334 | */ |
1472 | void | 1335 | void dasd_schedule_device_bh(struct dasd_device *device) |
1473 | dasd_schedule_bh(struct dasd_device * device) | ||
1474 | { | 1336 | { |
1475 | /* Protect against rescheduling. */ | 1337 | /* Protect against rescheduling. */ |
1476 | if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) | 1338 | if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) |
@@ -1480,160 +1342,109 @@ dasd_schedule_bh(struct dasd_device * device) | |||
1480 | } | 1342 | } |
1481 | 1343 | ||
1482 | /* | 1344 | /* |
1483 | * Queue a request to the head of the ccw_queue. Start the I/O if | 1345 | * Queue a request to the head of the device ccw_queue. |
1484 | * possible. | 1346 | * Start the I/O if possible. |
1485 | */ | 1347 | */ |
1486 | void | 1348 | void dasd_add_request_head(struct dasd_ccw_req *cqr) |
1487 | dasd_add_request_head(struct dasd_ccw_req *req) | ||
1488 | { | 1349 | { |
1489 | struct dasd_device *device; | 1350 | struct dasd_device *device; |
1490 | unsigned long flags; | 1351 | unsigned long flags; |
1491 | 1352 | ||
1492 | device = req->device; | 1353 | device = cqr->startdev; |
1493 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | 1354 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); |
1494 | req->status = DASD_CQR_QUEUED; | 1355 | cqr->status = DASD_CQR_QUEUED; |
1495 | req->device = device; | 1356 | list_add(&cqr->devlist, &device->ccw_queue); |
1496 | list_add(&req->list, &device->ccw_queue); | ||
1497 | /* let the bh start the request to keep them in order */ | 1357 | /* let the bh start the request to keep them in order */ |
1498 | dasd_schedule_bh(device); | 1358 | dasd_schedule_device_bh(device); |
1499 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | 1359 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); |
1500 | } | 1360 | } |
1501 | 1361 | ||
1502 | /* | 1362 | /* |
1503 | * Queue a request to the tail of the ccw_queue. Start the I/O if | 1363 | * Queue a request to the tail of the device ccw_queue. |
1504 | * possible. | 1364 | * Start the I/O if possible. |
1505 | */ | 1365 | */ |
1506 | void | 1366 | void dasd_add_request_tail(struct dasd_ccw_req *cqr) |
1507 | dasd_add_request_tail(struct dasd_ccw_req *req) | ||
1508 | { | 1367 | { |
1509 | struct dasd_device *device; | 1368 | struct dasd_device *device; |
1510 | unsigned long flags; | 1369 | unsigned long flags; |
1511 | 1370 | ||
1512 | device = req->device; | 1371 | device = cqr->startdev; |
1513 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | 1372 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); |
1514 | req->status = DASD_CQR_QUEUED; | 1373 | cqr->status = DASD_CQR_QUEUED; |
1515 | req->device = device; | 1374 | list_add_tail(&cqr->devlist, &device->ccw_queue); |
1516 | list_add_tail(&req->list, &device->ccw_queue); | ||
1517 | /* let the bh start the request to keep them in order */ | 1375 | /* let the bh start the request to keep them in order */ |
1518 | dasd_schedule_bh(device); | 1376 | dasd_schedule_device_bh(device); |
1519 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | 1377 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); |
1520 | } | 1378 | } |
1521 | 1379 | ||
1522 | /* | 1380 | /* |
1523 | * Wakeup callback. | 1381 | * Wakeup helper for the 'sleep_on' functions. |
1524 | */ | 1382 | */ |
1525 | static void | 1383 | static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) |
1526 | dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) | ||
1527 | { | 1384 | { |
1528 | wake_up((wait_queue_head_t *) data); | 1385 | wake_up((wait_queue_head_t *) data); |
1529 | } | 1386 | } |
1530 | 1387 | ||
1531 | static inline int | 1388 | static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) |
1532 | _wait_for_wakeup(struct dasd_ccw_req *cqr) | ||
1533 | { | 1389 | { |
1534 | struct dasd_device *device; | 1390 | struct dasd_device *device; |
1535 | int rc; | 1391 | int rc; |
1536 | 1392 | ||
1537 | device = cqr->device; | 1393 | device = cqr->startdev; |
1538 | spin_lock_irq(get_ccwdev_lock(device->cdev)); | 1394 | spin_lock_irq(get_ccwdev_lock(device->cdev)); |
1539 | rc = ((cqr->status == DASD_CQR_DONE || | 1395 | rc = ((cqr->status == DASD_CQR_DONE || |
1540 | cqr->status == DASD_CQR_FAILED) && | 1396 | cqr->status == DASD_CQR_NEED_ERP || |
1541 | list_empty(&cqr->list)); | 1397 | cqr->status == DASD_CQR_TERMINATED) && |
1398 | list_empty(&cqr->devlist)); | ||
1542 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | 1399 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); |
1543 | return rc; | 1400 | return rc; |
1544 | } | 1401 | } |
1545 | 1402 | ||
1546 | /* | 1403 | /* |
1547 | * Attempts to start a special ccw queue and waits for its completion. | 1404 | * Queue a request to the tail of the device ccw_queue and wait for |
1405 | * its completion. | ||
1548 | */ | 1406 | */ |
1549 | int | 1407 | int dasd_sleep_on(struct dasd_ccw_req *cqr) |
1550 | dasd_sleep_on(struct dasd_ccw_req * cqr) | ||
1551 | { | 1408 | { |
1552 | wait_queue_head_t wait_q; | 1409 | wait_queue_head_t wait_q; |
1553 | struct dasd_device *device; | 1410 | struct dasd_device *device; |
1554 | int rc; | 1411 | int rc; |
1555 | 1412 | ||
1556 | device = cqr->device; | 1413 | device = cqr->startdev; |
1557 | spin_lock_irq(get_ccwdev_lock(device->cdev)); | ||
1558 | 1414 | ||
1559 | init_waitqueue_head (&wait_q); | 1415 | init_waitqueue_head (&wait_q); |
1560 | cqr->callback = dasd_wakeup_cb; | 1416 | cqr->callback = dasd_wakeup_cb; |
1561 | cqr->callback_data = (void *) &wait_q; | 1417 | cqr->callback_data = (void *) &wait_q; |
1562 | cqr->status = DASD_CQR_QUEUED; | 1418 | dasd_add_request_tail(cqr); |
1563 | list_add_tail(&cqr->list, &device->ccw_queue); | ||
1564 | |||
1565 | /* let the bh start the request to keep them in order */ | ||
1566 | dasd_schedule_bh(device); | ||
1567 | |||
1568 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | ||
1569 | |||
1570 | wait_event(wait_q, _wait_for_wakeup(cqr)); | 1419 | wait_event(wait_q, _wait_for_wakeup(cqr)); |
1571 | 1420 | ||
1572 | /* Request status is either done or failed. */ | 1421 | /* Request status is either done or failed. */ |
1573 | rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0; | 1422 | rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; |
1574 | return rc; | 1423 | return rc; |
1575 | } | 1424 | } |
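A standalone sketch of the completion test the sleep_on helpers above rely on: a request counts as finished only once it has reached a final status and has left the device queue, and only DONE maps to success. The names are simplified stand-ins, not the driver's data structures:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum rstate { R_QUEUED, R_IN_IO, R_DONE, R_NEED_ERP, R_TERMINATED };

    struct toy_req {
        enum rstate status;
        bool on_device_queue;
    };

    /* Models _wait_for_wakeup(): final status and removed from the queue. */
    static bool finished(const struct toy_req *r)
    {
        return (r->status == R_DONE || r->status == R_NEED_ERP ||
                r->status == R_TERMINATED) && !r->on_device_queue;
    }

    /* Models the sleep_on return code: only DONE means success. */
    static int sleep_on_result(const struct toy_req *r)
    {
        return r->status == R_DONE ? 0 : -EIO;
    }

    int main(void)
    {
        struct toy_req ok   = { R_DONE, false };
        struct toy_req fail = { R_NEED_ERP, false };
        struct toy_req busy = { R_IN_IO, true };

        printf("ok: finished=%d rc=%d\n", finished(&ok), sleep_on_result(&ok));
        printf("fail: finished=%d rc=%d\n", finished(&fail), sleep_on_result(&fail));
        printf("busy: finished=%d\n", finished(&busy));
        return 0;
    }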
1576 | 1425 | ||
1577 | /* | 1426 | /* |
1578 | * Attempts to start a special ccw queue and wait interruptible | 1427 | * Queue a request to the tail of the device ccw_queue and wait |
1579 | * for its completion. | 1428 | * interruptibly for its completion. |
1580 | */ | 1429 | */ |
1581 | int | 1430 | int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) |
1582 | dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr) | ||
1583 | { | 1431 | { |
1584 | wait_queue_head_t wait_q; | 1432 | wait_queue_head_t wait_q; |
1585 | struct dasd_device *device; | 1433 | struct dasd_device *device; |
1586 | int rc, finished; | 1434 | int rc; |
1587 | |||
1588 | device = cqr->device; | ||
1589 | spin_lock_irq(get_ccwdev_lock(device->cdev)); | ||
1590 | 1435 | ||
1436 | device = cqr->startdev; | ||
1591 | init_waitqueue_head (&wait_q); | 1437 | init_waitqueue_head (&wait_q); |
1592 | cqr->callback = dasd_wakeup_cb; | 1438 | cqr->callback = dasd_wakeup_cb; |
1593 | cqr->callback_data = (void *) &wait_q; | 1439 | cqr->callback_data = (void *) &wait_q; |
1594 | cqr->status = DASD_CQR_QUEUED; | 1440 | dasd_add_request_tail(cqr); |
1595 | list_add_tail(&cqr->list, &device->ccw_queue); | 1441 | rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr)); |
1596 | 1442 | if (rc == -ERESTARTSYS) { | |
1597 | /* let the bh start the request to keep them in order */ | 1443 | dasd_cancel_req(cqr); |
1598 | dasd_schedule_bh(device); | 1444 | /* wait (non-interruptible) for final status */ |
1599 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | 1445 | wait_event(wait_q, _wait_for_wakeup(cqr)); |
1600 | |||
1601 | finished = 0; | ||
1602 | while (!finished) { | ||
1603 | rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr)); | ||
1604 | if (rc != -ERESTARTSYS) { | ||
1605 | /* Request is final (done or failed) */ | ||
1606 | rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; | ||
1607 | break; | ||
1608 | } | ||
1609 | spin_lock_irq(get_ccwdev_lock(device->cdev)); | ||
1610 | switch (cqr->status) { | ||
1611 | case DASD_CQR_IN_IO: | ||
1612 | /* terminate running cqr */ | ||
1613 | if (device->discipline->term_IO) { | ||
1614 | cqr->retries = -1; | ||
1615 | device->discipline->term_IO(cqr); | ||
1616 | /* wait (non-interruptible) for final status | ||
1617 | * because signal is still pending */ | ||
1618 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | ||
1619 | wait_event(wait_q, _wait_for_wakeup(cqr)); | ||
1620 | spin_lock_irq(get_ccwdev_lock(device->cdev)); | ||
1621 | rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; | ||
1622 | finished = 1; | ||
1623 | } | ||
1624 | break; | ||
1625 | case DASD_CQR_QUEUED: | ||
1626 | /* request */ | ||
1627 | list_del_init(&cqr->list); | ||
1628 | rc = -EIO; | ||
1629 | finished = 1; | ||
1630 | break; | ||
1631 | default: | ||
1632 | /* cqr with 'non-interruptible' status - just wait */ | ||
1633 | break; | ||
1634 | } | ||
1635 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | ||
1636 | } | 1446 | } |
1447 | rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; | ||
1637 | return rc; | 1448 | return rc; |
1638 | } | 1449 | } |
1639 | 1450 | ||
@@ -1643,25 +1454,23 @@ dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr) | |||
1643 | * and be put back to status queued, before the special request is added | 1454 | * and be put back to status queued, before the special request is added |
1644 | * to the head of the queue. Then the special request is waited on normally. | 1455 | * to the head of the queue. Then the special request is waited on normally. |
1645 | */ | 1456 | */ |
1646 | static inline int | 1457 | static inline int _dasd_term_running_cqr(struct dasd_device *device) |
1647 | _dasd_term_running_cqr(struct dasd_device *device) | ||
1648 | { | 1458 | { |
1649 | struct dasd_ccw_req *cqr; | 1459 | struct dasd_ccw_req *cqr; |
1650 | 1460 | ||
1651 | if (list_empty(&device->ccw_queue)) | 1461 | if (list_empty(&device->ccw_queue)) |
1652 | return 0; | 1462 | return 0; |
1653 | cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); | 1463 | cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); |
1654 | return device->discipline->term_IO(cqr); | 1464 | return device->discipline->term_IO(cqr); |
1655 | } | 1465 | } |
1656 | 1466 | ||
1657 | int | 1467 | int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) |
1658 | dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr) | ||
1659 | { | 1468 | { |
1660 | wait_queue_head_t wait_q; | 1469 | wait_queue_head_t wait_q; |
1661 | struct dasd_device *device; | 1470 | struct dasd_device *device; |
1662 | int rc; | 1471 | int rc; |
1663 | 1472 | ||
1664 | device = cqr->device; | 1473 | device = cqr->startdev; |
1665 | spin_lock_irq(get_ccwdev_lock(device->cdev)); | 1474 | spin_lock_irq(get_ccwdev_lock(device->cdev)); |
1666 | rc = _dasd_term_running_cqr(device); | 1475 | rc = _dasd_term_running_cqr(device); |
1667 | if (rc) { | 1476 | if (rc) { |
@@ -1673,17 +1482,17 @@ dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr) | |||
1673 | cqr->callback = dasd_wakeup_cb; | 1482 | cqr->callback = dasd_wakeup_cb; |
1674 | cqr->callback_data = (void *) &wait_q; | 1483 | cqr->callback_data = (void *) &wait_q; |
1675 | cqr->status = DASD_CQR_QUEUED; | 1484 | cqr->status = DASD_CQR_QUEUED; |
1676 | list_add(&cqr->list, &device->ccw_queue); | 1485 | list_add(&cqr->devlist, &device->ccw_queue); |
1677 | 1486 | ||
1678 | /* let the bh start the request to keep them in order */ | 1487 | /* let the bh start the request to keep them in order */ |
1679 | dasd_schedule_bh(device); | 1488 | dasd_schedule_device_bh(device); |
1680 | 1489 | ||
1681 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | 1490 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); |
1682 | 1491 | ||
1683 | wait_event(wait_q, _wait_for_wakeup(cqr)); | 1492 | wait_event(wait_q, _wait_for_wakeup(cqr)); |
1684 | 1493 | ||
1685 | /* Request status is either done or failed. */ | 1494 | /* Request status is either done or failed. */ |
1686 | rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0; | 1495 | rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; |
1687 | return rc; | 1496 | return rc; |
1688 | } | 1497 | } |
1689 | 1498 | ||
@@ -1692,11 +1501,14 @@ dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr) | |||
1692 | * This is useful to timeout requests. The request will be | 1501 | * This is useful to timeout requests. The request will be |
1693 | * terminated if it is currently in i/o. | 1502 | * terminated if it is currently in i/o. |
1694 | * Returns 1 if the request has been terminated. | 1503 | * Returns 1 if the request has been terminated. |
1504 | * 0 if there was no need to terminate the request (not started yet) | ||
1505 | * negative error code if termination failed | ||
1506 | * Cancellation of a request is an asynchronous operation! The calling | ||
1507 | * function has to wait until the request is properly returned via callback. | ||
1695 | */ | 1508 | */ |
1696 | int | 1509 | int dasd_cancel_req(struct dasd_ccw_req *cqr) |
1697 | dasd_cancel_req(struct dasd_ccw_req *cqr) | ||
1698 | { | 1510 | { |
1699 | struct dasd_device *device = cqr->device; | 1511 | struct dasd_device *device = cqr->startdev; |
1700 | unsigned long flags; | 1512 | unsigned long flags; |
1701 | int rc; | 1513 | int rc; |
1702 | 1514 | ||
@@ -1704,74 +1516,453 @@ dasd_cancel_req(struct dasd_ccw_req *cqr) | |||
1704 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | 1516 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); |
1705 | switch (cqr->status) { | 1517 | switch (cqr->status) { |
1706 | case DASD_CQR_QUEUED: | 1518 | case DASD_CQR_QUEUED: |
1707 | /* request was not started - just set to failed */ | 1519 | /* request was not started - just set to cleared */ |
1708 | cqr->status = DASD_CQR_FAILED; | 1520 | cqr->status = DASD_CQR_CLEARED; |
1709 | break; | 1521 | break; |
1710 | case DASD_CQR_IN_IO: | 1522 | case DASD_CQR_IN_IO: |
1711 | /* request in IO - terminate IO and release again */ | 1523 | /* request in IO - terminate IO and release again */ |
1712 | if (device->discipline->term_IO(cqr) != 0) | 1524 | rc = device->discipline->term_IO(cqr); |
1713 | /* what to do if unable to terminate ?????? | 1525 | if (rc) { |
1714 | e.g. not _IN_IO */ | 1526 | DEV_MESSAGE(KERN_ERR, device, |
1715 | cqr->status = DASD_CQR_FAILED; | 1527 | "dasd_cancel_req is unable " |
1716 | cqr->stopclk = get_clock(); | 1528 | "to terminate request %p, rc = %d", |
1717 | rc = 1; | 1529 | cqr, rc); |
1530 | } else { | ||
1531 | cqr->stopclk = get_clock(); | ||
1532 | rc = 1; | ||
1533 | } | ||
1718 | break; | 1534 | break; |
1719 | case DASD_CQR_DONE: | 1535 | default: /* already finished or clear pending - do nothing */ |
1720 | case DASD_CQR_FAILED: | ||
1721 | /* already finished - do nothing */ | ||
1722 | break; | 1536 | break; |
1723 | default: | 1537 | } |
1724 | DEV_MESSAGE(KERN_ALERT, device, | 1538 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); |
1725 | "invalid status %02x in request", | 1539 | dasd_schedule_device_bh(device); |
1726 | cqr->status); | 1540 | return rc; |
1541 | } | ||
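The comments above spell out the dasd_cancel_req() contract: 0 when the request had not started, 1 when a running request was terminated, a negative value when termination failed, and in every case the caller must wait for the request's callback before touching it again. A toy model of that contract with invented names, not kernel code:

    #include <stdbool.h>
    #include <stdio.h>

    enum toy_state { TOY_QUEUED, TOY_IN_IO, TOY_FINAL };

    struct toy_req {
        enum toy_state state;
        bool callback_ran;
    };

    /* Return value only says whether termination was triggered; the request
     * is really gone only once its callback has run. */
    static int toy_cancel(struct toy_req *r)
    {
        switch (r->state) {
        case TOY_QUEUED:
            r->state = TOY_FINAL;   /* never started, nothing to terminate */
            return 0;
        case TOY_IN_IO:
            return 1;               /* termination triggered, finish is async */
        default:
            return 0;               /* already finished, nothing to do */
        }
    }

    int main(void)
    {
        struct toy_req r = { TOY_IN_IO, false };
        int rc = toy_cancel(&r);

        /* A real caller now waits (e.g. on the sleep_on wait queue) for the
         * callback before it may reuse or free the request. */
        r.state = TOY_FINAL;
        r.callback_ran = true;
        printf("cancel rc=%d, callback done=%d\n", rc, r.callback_ran);
        return 0;
    }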
1542 | |||
1543 | |||
1544 | /* | ||
1545 | * SECTION: Operations of the dasd_block layer. | ||
1546 | */ | ||
1547 | |||
1548 | /* | ||
1549 | * Timeout function for dasd_block. This is used when the block layer | ||
1550 | * is waiting for something that may not come reliably (e.g. a state | ||
1551 | * change interrupt). | ||
1552 | */ | ||
1553 | static void dasd_block_timeout(unsigned long ptr) | ||
1554 | { | ||
1555 | unsigned long flags; | ||
1556 | struct dasd_block *block; | ||
1557 | |||
1558 | block = (struct dasd_block *) ptr; | ||
1559 | spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags); | ||
1560 | /* re-activate request queue */ | ||
1561 | block->base->stopped &= ~DASD_STOPPED_PENDING; | ||
1562 | spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags); | ||
1563 | dasd_schedule_block_bh(block); | ||
1564 | } | ||
1565 | |||
1566 | /* | ||
1567 | * Setup timeout for a dasd_block in jiffies. | ||
1568 | */ | ||
1569 | void dasd_block_set_timer(struct dasd_block *block, int expires) | ||
1570 | { | ||
1571 | if (expires == 0) { | ||
1572 | if (timer_pending(&block->timer)) | ||
1573 | del_timer(&block->timer); | ||
1574 | return; | ||
1575 | } | ||
1576 | if (timer_pending(&block->timer)) { | ||
1577 | if (mod_timer(&block->timer, jiffies + expires)) | ||
1578 | return; | ||
1579 | } | ||
1580 | block->timer.function = dasd_block_timeout; | ||
1581 | block->timer.data = (unsigned long) block; | ||
1582 | block->timer.expires = jiffies + expires; | ||
1583 | add_timer(&block->timer); | ||
1584 | } | ||
1585 | |||
1586 | /* | ||
1587 | * Clear timeout for a dasd_block. | ||
1588 | */ | ||
1589 | void dasd_block_clear_timer(struct dasd_block *block) | ||
1590 | { | ||
1591 | if (timer_pending(&block->timer)) | ||
1592 | del_timer(&block->timer); | ||
1593 | } | ||
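dasd_block_set_timer and dasd_block_clear_timer above follow the same pattern as the device-level timer helpers: expires == 0 cancels a pending timer, an already pending timer is pushed out, and otherwise a fresh timer is armed with dasd_block_timeout as its handler. A minimal model of just that control flow, with a fake timer and fake jiffies instead of real kernel timers:

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_timer {
        bool pending;
        unsigned long expires;
    };

    static unsigned long jiffies;   /* fake clock, never advances here */

    static void toy_set_timer(struct toy_timer *t, int expires)
    {
        if (expires == 0) {
            t->pending = false;             /* del_timer() */
            return;
        }
        t->expires = jiffies + expires;     /* mod_timer() / add_timer() */
        t->pending = true;
    }

    static void toy_clear_timer(struct toy_timer *t)
    {
        t->pending = false;                 /* del_timer() if pending */
    }

    int main(void)
    {
        struct toy_timer t = { false, 0 };

        toy_set_timer(&t, 50);              /* e.g. retry in HZ/2 jiffies */
        printf("armed: pending=%d expires=%lu\n", t.pending, t.expires);
        toy_set_timer(&t, 0);               /* expires == 0 clears it */
        printf("cleared: pending=%d\n", t.pending);
        toy_clear_timer(&t);
        return 0;
    }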
1594 | |||
1595 | /* | ||
1596 | * notifies the block layer about a finished request | ||
1597 | */ | ||
1598 | static inline void dasd_end_request(struct request *req, int uptodate) | ||
1599 | { | ||
1600 | if (end_that_request_first(req, uptodate, req->hard_nr_sectors)) | ||
1727 | BUG(); | 1601 | BUG(); |
1602 | add_disk_randomness(req->rq_disk); | ||
1603 | end_that_request_last(req, uptodate); | ||
1604 | } | ||
1605 | |||
1606 | /* | ||
1607 | * Process finished error recovery ccw. | ||
1608 | */ | ||
1609 | static inline void __dasd_block_process_erp(struct dasd_block *block, | ||
1610 | struct dasd_ccw_req *cqr) | ||
1611 | { | ||
1612 | dasd_erp_fn_t erp_fn; | ||
1613 | struct dasd_device *device = block->base; | ||
1728 | 1614 | ||
1615 | if (cqr->status == DASD_CQR_DONE) | ||
1616 | DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); | ||
1617 | else | ||
1618 | DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful"); | ||
1619 | erp_fn = device->discipline->erp_postaction(cqr); | ||
1620 | erp_fn(cqr); | ||
1621 | } | ||
1622 | |||
1623 | /* | ||
1624 | * Fetch requests from the block device queue. | ||
1625 | */ | ||
1626 | static void __dasd_process_request_queue(struct dasd_block *block) | ||
1627 | { | ||
1628 | struct request_queue *queue; | ||
1629 | struct request *req; | ||
1630 | struct dasd_ccw_req *cqr; | ||
1631 | struct dasd_device *basedev; | ||
1632 | unsigned long flags; | ||
1633 | queue = block->request_queue; | ||
1634 | basedev = block->base; | ||
1635 | /* No queue ? Then there is nothing to do. */ | ||
1636 | if (queue == NULL) | ||
1637 | return; | ||
1638 | |||
1639 | /* | ||
1640 | * We requeue request from the block device queue to the ccw | ||
1641 | * queue only in two states. In state DASD_STATE_READY the | ||
1642 | * partition detection is done and we need to requeue requests | ||
1643 | * for that. State DASD_STATE_ONLINE is normal block device | ||
1644 | * operation. | ||
1645 | */ | ||
1646 | if (basedev->state < DASD_STATE_READY) | ||
1647 | return; | ||
1648 | /* Now we try to fetch requests from the request queue */ | ||
1649 | while (!blk_queue_plugged(queue) && | ||
1650 | elv_next_request(queue)) { | ||
1651 | |||
1652 | req = elv_next_request(queue); | ||
1653 | |||
1654 | if (basedev->features & DASD_FEATURE_READONLY && | ||
1655 | rq_data_dir(req) == WRITE) { | ||
1656 | DBF_DEV_EVENT(DBF_ERR, basedev, | ||
1657 | "Rejecting write request %p", | ||
1658 | req); | ||
1659 | blkdev_dequeue_request(req); | ||
1660 | dasd_end_request(req, 0); | ||
1661 | continue; | ||
1662 | } | ||
1663 | cqr = basedev->discipline->build_cp(basedev, block, req); | ||
1664 | if (IS_ERR(cqr)) { | ||
1665 | if (PTR_ERR(cqr) == -EBUSY) | ||
1666 | break; /* normal end condition */ | ||
1667 | if (PTR_ERR(cqr) == -ENOMEM) | ||
1668 | break; /* terminate request queue loop */ | ||
1669 | if (PTR_ERR(cqr) == -EAGAIN) { | ||
1670 | /* | ||
1671 | * The current request cannot be built right | ||
1672 | * now, we have to try later. If this request | ||
1673 | * is the head-of-queue we stop the device | ||
1674 | * for 1/2 second. | ||
1675 | */ | ||
1676 | if (!list_empty(&block->ccw_queue)) | ||
1677 | break; | ||
1678 | spin_lock_irqsave(get_ccwdev_lock(basedev->cdev), flags); | ||
1679 | basedev->stopped |= DASD_STOPPED_PENDING; | ||
1680 | spin_unlock_irqrestore(get_ccwdev_lock(basedev->cdev), flags); | ||
1681 | dasd_block_set_timer(block, HZ/2); | ||
1682 | break; | ||
1683 | } | ||
1684 | DBF_DEV_EVENT(DBF_ERR, basedev, | ||
1685 | "CCW creation failed (rc=%ld) " | ||
1686 | "on request %p", | ||
1687 | PTR_ERR(cqr), req); | ||
1688 | blkdev_dequeue_request(req); | ||
1689 | dasd_end_request(req, 0); | ||
1690 | continue; | ||
1691 | } | ||
1692 | /* | ||
1693 | * Note: callback is set to dasd_return_cqr_cb in | ||
1694 | * __dasd_block_start_head to cover erp requests as well | ||
1695 | */ | ||
1696 | cqr->callback_data = (void *) req; | ||
1697 | cqr->status = DASD_CQR_FILLED; | ||
1698 | blkdev_dequeue_request(req); | ||
1699 | list_add_tail(&cqr->blocklist, &block->ccw_queue); | ||
1700 | dasd_profile_start(block, cqr, req); | ||
1701 | } | ||
1702 | } | ||
1703 | |||
1704 | static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) | ||
1705 | { | ||
1706 | struct request *req; | ||
1707 | int status; | ||
1708 | |||
1709 | req = (struct request *) cqr->callback_data; | ||
1710 | dasd_profile_end(cqr->block, cqr, req); | ||
1711 | status = cqr->memdev->discipline->free_cp(cqr, req); | ||
1712 | dasd_end_request(req, status); | ||
1713 | } | ||
1714 | |||
1715 | /* | ||
1716 | * Process ccw request queue. | ||
1717 | */ | ||
1718 | static void __dasd_process_block_ccw_queue(struct dasd_block *block, | ||
1719 | struct list_head *final_queue) | ||
1720 | { | ||
1721 | struct list_head *l, *n; | ||
1722 | struct dasd_ccw_req *cqr; | ||
1723 | dasd_erp_fn_t erp_fn; | ||
1724 | unsigned long flags; | ||
1725 | struct dasd_device *base = block->base; | ||
1726 | |||
1727 | restart: | ||
1728 | /* Process request with final status. */ | ||
1729 | list_for_each_safe(l, n, &block->ccw_queue) { | ||
1730 | cqr = list_entry(l, struct dasd_ccw_req, blocklist); | ||
1731 | if (cqr->status != DASD_CQR_DONE && | ||
1732 | cqr->status != DASD_CQR_FAILED && | ||
1733 | cqr->status != DASD_CQR_NEED_ERP && | ||
1734 | cqr->status != DASD_CQR_TERMINATED) | ||
1735 | continue; | ||
1736 | |||
1737 | if (cqr->status == DASD_CQR_TERMINATED) { | ||
1738 | base->discipline->handle_terminated_request(cqr); | ||
1739 | goto restart; | ||
1740 | } | ||
1741 | |||
1742 | /* Process requests that may be recovered */ | ||
1743 | if (cqr->status == DASD_CQR_NEED_ERP) { | ||
1744 | if (cqr->irb.esw.esw0.erw.cons && | ||
1745 | test_bit(DASD_CQR_FLAGS_USE_ERP, | ||
1746 | &cqr->flags)) { | ||
1747 | erp_fn = base->discipline->erp_action(cqr); | ||
1748 | erp_fn(cqr); | ||
1749 | } | ||
1750 | goto restart; | ||
1751 | } | ||
1752 | |||
1753 | /* First of all call extended error reporting. */ | ||
1754 | if (dasd_eer_enabled(base) && | ||
1755 | cqr->status == DASD_CQR_FAILED) { | ||
1756 | dasd_eer_write(base, cqr, DASD_EER_FATALERROR); | ||
1757 | |||
1758 | /* restart request */ | ||
1759 | cqr->status = DASD_CQR_FILLED; | ||
1760 | cqr->retries = 255; | ||
1761 | spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); | ||
1762 | base->stopped |= DASD_STOPPED_QUIESCE; | ||
1763 | spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), | ||
1764 | flags); | ||
1765 | goto restart; | ||
1766 | } | ||
1767 | |||
1768 | /* Process finished ERP request. */ | ||
1769 | if (cqr->refers) { | ||
1770 | __dasd_block_process_erp(block, cqr); | ||
1771 | goto restart; | ||
1772 | } | ||
1773 | |||
1774 | /* Rechain finished requests to final queue */ | ||
1775 | cqr->endclk = get_clock(); | ||
1776 | list_move_tail(&cqr->blocklist, final_queue); | ||
1777 | } | ||
1778 | } | ||
1779 | |||
1780 | static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data) | ||
1781 | { | ||
1782 | dasd_schedule_block_bh(cqr->block); | ||
1783 | } | ||
1784 | |||
1785 | static void __dasd_block_start_head(struct dasd_block *block) | ||
1786 | { | ||
1787 | struct dasd_ccw_req *cqr; | ||
1788 | |||
1789 | if (list_empty(&block->ccw_queue)) | ||
1790 | return; | ||
1791 | /* We always begin with the first requests on the queue, as some | ||
1792 | * previously started requests have to be enqueued on a | ||
1793 | * dasd_device again for error recovery. | ||
1794 | */ | ||
1795 | list_for_each_entry(cqr, &block->ccw_queue, blocklist) { | ||
1796 | if (cqr->status != DASD_CQR_FILLED) | ||
1797 | continue; | ||
1798 | /* Non-temporary stop condition will trigger fail fast */ | ||
1799 | if (block->base->stopped & ~DASD_STOPPED_PENDING && | ||
1800 | test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && | ||
1801 | (!dasd_eer_enabled(block->base))) { | ||
1802 | cqr->status = DASD_CQR_FAILED; | ||
1803 | dasd_schedule_block_bh(block); | ||
1804 | continue; | ||
1805 | } | ||
1806 | /* Don't try to start requests if device is stopped */ | ||
1807 | if (block->base->stopped) | ||
1808 | return; | ||
1809 | |||
1810 | /* just a fail safe check, should not happen */ | ||
1811 | if (!cqr->startdev) | ||
1812 | cqr->startdev = block->base; | ||
1813 | |||
1814 | /* make sure that the requests we submit find their way back */ | ||
1815 | cqr->callback = dasd_return_cqr_cb; | ||
1816 | |||
1817 | dasd_add_request_tail(cqr); | ||
1818 | } | ||
1819 | } | ||
1820 | |||
1821 | /* | ||
1822 | * Central dasd_block layer routine. Takes requests from the generic | ||
1823 | * block layer request queue, creates ccw requests, enqueues them on | ||
1824 | * a dasd_device and processes ccw requests that have been returned. | ||
1825 | */ | ||
1826 | static void dasd_block_tasklet(struct dasd_block *block) | ||
1827 | { | ||
1828 | struct list_head final_queue; | ||
1829 | struct list_head *l, *n; | ||
1830 | struct dasd_ccw_req *cqr; | ||
1831 | |||
1832 | atomic_set(&block->tasklet_scheduled, 0); | ||
1833 | INIT_LIST_HEAD(&final_queue); | ||
1834 | spin_lock(&block->queue_lock); | ||
1835 | /* Finish off requests on ccw queue */ | ||
1836 | __dasd_process_block_ccw_queue(block, &final_queue); | ||
1837 | spin_unlock(&block->queue_lock); | ||
1838 | /* Now call the callback function of requests with final status */ | ||
1839 | spin_lock_irq(&block->request_queue_lock); | ||
1840 | list_for_each_safe(l, n, &final_queue) { | ||
1841 | cqr = list_entry(l, struct dasd_ccw_req, blocklist); | ||
1842 | list_del_init(&cqr->blocklist); | ||
1843 | __dasd_cleanup_cqr(cqr); | ||
1844 | } | ||
1845 | spin_lock(&block->queue_lock); | ||
1846 | /* Get new request from the block device request queue */ | ||
1847 | __dasd_process_request_queue(block); | ||
1848 | /* Now check if the head of the ccw queue needs to be started. */ | ||
1849 | __dasd_block_start_head(block); | ||
1850 | spin_unlock(&block->queue_lock); | ||
1851 | spin_unlock_irq(&block->request_queue_lock); | ||
1852 | dasd_put_device(block->base); | ||
1853 | } | ||
1854 | |||
1855 | static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data) | ||
1856 | { | ||
1857 | wake_up(&dasd_flush_wq); | ||
1858 | } | ||
1859 | |||
1860 | /* | ||
1861 | * Go through all requests on the dasd_block request queue, cancel them | ||
1862 | * on the respective dasd_device, and return them to the generic | ||
1863 | * block layer. | ||
1864 | */ | ||
1865 | static int dasd_flush_block_queue(struct dasd_block *block) | ||
1866 | { | ||
1867 | struct dasd_ccw_req *cqr, *n; | ||
1868 | int rc, i; | ||
1869 | struct list_head flush_queue; | ||
1870 | |||
1871 | INIT_LIST_HEAD(&flush_queue); | ||
1872 | spin_lock_bh(&block->queue_lock); | ||
1873 | rc = 0; | ||
1874 | restart: | ||
1875 | list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { | ||
1876 | /* if this request is currently owned by a dasd_device, cancel it */ | ||
1877 | if (cqr->status >= DASD_CQR_QUEUED) | ||
1878 | rc = dasd_cancel_req(cqr); | ||
1879 | if (rc < 0) | ||
1880 | break; | ||
1881 | /* Rechain request (including erp chain) so it won't be | ||
1882 | * touched by the dasd_block_tasklet anymore. | ||
1883 | * Replace the callback so we notice when the request | ||
1884 | * is returned from the dasd_device layer. | ||
1885 | */ | ||
1886 | cqr->callback = _dasd_wake_block_flush_cb; | ||
1887 | for (i = 0; cqr != NULL; cqr = cqr->refers, i++) | ||
1888 | list_move_tail(&cqr->blocklist, &flush_queue); | ||
1889 | if (i > 1) | ||
1890 | /* moved more than one request - need to restart */ | ||
1891 | goto restart; | ||
1892 | } | ||
1893 | spin_unlock_bh(&block->queue_lock); | ||
1894 | /* Now call the callback function of flushed requests */ | ||
1895 | restart_cb: | ||
1896 | list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) { | ||
1897 | wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); | ||
1898 | /* Process finished ERP request. */ | ||
1899 | if (cqr->refers) { | ||
1900 | __dasd_block_process_erp(block, cqr); | ||
1901 | /* restart list_for_xx loop since dasd_process_erp | ||
1902 | * might remove multiple elements */ | ||
1903 | goto restart_cb; | ||
1904 | } | ||
1905 | /* call the callback function */ | ||
1906 | cqr->endclk = get_clock(); | ||
1907 | list_del_init(&cqr->blocklist); | ||
1908 | __dasd_cleanup_cqr(cqr); | ||
1729 | } | 1909 | } |
1730 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | ||
1731 | dasd_schedule_bh(device); | ||
1732 | return rc; | 1910 | return rc; |
1733 | } | 1911 | } |
1734 | 1912 | ||
1735 | /* | 1913 | /* |
1736 | * SECTION: Block device operations (request queue, partitions, open, release). | 1914 | * Schedules a call to dasd_block_tasklet over the block tasklet. |
1915 | */ | ||
1916 | void dasd_schedule_block_bh(struct dasd_block *block) | ||
1917 | { | ||
1918 | /* Protect against rescheduling. */ | ||
1919 | if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0) | ||
1920 | return; | ||
1921 | /* life cycle of block is bound to its base device */ | ||
1922 | dasd_get_device(block->base); | ||
1923 | tasklet_hi_schedule(&block->tasklet); | ||
1924 | } | ||
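The scheduling guard above hinges on an atomic 0-to-1 flip of tasklet_scheduled, so only the first caller in a burst actually queues the block tasklet; the tasklet body re-arms the flag before doing its work. A minimal userspace sketch of that pattern, using C11 atomics in place of the kernel's atomic_cmpxchg()/atomic_set() and a plain function call standing in for tasklet_hi_schedule() (the names below are illustrative, not driver API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int tasklet_scheduled;

/* stands in for dasd_block_tasklet(): re-arm first, then do the work */
static void tasklet_fn(void)
{
	atomic_store(&tasklet_scheduled, 0);
	puts("tasklet body runs once per scheduling burst");
}

/* stands in for dasd_schedule_block_bh(): only the 0->1 winner queues work */
static void schedule_block_bh(void)
{
	int expected = 0;

	if (!atomic_compare_exchange_strong(&tasklet_scheduled, &expected, 1)) {
		puts("already pending, collapsed into the queued run");
		return;
	}
	puts("queued");		/* here the driver would call tasklet_hi_schedule() */
}

int main(void)
{
	schedule_block_bh();	/* queues the deferred work */
	schedule_block_bh();	/* no-op: a run is already pending */
	tasklet_fn();		/* deferred work executes and re-arms the flag */
	schedule_block_bh();	/* can be queued again */
	return 0;
}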
1925 | |||
1926 | |||
1927 | /* | ||
1928 | * SECTION: external block device operations | ||
1929 | * (request queue handling, open, release, etc.) | ||
1737 | */ | 1930 | */ |
1738 | 1931 | ||
1739 | /* | 1932 | /* |
1740 | * Dasd request queue function. Called from ll_rw_blk.c | 1933 | * Dasd request queue function. Called from ll_rw_blk.c |
1741 | */ | 1934 | */ |
1742 | static void | 1935 | static void do_dasd_request(struct request_queue *queue) |
1743 | do_dasd_request(struct request_queue * queue) | ||
1744 | { | 1936 | { |
1745 | struct dasd_device *device; | 1937 | struct dasd_block *block; |
1746 | 1938 | ||
1747 | device = (struct dasd_device *) queue->queuedata; | 1939 | block = queue->queuedata; |
1748 | spin_lock(get_ccwdev_lock(device->cdev)); | 1940 | spin_lock(&block->queue_lock); |
1749 | /* Get new request from the block device request queue */ | 1941 | /* Get new request from the block device request queue */ |
1750 | __dasd_process_blk_queue(device); | 1942 | __dasd_process_request_queue(block); |
1751 | /* Now check if the head of the ccw queue needs to be started. */ | 1943 | /* Now check if the head of the ccw queue needs to be started. */ |
1752 | __dasd_start_head(device); | 1944 | __dasd_block_start_head(block); |
1753 | spin_unlock(get_ccwdev_lock(device->cdev)); | 1945 | spin_unlock(&block->queue_lock); |
1754 | } | 1946 | } |
1755 | 1947 | ||
1756 | /* | 1948 | /* |
1757 | * Allocate and initialize request queue and default I/O scheduler. | 1949 | * Allocate and initialize request queue and default I/O scheduler. |
1758 | */ | 1950 | */ |
1759 | static int | 1951 | static int dasd_alloc_queue(struct dasd_block *block) |
1760 | dasd_alloc_queue(struct dasd_device * device) | ||
1761 | { | 1952 | { |
1762 | int rc; | 1953 | int rc; |
1763 | 1954 | ||
1764 | device->request_queue = blk_init_queue(do_dasd_request, | 1955 | block->request_queue = blk_init_queue(do_dasd_request, |
1765 | &device->request_queue_lock); | 1956 | &block->request_queue_lock); |
1766 | if (device->request_queue == NULL) | 1957 | if (block->request_queue == NULL) |
1767 | return -ENOMEM; | 1958 | return -ENOMEM; |
1768 | 1959 | ||
1769 | device->request_queue->queuedata = device; | 1960 | block->request_queue->queuedata = block; |
1770 | 1961 | ||
1771 | elevator_exit(device->request_queue->elevator); | 1962 | elevator_exit(block->request_queue->elevator); |
1772 | rc = elevator_init(device->request_queue, "deadline"); | 1963 | rc = elevator_init(block->request_queue, "deadline"); |
1773 | if (rc) { | 1964 | if (rc) { |
1774 | blk_cleanup_queue(device->request_queue); | 1965 | blk_cleanup_queue(block->request_queue); |
1775 | return rc; | 1966 | return rc; |
1776 | } | 1967 | } |
1777 | return 0; | 1968 | return 0; |
@@ -1780,79 +1971,76 @@ dasd_alloc_queue(struct dasd_device * device) | |||
1780 | /* | 1971 | /* |
1781 | * Allocate and initialize request queue. | 1972 | * Allocate and initialize request queue. |
1782 | */ | 1973 | */ |
1783 | static void | 1974 | static void dasd_setup_queue(struct dasd_block *block) |
1784 | dasd_setup_queue(struct dasd_device * device) | ||
1785 | { | 1975 | { |
1786 | int max; | 1976 | int max; |
1787 | 1977 | ||
1788 | blk_queue_hardsect_size(device->request_queue, device->bp_block); | 1978 | blk_queue_hardsect_size(block->request_queue, block->bp_block); |
1789 | max = device->discipline->max_blocks << device->s2b_shift; | 1979 | max = block->base->discipline->max_blocks << block->s2b_shift; |
1790 | blk_queue_max_sectors(device->request_queue, max); | 1980 | blk_queue_max_sectors(block->request_queue, max); |
1791 | blk_queue_max_phys_segments(device->request_queue, -1L); | 1981 | blk_queue_max_phys_segments(block->request_queue, -1L); |
1792 | blk_queue_max_hw_segments(device->request_queue, -1L); | 1982 | blk_queue_max_hw_segments(block->request_queue, -1L); |
1793 | blk_queue_max_segment_size(device->request_queue, -1L); | 1983 | blk_queue_max_segment_size(block->request_queue, -1L); |
1794 | blk_queue_segment_boundary(device->request_queue, -1L); | 1984 | blk_queue_segment_boundary(block->request_queue, -1L); |
1795 | blk_queue_ordered(device->request_queue, QUEUE_ORDERED_TAG, NULL); | 1985 | blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL); |
1796 | } | 1986 | } |
1797 | 1987 | ||
1798 | /* | 1988 | /* |
1799 | * Deactivate and free request queue. | 1989 | * Deactivate and free request queue. |
1800 | */ | 1990 | */ |
1801 | static void | 1991 | static void dasd_free_queue(struct dasd_block *block) |
1802 | dasd_free_queue(struct dasd_device * device) | ||
1803 | { | 1992 | { |
1804 | if (device->request_queue) { | 1993 | if (block->request_queue) { |
1805 | blk_cleanup_queue(device->request_queue); | 1994 | blk_cleanup_queue(block->request_queue); |
1806 | device->request_queue = NULL; | 1995 | block->request_queue = NULL; |
1807 | } | 1996 | } |
1808 | } | 1997 | } |
1809 | 1998 | ||
1810 | /* | 1999 | /* |
1811 | * Flush requests on the request queue. | 2000 | * Flush requests on the request queue. |
1812 | */ | 2001 | */ |
1813 | static void | 2002 | static void dasd_flush_request_queue(struct dasd_block *block) |
1814 | dasd_flush_request_queue(struct dasd_device * device) | ||
1815 | { | 2003 | { |
1816 | struct request *req; | 2004 | struct request *req; |
1817 | 2005 | ||
1818 | if (!device->request_queue) | 2006 | if (!block->request_queue) |
1819 | return; | 2007 | return; |
1820 | 2008 | ||
1821 | spin_lock_irq(&device->request_queue_lock); | 2009 | spin_lock_irq(&block->request_queue_lock); |
1822 | while ((req = elv_next_request(device->request_queue))) { | 2010 | while ((req = elv_next_request(block->request_queue))) { |
1823 | blkdev_dequeue_request(req); | 2011 | blkdev_dequeue_request(req); |
1824 | dasd_end_request(req, 0); | 2012 | dasd_end_request(req, 0); |
1825 | } | 2013 | } |
1826 | spin_unlock_irq(&device->request_queue_lock); | 2014 | spin_unlock_irq(&block->request_queue_lock); |
1827 | } | 2015 | } |
1828 | 2016 | ||
1829 | static int | 2017 | static int dasd_open(struct inode *inp, struct file *filp) |
1830 | dasd_open(struct inode *inp, struct file *filp) | ||
1831 | { | 2018 | { |
1832 | struct gendisk *disk = inp->i_bdev->bd_disk; | 2019 | struct gendisk *disk = inp->i_bdev->bd_disk; |
1833 | struct dasd_device *device = disk->private_data; | 2020 | struct dasd_block *block = disk->private_data; |
2021 | struct dasd_device *base = block->base; | ||
1834 | int rc; | 2022 | int rc; |
1835 | 2023 | ||
1836 | atomic_inc(&device->open_count); | 2024 | atomic_inc(&block->open_count); |
1837 | if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { | 2025 | if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { |
1838 | rc = -ENODEV; | 2026 | rc = -ENODEV; |
1839 | goto unlock; | 2027 | goto unlock; |
1840 | } | 2028 | } |
1841 | 2029 | ||
1842 | if (!try_module_get(device->discipline->owner)) { | 2030 | if (!try_module_get(base->discipline->owner)) { |
1843 | rc = -EINVAL; | 2031 | rc = -EINVAL; |
1844 | goto unlock; | 2032 | goto unlock; |
1845 | } | 2033 | } |
1846 | 2034 | ||
1847 | if (dasd_probeonly) { | 2035 | if (dasd_probeonly) { |
1848 | DEV_MESSAGE(KERN_INFO, device, "%s", | 2036 | DEV_MESSAGE(KERN_INFO, base, "%s", |
1849 | "No access to device due to probeonly mode"); | 2037 | "No access to device due to probeonly mode"); |
1850 | rc = -EPERM; | 2038 | rc = -EPERM; |
1851 | goto out; | 2039 | goto out; |
1852 | } | 2040 | } |
1853 | 2041 | ||
1854 | if (device->state <= DASD_STATE_BASIC) { | 2042 | if (base->state <= DASD_STATE_BASIC) { |
1855 | DBF_DEV_EVENT(DBF_ERR, device, " %s", | 2043 | DBF_DEV_EVENT(DBF_ERR, base, " %s", |
1856 | " Cannot open unrecognized device"); | 2044 | " Cannot open unrecognized device"); |
1857 | rc = -ENODEV; | 2045 | rc = -ENODEV; |
1858 | goto out; | 2046 | goto out; |
@@ -1861,41 +2049,41 @@ dasd_open(struct inode *inp, struct file *filp) | |||
1861 | return 0; | 2049 | return 0; |
1862 | 2050 | ||
1863 | out: | 2051 | out: |
1864 | module_put(device->discipline->owner); | 2052 | module_put(base->discipline->owner); |
1865 | unlock: | 2053 | unlock: |
1866 | atomic_dec(&device->open_count); | 2054 | atomic_dec(&block->open_count); |
1867 | return rc; | 2055 | return rc; |
1868 | } | 2056 | } |
1869 | 2057 | ||
1870 | static int | 2058 | static int dasd_release(struct inode *inp, struct file *filp) |
1871 | dasd_release(struct inode *inp, struct file *filp) | ||
1872 | { | 2059 | { |
1873 | struct gendisk *disk = inp->i_bdev->bd_disk; | 2060 | struct gendisk *disk = inp->i_bdev->bd_disk; |
1874 | struct dasd_device *device = disk->private_data; | 2061 | struct dasd_block *block = disk->private_data; |
1875 | 2062 | ||
1876 | atomic_dec(&device->open_count); | 2063 | atomic_dec(&block->open_count); |
1877 | module_put(device->discipline->owner); | 2064 | module_put(block->base->discipline->owner); |
1878 | return 0; | 2065 | return 0; |
1879 | } | 2066 | } |
1880 | 2067 | ||
1881 | /* | 2068 | /* |
1882 | * Return disk geometry. | 2069 | * Return disk geometry. |
1883 | */ | 2070 | */ |
1884 | static int | 2071 | static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) |
1885 | dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) | ||
1886 | { | 2072 | { |
1887 | struct dasd_device *device; | 2073 | struct dasd_block *block; |
2074 | struct dasd_device *base; | ||
1888 | 2075 | ||
1889 | device = bdev->bd_disk->private_data; | 2076 | block = bdev->bd_disk->private_data; |
1890 | if (!device) | 2077 | base = block->base; |
2078 | if (!block) | ||
1891 | return -ENODEV; | 2079 | return -ENODEV; |
1892 | 2080 | ||
1893 | if (!device->discipline || | 2081 | if (!base->discipline || |
1894 | !device->discipline->fill_geometry) | 2082 | !base->discipline->fill_geometry) |
1895 | return -EINVAL; | 2083 | return -EINVAL; |
1896 | 2084 | ||
1897 | device->discipline->fill_geometry(device, geo); | 2085 | base->discipline->fill_geometry(block, geo); |
1898 | geo->start = get_start_sect(bdev) >> device->s2b_shift; | 2086 | geo->start = get_start_sect(bdev) >> block->s2b_shift; |
1899 | return 0; | 2087 | return 0; |
1900 | } | 2088 | } |
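dasd_getgeo() above converts the partition start from Linux 512-byte sectors into device blocks with a right shift by s2b_shift. As an illustration (the block size, the start sector and the derivation loop below are assumptions for the example, not code quoted from the driver), a 4096-byte block size gives a shift of 3, so a partition beginning at Linux sector 24 begins at device block 3:

#include <stdio.h>

int main(void)
{
	unsigned int bp_block = 4096;	/* bytes per device block (example value) */
	unsigned long start_sect = 24;	/* partition start in 512-byte sectors */
	unsigned int s2b_shift = 0;

	/* one way to derive the shift: count doublings from 512 bytes up to bp_block */
	for (unsigned int s = bp_block >> 9; s > 1; s >>= 1)
		s2b_shift++;

	printf("s2b_shift=%u, geo->start=%lu\n", s2b_shift, start_sect >> s2b_shift);
	return 0;
}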
1901 | 2089 | ||
@@ -1909,6 +2097,9 @@ dasd_device_operations = { | |||
1909 | .getgeo = dasd_getgeo, | 2097 | .getgeo = dasd_getgeo, |
1910 | }; | 2098 | }; |
1911 | 2099 | ||
2100 | /******************************************************************************* | ||
2101 | * end of block device operations | ||
2102 | */ | ||
1912 | 2103 | ||
1913 | static void | 2104 | static void |
1914 | dasd_exit(void) | 2105 | dasd_exit(void) |
@@ -1937,9 +2128,8 @@ dasd_exit(void) | |||
1937 | * Initial attempt at a probe function. This can be simplified once | 2128 | * Initial attempt at a probe function. This can be simplified once |
1938 | * the other detection code is gone. | 2129 | * the other detection code is gone. |
1939 | */ | 2130 | */ |
1940 | int | 2131 | int dasd_generic_probe(struct ccw_device *cdev, |
1941 | dasd_generic_probe (struct ccw_device *cdev, | 2132 | struct dasd_discipline *discipline) |
1942 | struct dasd_discipline *discipline) | ||
1943 | { | 2133 | { |
1944 | int ret; | 2134 | int ret; |
1945 | 2135 | ||
@@ -1969,19 +2159,20 @@ dasd_generic_probe (struct ccw_device *cdev, | |||
1969 | ret = ccw_device_set_online(cdev); | 2159 | ret = ccw_device_set_online(cdev); |
1970 | if (ret) | 2160 | if (ret) |
1971 | printk(KERN_WARNING | 2161 | printk(KERN_WARNING |
1972 | "dasd_generic_probe: could not initially online " | 2162 | "dasd_generic_probe: could not initially " |
1973 | "ccw-device %s\n", cdev->dev.bus_id); | 2163 | "online ccw-device %s; return code: %d\n", |
1974 | return ret; | 2164 | cdev->dev.bus_id, ret); |
2165 | return 0; | ||
1975 | } | 2166 | } |
1976 | 2167 | ||
1977 | /* | 2168 | /* |
1978 | * This will one day be called from a global not_oper handler. | 2169 | * This will one day be called from a global not_oper handler. |
1979 | * It is also used by driver_unregister during module unload. | 2170 | * It is also used by driver_unregister during module unload. |
1980 | */ | 2171 | */ |
1981 | void | 2172 | void dasd_generic_remove(struct ccw_device *cdev) |
1982 | dasd_generic_remove (struct ccw_device *cdev) | ||
1983 | { | 2173 | { |
1984 | struct dasd_device *device; | 2174 | struct dasd_device *device; |
2175 | struct dasd_block *block; | ||
1985 | 2176 | ||
1986 | cdev->handler = NULL; | 2177 | cdev->handler = NULL; |
1987 | 2178 | ||
@@ -2001,7 +2192,15 @@ dasd_generic_remove (struct ccw_device *cdev) | |||
2001 | */ | 2192 | */ |
2002 | dasd_set_target_state(device, DASD_STATE_NEW); | 2193 | dasd_set_target_state(device, DASD_STATE_NEW); |
2003 | /* dasd_delete_device destroys the device reference. */ | 2194 | /* dasd_delete_device destroys the device reference. */ |
2195 | block = device->block; | ||
2196 | device->block = NULL; | ||
2004 | dasd_delete_device(device); | 2197 | dasd_delete_device(device); |
2198 | /* | ||
2199 | * life cycle of block is bound to the device, so delete it after | ||
2200 | * the device has been safely removed | ||
2201 | */ | ||
2202 | if (block) | ||
2203 | dasd_free_block(block); | ||
2005 | } | 2204 | } |
2006 | 2205 | ||
2007 | /* | 2206 | /* |
@@ -2009,10 +2208,8 @@ dasd_generic_remove (struct ccw_device *cdev) | |||
2009 | * the device is detected for the first time and is supposed to be used | 2208 | * the device is detected for the first time and is supposed to be used |
2010 | * or the user has started activation through sysfs. | 2209 | * or the user has started activation through sysfs. |
2011 | */ | 2210 | */ |
2012 | int | 2211 | int dasd_generic_set_online(struct ccw_device *cdev, |
2013 | dasd_generic_set_online (struct ccw_device *cdev, | 2212 | struct dasd_discipline *base_discipline) |
2014 | struct dasd_discipline *base_discipline) | ||
2015 | |||
2016 | { | 2213 | { |
2017 | struct dasd_discipline *discipline; | 2214 | struct dasd_discipline *discipline; |
2018 | struct dasd_device *device; | 2215 | struct dasd_device *device; |
@@ -2048,6 +2245,7 @@ dasd_generic_set_online (struct ccw_device *cdev, | |||
2048 | device->base_discipline = base_discipline; | 2245 | device->base_discipline = base_discipline; |
2049 | device->discipline = discipline; | 2246 | device->discipline = discipline; |
2050 | 2247 | ||
2248 | /* check_device will allocate block device if necessary */ | ||
2051 | rc = discipline->check_device(device); | 2249 | rc = discipline->check_device(device); |
2052 | if (rc) { | 2250 | if (rc) { |
2053 | printk (KERN_WARNING | 2251 | printk (KERN_WARNING |
@@ -2067,6 +2265,8 @@ dasd_generic_set_online (struct ccw_device *cdev, | |||
2067 | cdev->dev.bus_id); | 2265 | cdev->dev.bus_id); |
2068 | rc = -ENODEV; | 2266 | rc = -ENODEV; |
2069 | dasd_set_target_state(device, DASD_STATE_NEW); | 2267 | dasd_set_target_state(device, DASD_STATE_NEW); |
2268 | if (device->block) | ||
2269 | dasd_free_block(device->block); | ||
2070 | dasd_delete_device(device); | 2270 | dasd_delete_device(device); |
2071 | } else | 2271 | } else |
2072 | pr_debug("dasd_generic device %s found\n", | 2272 | pr_debug("dasd_generic device %s found\n", |
@@ -2081,10 +2281,10 @@ dasd_generic_set_online (struct ccw_device *cdev, | |||
2081 | return rc; | 2281 | return rc; |
2082 | } | 2282 | } |
2083 | 2283 | ||
2084 | int | 2284 | int dasd_generic_set_offline(struct ccw_device *cdev) |
2085 | dasd_generic_set_offline (struct ccw_device *cdev) | ||
2086 | { | 2285 | { |
2087 | struct dasd_device *device; | 2286 | struct dasd_device *device; |
2287 | struct dasd_block *block; | ||
2088 | int max_count, open_count; | 2288 | int max_count, open_count; |
2089 | 2289 | ||
2090 | device = dasd_device_from_cdev(cdev); | 2290 | device = dasd_device_from_cdev(cdev); |
@@ -2101,30 +2301,39 @@ dasd_generic_set_offline (struct ccw_device *cdev) | |||
2101 | * the blkdev_get in dasd_scan_partitions. We are only interested | 2301 | * the blkdev_get in dasd_scan_partitions. We are only interested |
2102 | * in the other openers. | 2302 | * in the other openers. |
2103 | */ | 2303 | */ |
2104 | max_count = device->bdev ? 0 : -1; | 2304 | if (device->block) { |
2105 | open_count = (int) atomic_read(&device->open_count); | 2305 | struct dasd_block *block = device->block; |
2106 | if (open_count > max_count) { | 2306 | max_count = block->bdev ? 0 : -1; |
2107 | if (open_count > 0) | 2307 | open_count = (int) atomic_read(&block->open_count); |
2108 | printk (KERN_WARNING "Can't offline dasd device with " | 2308 | if (open_count > max_count) { |
2109 | "open count = %i.\n", | 2309 | if (open_count > 0) |
2110 | open_count); | 2310 | printk(KERN_WARNING "Can't offline dasd " |
2111 | else | 2311 | "device with open count = %i.\n", |
2112 | printk (KERN_WARNING "%s", | 2312 | open_count); |
2113 | "Can't offline dasd device due to internal " | 2313 | else |
2114 | "use\n"); | 2314 | printk(KERN_WARNING "%s", |
2115 | clear_bit(DASD_FLAG_OFFLINE, &device->flags); | 2315 | "Can't offline dasd device due " |
2116 | dasd_put_device(device); | 2316 | "to internal use\n"); |
2117 | return -EBUSY; | 2317 | clear_bit(DASD_FLAG_OFFLINE, &device->flags); |
2318 | dasd_put_device(device); | ||
2319 | return -EBUSY; | ||
2320 | } | ||
2118 | } | 2321 | } |
2119 | dasd_set_target_state(device, DASD_STATE_NEW); | 2322 | dasd_set_target_state(device, DASD_STATE_NEW); |
2120 | /* dasd_delete_device destroys the device reference. */ | 2323 | /* dasd_delete_device destroys the device reference. */ |
2324 | block = device->block; | ||
2325 | device->block = NULL; | ||
2121 | dasd_delete_device(device); | 2326 | dasd_delete_device(device); |
2122 | 2327 | /* | |
2328 | * life cycle of block is bound to device, so delete it after | ||
2329 | * device was safely removed | ||
2330 | */ | ||
2331 | if (block) | ||
2332 | dasd_free_block(block); | ||
2123 | return 0; | 2333 | return 0; |
2124 | } | 2334 | } |
2125 | 2335 | ||
2126 | int | 2336 | int dasd_generic_notify(struct ccw_device *cdev, int event) |
2127 | dasd_generic_notify(struct ccw_device *cdev, int event) | ||
2128 | { | 2337 | { |
2129 | struct dasd_device *device; | 2338 | struct dasd_device *device; |
2130 | struct dasd_ccw_req *cqr; | 2339 | struct dasd_ccw_req *cqr; |
@@ -2145,27 +2354,22 @@ dasd_generic_notify(struct ccw_device *cdev, int event) | |||
2145 | if (device->state < DASD_STATE_BASIC) | 2354 | if (device->state < DASD_STATE_BASIC) |
2146 | break; | 2355 | break; |
2147 | /* Device is active. We want to keep it. */ | 2356 | /* Device is active. We want to keep it. */ |
2148 | if (test_bit(DASD_FLAG_DSC_ERROR, &device->flags)) { | 2357 | list_for_each_entry(cqr, &device->ccw_queue, devlist) |
2149 | list_for_each_entry(cqr, &device->ccw_queue, list) | 2358 | if (cqr->status == DASD_CQR_IN_IO) { |
2150 | if (cqr->status == DASD_CQR_IN_IO) | 2359 | cqr->status = DASD_CQR_QUEUED; |
2151 | cqr->status = DASD_CQR_FAILED; | 2360 | cqr->retries++; |
2152 | device->stopped |= DASD_STOPPED_DC_EIO; | 2361 | } |
2153 | } else { | 2362 | device->stopped |= DASD_STOPPED_DC_WAIT; |
2154 | list_for_each_entry(cqr, &device->ccw_queue, list) | 2363 | dasd_device_clear_timer(device); |
2155 | if (cqr->status == DASD_CQR_IN_IO) { | 2364 | dasd_schedule_device_bh(device); |
2156 | cqr->status = DASD_CQR_QUEUED; | ||
2157 | cqr->retries++; | ||
2158 | } | ||
2159 | device->stopped |= DASD_STOPPED_DC_WAIT; | ||
2160 | dasd_set_timer(device, 0); | ||
2161 | } | ||
2162 | dasd_schedule_bh(device); | ||
2163 | ret = 1; | 2365 | ret = 1; |
2164 | break; | 2366 | break; |
2165 | case CIO_OPER: | 2367 | case CIO_OPER: |
2166 | /* FIXME: add a sanity check. */ | 2368 | /* FIXME: add a sanity check. */ |
2167 | device->stopped &= ~(DASD_STOPPED_DC_WAIT|DASD_STOPPED_DC_EIO); | 2369 | device->stopped &= ~DASD_STOPPED_DC_WAIT; |
2168 | dasd_schedule_bh(device); | 2370 | dasd_schedule_device_bh(device); |
2371 | if (device->block) | ||
2372 | dasd_schedule_block_bh(device->block); | ||
2169 | ret = 1; | 2373 | ret = 1; |
2170 | break; | 2374 | break; |
2171 | } | 2375 | } |
@@ -2195,7 +2399,8 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, | |||
2195 | ccw->cda = (__u32)(addr_t)rdc_buffer; | 2399 | ccw->cda = (__u32)(addr_t)rdc_buffer; |
2196 | ccw->count = rdc_buffer_size; | 2400 | ccw->count = rdc_buffer_size; |
2197 | 2401 | ||
2198 | cqr->device = device; | 2402 | cqr->startdev = device; |
2403 | cqr->memdev = device; | ||
2199 | cqr->expires = 10*HZ; | 2404 | cqr->expires = 10*HZ; |
2200 | clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); | 2405 | clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); |
2201 | cqr->retries = 2; | 2406 | cqr->retries = 2; |
@@ -2217,13 +2422,12 @@ int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic, | |||
2217 | return PTR_ERR(cqr); | 2422 | return PTR_ERR(cqr); |
2218 | 2423 | ||
2219 | ret = dasd_sleep_on(cqr); | 2424 | ret = dasd_sleep_on(cqr); |
2220 | dasd_sfree_request(cqr, cqr->device); | 2425 | dasd_sfree_request(cqr, cqr->memdev); |
2221 | return ret; | 2426 | return ret; |
2222 | } | 2427 | } |
2223 | EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars); | 2428 | EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars); |
2224 | 2429 | ||
2225 | static int __init | 2430 | static int __init dasd_init(void) |
2226 | dasd_init(void) | ||
2227 | { | 2431 | { |
2228 | int rc; | 2432 | int rc; |
2229 | 2433 | ||
@@ -2231,7 +2435,7 @@ dasd_init(void) | |||
2231 | init_waitqueue_head(&dasd_flush_wq); | 2435 | init_waitqueue_head(&dasd_flush_wq); |
2232 | 2436 | ||
2233 | /* register 'common' DASD debug area, used for all DBF_XXX calls */ | 2437 | /* register 'common' DASD debug area, used for all DBF_XXX calls */ |
2234 | dasd_debug_area = debug_register("dasd", 1, 2, 8 * sizeof (long)); | 2438 | dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long)); |
2235 | if (dasd_debug_area == NULL) { | 2439 | if (dasd_debug_area == NULL) { |
2236 | rc = -ENOMEM; | 2440 | rc = -ENOMEM; |
2237 | goto failed; | 2441 | goto failed; |
@@ -2277,15 +2481,18 @@ EXPORT_SYMBOL(dasd_diag_discipline_pointer); | |||
2277 | EXPORT_SYMBOL(dasd_add_request_head); | 2481 | EXPORT_SYMBOL(dasd_add_request_head); |
2278 | EXPORT_SYMBOL(dasd_add_request_tail); | 2482 | EXPORT_SYMBOL(dasd_add_request_tail); |
2279 | EXPORT_SYMBOL(dasd_cancel_req); | 2483 | EXPORT_SYMBOL(dasd_cancel_req); |
2280 | EXPORT_SYMBOL(dasd_clear_timer); | 2484 | EXPORT_SYMBOL(dasd_device_clear_timer); |
2485 | EXPORT_SYMBOL(dasd_block_clear_timer); | ||
2281 | EXPORT_SYMBOL(dasd_enable_device); | 2486 | EXPORT_SYMBOL(dasd_enable_device); |
2282 | EXPORT_SYMBOL(dasd_int_handler); | 2487 | EXPORT_SYMBOL(dasd_int_handler); |
2283 | EXPORT_SYMBOL(dasd_kfree_request); | 2488 | EXPORT_SYMBOL(dasd_kfree_request); |
2284 | EXPORT_SYMBOL(dasd_kick_device); | 2489 | EXPORT_SYMBOL(dasd_kick_device); |
2285 | EXPORT_SYMBOL(dasd_kmalloc_request); | 2490 | EXPORT_SYMBOL(dasd_kmalloc_request); |
2286 | EXPORT_SYMBOL(dasd_schedule_bh); | 2491 | EXPORT_SYMBOL(dasd_schedule_device_bh); |
2492 | EXPORT_SYMBOL(dasd_schedule_block_bh); | ||
2287 | EXPORT_SYMBOL(dasd_set_target_state); | 2493 | EXPORT_SYMBOL(dasd_set_target_state); |
2288 | EXPORT_SYMBOL(dasd_set_timer); | 2494 | EXPORT_SYMBOL(dasd_device_set_timer); |
2495 | EXPORT_SYMBOL(dasd_block_set_timer); | ||
2289 | EXPORT_SYMBOL(dasd_sfree_request); | 2496 | EXPORT_SYMBOL(dasd_sfree_request); |
2290 | EXPORT_SYMBOL(dasd_sleep_on); | 2497 | EXPORT_SYMBOL(dasd_sleep_on); |
2291 | EXPORT_SYMBOL(dasd_sleep_on_immediatly); | 2498 | EXPORT_SYMBOL(dasd_sleep_on_immediatly); |
@@ -2299,4 +2506,7 @@ EXPORT_SYMBOL_GPL(dasd_generic_remove); | |||
2299 | EXPORT_SYMBOL_GPL(dasd_generic_notify); | 2506 | EXPORT_SYMBOL_GPL(dasd_generic_notify); |
2300 | EXPORT_SYMBOL_GPL(dasd_generic_set_online); | 2507 | EXPORT_SYMBOL_GPL(dasd_generic_set_online); |
2301 | EXPORT_SYMBOL_GPL(dasd_generic_set_offline); | 2508 | EXPORT_SYMBOL_GPL(dasd_generic_set_offline); |
2302 | 2509 | EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change); | |
2510 | EXPORT_SYMBOL_GPL(dasd_flush_device_queue); | ||
2511 | EXPORT_SYMBOL_GPL(dasd_alloc_block); | ||
2512 | EXPORT_SYMBOL_GPL(dasd_free_block); | ||
diff --git a/drivers/s390/block/dasd_3370_erp.c b/drivers/s390/block/dasd_3370_erp.c deleted file mode 100644 index 1ddab8991d92..000000000000 --- a/drivers/s390/block/dasd_3370_erp.c +++ /dev/null | |||
@@ -1,84 +0,0 @@ | |||
1 | /* | ||
2 | * File...........: linux/drivers/s390/block/dasd_3370_erp.c | ||
3 | * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> | ||
4 | * Bugreports.to..: <Linux390@de.ibm.com> | ||
5 | * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000 | ||
6 | * | ||
7 | */ | ||
8 | |||
9 | #define PRINTK_HEADER "dasd_erp(3370)" | ||
10 | |||
11 | #include "dasd_int.h" | ||
12 | |||
13 | |||
14 | /* | ||
15 | * DASD_3370_ERP_EXAMINE | ||
16 | * | ||
17 | * DESCRIPTION | ||
18 | * Checks only for fatal/no/recover error. | ||
19 | * A detailed examination of the sense data is done later outside | ||
20 | * the interrupt handler. | ||
21 | * | ||
22 | * The logic is based on the 'IBM 3880 Storage Control Reference' manual | ||
23 | * 'Chapter 7. 3370 Sense Data'. | ||
24 | * | ||
25 | * RETURN VALUES | ||
26 | * dasd_era_none no error | ||
27 | * dasd_era_fatal for all fatal (unrecoverable errors) | ||
28 | * dasd_era_recover for all others. | ||
29 | */ | ||
30 | dasd_era_t | ||
31 | dasd_3370_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb) | ||
32 | { | ||
33 | char *sense = irb->ecw; | ||
34 | |||
35 | /* check for successful execution first */ | ||
36 | if (irb->scsw.cstat == 0x00 && | ||
37 | irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END)) | ||
38 | return dasd_era_none; | ||
39 | if (sense[0] & 0x80) { /* CMD reject */ | ||
40 | return dasd_era_fatal; | ||
41 | } | ||
42 | if (sense[0] & 0x40) { /* Drive offline */ | ||
43 | return dasd_era_recover; | ||
44 | } | ||
45 | if (sense[0] & 0x20) { /* Bus out parity */ | ||
46 | return dasd_era_recover; | ||
47 | } | ||
48 | if (sense[0] & 0x10) { /* equipment check */ | ||
49 | if (sense[1] & 0x80) { | ||
50 | return dasd_era_fatal; | ||
51 | } | ||
52 | return dasd_era_recover; | ||
53 | } | ||
54 | if (sense[0] & 0x08) { /* data check */ | ||
55 | if (sense[1] & 0x80) { | ||
56 | return dasd_era_fatal; | ||
57 | } | ||
58 | return dasd_era_recover; | ||
59 | } | ||
60 | if (sense[0] & 0x04) { /* overrun */ | ||
61 | if (sense[1] & 0x80) { | ||
62 | return dasd_era_fatal; | ||
63 | } | ||
64 | return dasd_era_recover; | ||
65 | } | ||
66 | if (sense[1] & 0x40) { /* invalid blocksize */ | ||
67 | return dasd_era_fatal; | ||
68 | } | ||
69 | if (sense[1] & 0x04) { /* file protected */ | ||
70 | return dasd_era_recover; | ||
71 | } | ||
72 | if (sense[1] & 0x01) { /* operation incomplete */ | ||
73 | return dasd_era_recover; | ||
74 | } | ||
75 | if (sense[2] & 0x80) { /* check data error */ | ||
76 | return dasd_era_recover; | ||
77 | } | ||
78 | if (sense[2] & 0x10) { /* Env. data present */ | ||
79 | return dasd_era_recover; | ||
80 | } | ||
81 | /* examine the 24 byte sense data */ | ||
82 | return dasd_era_recover; | ||
83 | |||
84 | } /* END dasd_3370_erp_examine */ | ||
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index 5b7385e430ea..c361ab69ec00 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c | |||
@@ -26,158 +26,6 @@ struct DCTL_data { | |||
26 | 26 | ||
27 | /* | 27 | /* |
28 | ***************************************************************************** | 28 | ***************************************************************************** |
29 | * SECTION ERP EXAMINATION | ||
30 | ***************************************************************************** | ||
31 | */ | ||
32 | |||
33 | /* | ||
34 | * DASD_3990_ERP_EXAMINE_24 | ||
35 | * | ||
36 | * DESCRIPTION | ||
37 | * Checks only for fatal (unrecoverable) error. | ||
38 | * A detailed examination of the sense data is done later outside | ||
39 | * the interrupt handler. | ||
40 | * | ||
41 | * Each bit configuration leading to an action code 2 (Exit with | ||
42 | * programming error or unusual condition indication) | ||
43 | * are handled as fatal errors. | ||
44 | * | ||
45 | * All other configurations are handled as recoverable errors. | ||
46 | * | ||
47 | * RETURN VALUES | ||
48 | * dasd_era_fatal for all fatal (unrecoverable errors) | ||
49 | * dasd_era_recover for all others. | ||
50 | */ | ||
51 | static dasd_era_t | ||
52 | dasd_3990_erp_examine_24(struct dasd_ccw_req * cqr, char *sense) | ||
53 | { | ||
54 | |||
55 | struct dasd_device *device = cqr->device; | ||
56 | |||
57 | /* check for 'Command Reject' */ | ||
58 | if ((sense[0] & SNS0_CMD_REJECT) && | ||
59 | (!(sense[2] & SNS2_ENV_DATA_PRESENT))) { | ||
60 | |||
61 | DEV_MESSAGE(KERN_ERR, device, "%s", | ||
62 | "EXAMINE 24: Command Reject detected - " | ||
63 | "fatal error"); | ||
64 | |||
65 | return dasd_era_fatal; | ||
66 | } | ||
67 | |||
68 | /* check for 'Invalid Track Format' */ | ||
69 | if ((sense[1] & SNS1_INV_TRACK_FORMAT) && | ||
70 | (!(sense[2] & SNS2_ENV_DATA_PRESENT))) { | ||
71 | |||
72 | DEV_MESSAGE(KERN_ERR, device, "%s", | ||
73 | "EXAMINE 24: Invalid Track Format detected " | ||
74 | "- fatal error"); | ||
75 | |||
76 | return dasd_era_fatal; | ||
77 | } | ||
78 | |||
79 | /* check for 'No Record Found' */ | ||
80 | if (sense[1] & SNS1_NO_REC_FOUND) { | ||
81 | |||
82 | /* FIXME: fatal error ?!? */ | ||
83 | DEV_MESSAGE(KERN_ERR, device, | ||
84 | "EXAMINE 24: No Record Found detected %s", | ||
85 | device->state <= DASD_STATE_BASIC ? | ||
86 | " " : "- fatal error"); | ||
87 | |||
88 | return dasd_era_fatal; | ||
89 | } | ||
90 | |||
91 | /* return recoverable for all others */ | ||
92 | return dasd_era_recover; | ||
93 | } /* END dasd_3990_erp_examine_24 */ | ||
94 | |||
95 | /* | ||
96 | * DASD_3990_ERP_EXAMINE_32 | ||
97 | * | ||
98 | * DESCRIPTION | ||
99 | * Checks only for fatal/no/recoverable error. | ||
100 | * A detailed examination of the sense data is done later outside | ||
101 | * the interrupt handler. | ||
102 | * | ||
103 | * RETURN VALUES | ||
104 | * dasd_era_none no error | ||
105 | * dasd_era_fatal for all fatal (unrecoverable errors) | ||
106 | * dasd_era_recover for recoverable others. | ||
107 | */ | ||
108 | static dasd_era_t | ||
109 | dasd_3990_erp_examine_32(struct dasd_ccw_req * cqr, char *sense) | ||
110 | { | ||
111 | |||
112 | struct dasd_device *device = cqr->device; | ||
113 | |||
114 | switch (sense[25]) { | ||
115 | case 0x00: | ||
116 | return dasd_era_none; | ||
117 | |||
118 | case 0x01: | ||
119 | DEV_MESSAGE(KERN_ERR, device, "%s", "EXAMINE 32: fatal error"); | ||
120 | |||
121 | return dasd_era_fatal; | ||
122 | |||
123 | default: | ||
124 | |||
125 | return dasd_era_recover; | ||
126 | } | ||
127 | |||
128 | } /* end dasd_3990_erp_examine_32 */ | ||
129 | |||
130 | /* | ||
131 | * DASD_3990_ERP_EXAMINE | ||
132 | * | ||
133 | * DESCRIPTION | ||
134 | * Checks only for fatal/no/recover error. | ||
135 | * A detailed examination of the sense data is done later outside | ||
136 | * the interrupt handler. | ||
137 | * | ||
138 | * The logic is based on the 'IBM 3990 Storage Control Reference' manual | ||
139 | * 'Chapter 7. Error Recovery Procedures'. | ||
140 | * | ||
141 | * RETURN VALUES | ||
142 | * dasd_era_none no error | ||
143 | * dasd_era_fatal for all fatal (unrecoverable errors) | ||
144 | * dasd_era_recover for all others. | ||
145 | */ | ||
146 | dasd_era_t | ||
147 | dasd_3990_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb) | ||
148 | { | ||
149 | |||
150 | char *sense = irb->ecw; | ||
151 | dasd_era_t era = dasd_era_recover; | ||
152 | struct dasd_device *device = cqr->device; | ||
153 | |||
154 | /* check for successful execution first */ | ||
155 | if (irb->scsw.cstat == 0x00 && | ||
156 | irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END)) | ||
157 | return dasd_era_none; | ||
158 | |||
159 | /* distinguish between 24 and 32 byte sense data */ | ||
160 | if (sense[27] & DASD_SENSE_BIT_0) { | ||
161 | |||
162 | era = dasd_3990_erp_examine_24(cqr, sense); | ||
163 | |||
164 | } else { | ||
165 | |||
166 | era = dasd_3990_erp_examine_32(cqr, sense); | ||
167 | |||
168 | } | ||
169 | |||
170 | /* log the erp chain if fatal error occurred */ | ||
171 | if ((era == dasd_era_fatal) && (device->state >= DASD_STATE_READY)) { | ||
172 | dasd_log_sense(cqr, irb); | ||
173 | } | ||
174 | |||
175 | return era; | ||
176 | |||
177 | } /* END dasd_3990_erp_examine */ | ||
178 | |||
179 | /* | ||
180 | ***************************************************************************** | ||
181 | * SECTION ERP HANDLING | 29 | * SECTION ERP HANDLING |
182 | ***************************************************************************** | 30 | ***************************************************************************** |
183 | */ | 31 | */ |
@@ -206,7 +54,7 @@ dasd_3990_erp_cleanup(struct dasd_ccw_req * erp, char final_status) | |||
206 | { | 54 | { |
207 | struct dasd_ccw_req *cqr = erp->refers; | 55 | struct dasd_ccw_req *cqr = erp->refers; |
208 | 56 | ||
209 | dasd_free_erp_request(erp, erp->device); | 57 | dasd_free_erp_request(erp, erp->memdev); |
210 | cqr->status = final_status; | 58 | cqr->status = final_status; |
211 | return cqr; | 59 | return cqr; |
212 | 60 | ||
@@ -224,15 +72,17 @@ static void | |||
224 | dasd_3990_erp_block_queue(struct dasd_ccw_req * erp, int expires) | 72 | dasd_3990_erp_block_queue(struct dasd_ccw_req * erp, int expires) |
225 | { | 73 | { |
226 | 74 | ||
227 | struct dasd_device *device = erp->device; | 75 | struct dasd_device *device = erp->startdev; |
76 | unsigned long flags; | ||
228 | 77 | ||
229 | DEV_MESSAGE(KERN_INFO, device, | 78 | DEV_MESSAGE(KERN_INFO, device, |
230 | "blocking request queue for %is", expires/HZ); | 79 | "blocking request queue for %is", expires/HZ); |
231 | 80 | ||
81 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | ||
232 | device->stopped |= DASD_STOPPED_PENDING; | 82 | device->stopped |= DASD_STOPPED_PENDING; |
233 | erp->status = DASD_CQR_QUEUED; | 83 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); |
234 | 84 | erp->status = DASD_CQR_FILLED; | |
235 | dasd_set_timer(device, expires); | 85 | dasd_block_set_timer(device->block, expires); |
236 | } | 86 | } |
237 | 87 | ||
238 | /* | 88 | /* |
@@ -251,7 +101,7 @@ static struct dasd_ccw_req * | |||
251 | dasd_3990_erp_int_req(struct dasd_ccw_req * erp) | 101 | dasd_3990_erp_int_req(struct dasd_ccw_req * erp) |
252 | { | 102 | { |
253 | 103 | ||
254 | struct dasd_device *device = erp->device; | 104 | struct dasd_device *device = erp->startdev; |
255 | 105 | ||
256 | /* first time set initial retry counter and erp_function */ | 106 | /* first time set initial retry counter and erp_function */ |
257 | /* and retry once without blocking queue */ | 107 | /* and retry once without blocking queue */ |
@@ -292,11 +142,14 @@ dasd_3990_erp_int_req(struct dasd_ccw_req * erp) | |||
292 | static void | 142 | static void |
293 | dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp) | 143 | dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp) |
294 | { | 144 | { |
295 | struct dasd_device *device = erp->device; | 145 | struct dasd_device *device = erp->startdev; |
296 | __u8 opm; | 146 | __u8 opm; |
147 | unsigned long flags; | ||
297 | 148 | ||
298 | /* try alternate valid path */ | 149 | /* try alternate valid path */ |
150 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | ||
299 | opm = ccw_device_get_path_mask(device->cdev); | 151 | opm = ccw_device_get_path_mask(device->cdev); |
152 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | ||
300 | //FIXME: start with get_opm ? | 153 | //FIXME: start with get_opm ? |
301 | if (erp->lpm == 0) | 154 | if (erp->lpm == 0) |
302 | erp->lpm = LPM_ANYPATH & ~(erp->irb.esw.esw0.sublog.lpum); | 155 | erp->lpm = LPM_ANYPATH & ~(erp->irb.esw.esw0.sublog.lpum); |
@@ -309,9 +162,8 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp) | |||
309 | "try alternate lpm=%x (lpum=%x / opm=%x)", | 162 | "try alternate lpm=%x (lpum=%x / opm=%x)", |
310 | erp->lpm, erp->irb.esw.esw0.sublog.lpum, opm); | 163 | erp->lpm, erp->irb.esw.esw0.sublog.lpum, opm); |
311 | 164 | ||
312 | /* reset status to queued to handle the request again... */ | 165 | /* reset status to submit the request again... */ |
313 | if (erp->status > DASD_CQR_QUEUED) | 166 | erp->status = DASD_CQR_FILLED; |
314 | erp->status = DASD_CQR_QUEUED; | ||
315 | erp->retries = 1; | 167 | erp->retries = 1; |
316 | } else { | 168 | } else { |
317 | DEV_MESSAGE(KERN_ERR, device, | 169 | DEV_MESSAGE(KERN_ERR, device, |
@@ -320,8 +172,7 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp) | |||
320 | erp->irb.esw.esw0.sublog.lpum, opm); | 172 | erp->irb.esw.esw0.sublog.lpum, opm); |
321 | 173 | ||
322 | /* post request with permanent error */ | 174 | /* post request with permanent error */ |
323 | if (erp->status > DASD_CQR_QUEUED) | 175 | erp->status = DASD_CQR_FAILED; |
324 | erp->status = DASD_CQR_FAILED; | ||
325 | } | 176 | } |
326 | } /* end dasd_3990_erp_alternate_path */ | 177 | } /* end dasd_3990_erp_alternate_path */ |
327 | 178 | ||
@@ -344,14 +195,14 @@ static struct dasd_ccw_req * | |||
344 | dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier) | 195 | dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier) |
345 | { | 196 | { |
346 | 197 | ||
347 | struct dasd_device *device = erp->device; | 198 | struct dasd_device *device = erp->startdev; |
348 | struct DCTL_data *DCTL_data; | 199 | struct DCTL_data *DCTL_data; |
349 | struct ccw1 *ccw; | 200 | struct ccw1 *ccw; |
350 | struct dasd_ccw_req *dctl_cqr; | 201 | struct dasd_ccw_req *dctl_cqr; |
351 | 202 | ||
352 | dctl_cqr = dasd_alloc_erp_request((char *) &erp->magic, 1, | 203 | dctl_cqr = dasd_alloc_erp_request((char *) &erp->magic, 1, |
353 | sizeof (struct DCTL_data), | 204 | sizeof(struct DCTL_data), |
354 | erp->device); | 205 | device); |
355 | if (IS_ERR(dctl_cqr)) { | 206 | if (IS_ERR(dctl_cqr)) { |
356 | DEV_MESSAGE(KERN_ERR, device, "%s", | 207 | DEV_MESSAGE(KERN_ERR, device, "%s", |
357 | "Unable to allocate DCTL-CQR"); | 208 | "Unable to allocate DCTL-CQR"); |
@@ -365,13 +216,14 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier) | |||
365 | DCTL_data->modifier = modifier; | 216 | DCTL_data->modifier = modifier; |
366 | 217 | ||
367 | ccw = dctl_cqr->cpaddr; | 218 | ccw = dctl_cqr->cpaddr; |
368 | memset(ccw, 0, sizeof (struct ccw1)); | 219 | memset(ccw, 0, sizeof(struct ccw1)); |
369 | ccw->cmd_code = CCW_CMD_DCTL; | 220 | ccw->cmd_code = CCW_CMD_DCTL; |
370 | ccw->count = 4; | 221 | ccw->count = 4; |
371 | ccw->cda = (__u32)(addr_t) DCTL_data; | 222 | ccw->cda = (__u32)(addr_t) DCTL_data; |
372 | dctl_cqr->function = dasd_3990_erp_DCTL; | 223 | dctl_cqr->function = dasd_3990_erp_DCTL; |
373 | dctl_cqr->refers = erp; | 224 | dctl_cqr->refers = erp; |
374 | dctl_cqr->device = erp->device; | 225 | dctl_cqr->startdev = device; |
226 | dctl_cqr->memdev = device; | ||
375 | dctl_cqr->magic = erp->magic; | 227 | dctl_cqr->magic = erp->magic; |
376 | dctl_cqr->expires = 5 * 60 * HZ; | 228 | dctl_cqr->expires = 5 * 60 * HZ; |
377 | dctl_cqr->retries = 2; | 229 | dctl_cqr->retries = 2; |
@@ -435,7 +287,7 @@ static struct dasd_ccw_req * | |||
435 | dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense) | 287 | dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense) |
436 | { | 288 | { |
437 | 289 | ||
438 | struct dasd_device *device = erp->device; | 290 | struct dasd_device *device = erp->startdev; |
439 | 291 | ||
440 | /* first time set initial retry counter and erp_function */ | 292 | /* first time set initial retry counter and erp_function */ |
441 | /* and retry once without waiting for state change pending */ | 293 | /* and retry once without waiting for state change pending */ |
@@ -472,7 +324,7 @@ dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense) | |||
472 | "redriving request immediately, " | 324 | "redriving request immediately, " |
473 | "%d retries left", | 325 | "%d retries left", |
474 | erp->retries); | 326 | erp->retries); |
475 | erp->status = DASD_CQR_QUEUED; | 327 | erp->status = DASD_CQR_FILLED; |
476 | } | 328 | } |
477 | } | 329 | } |
478 | 330 | ||
@@ -530,7 +382,7 @@ static void | |||
530 | dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense) | 382 | dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense) |
531 | { | 383 | { |
532 | 384 | ||
533 | struct dasd_device *device = erp->device; | 385 | struct dasd_device *device = erp->startdev; |
534 | char msg_format = (sense[7] & 0xF0); | 386 | char msg_format = (sense[7] & 0xF0); |
535 | char msg_no = (sense[7] & 0x0F); | 387 | char msg_no = (sense[7] & 0x0F); |
536 | 388 | ||
@@ -1157,7 +1009,7 @@ static struct dasd_ccw_req * | |||
1157 | dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense) | 1009 | dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense) |
1158 | { | 1010 | { |
1159 | 1011 | ||
1160 | struct dasd_device *device = erp->device; | 1012 | struct dasd_device *device = erp->startdev; |
1161 | 1013 | ||
1162 | erp->function = dasd_3990_erp_com_rej; | 1014 | erp->function = dasd_3990_erp_com_rej; |
1163 | 1015 | ||
@@ -1198,7 +1050,7 @@ static struct dasd_ccw_req * | |||
1198 | dasd_3990_erp_bus_out(struct dasd_ccw_req * erp) | 1050 | dasd_3990_erp_bus_out(struct dasd_ccw_req * erp) |
1199 | { | 1051 | { |
1200 | 1052 | ||
1201 | struct dasd_device *device = erp->device; | 1053 | struct dasd_device *device = erp->startdev; |
1202 | 1054 | ||
1203 | /* first time set initial retry counter and erp_function */ | 1055 | /* first time set initial retry counter and erp_function */ |
1204 | /* and retry once without blocking queue */ | 1056 | /* and retry once without blocking queue */ |
@@ -1237,7 +1089,7 @@ static struct dasd_ccw_req * | |||
1237 | dasd_3990_erp_equip_check(struct dasd_ccw_req * erp, char *sense) | 1089 | dasd_3990_erp_equip_check(struct dasd_ccw_req * erp, char *sense) |
1238 | { | 1090 | { |
1239 | 1091 | ||
1240 | struct dasd_device *device = erp->device; | 1092 | struct dasd_device *device = erp->startdev; |
1241 | 1093 | ||
1242 | erp->function = dasd_3990_erp_equip_check; | 1094 | erp->function = dasd_3990_erp_equip_check; |
1243 | 1095 | ||
@@ -1279,7 +1131,6 @@ dasd_3990_erp_equip_check(struct dasd_ccw_req * erp, char *sense) | |||
1279 | 1131 | ||
1280 | erp = dasd_3990_erp_action_5(erp); | 1132 | erp = dasd_3990_erp_action_5(erp); |
1281 | } | 1133 | } |
1282 | |||
1283 | return erp; | 1134 | return erp; |
1284 | 1135 | ||
1285 | } /* end dasd_3990_erp_equip_check */ | 1136 | } /* end dasd_3990_erp_equip_check */ |
@@ -1299,7 +1150,7 @@ static struct dasd_ccw_req * | |||
1299 | dasd_3990_erp_data_check(struct dasd_ccw_req * erp, char *sense) | 1150 | dasd_3990_erp_data_check(struct dasd_ccw_req * erp, char *sense) |
1300 | { | 1151 | { |
1301 | 1152 | ||
1302 | struct dasd_device *device = erp->device; | 1153 | struct dasd_device *device = erp->startdev; |
1303 | 1154 | ||
1304 | erp->function = dasd_3990_erp_data_check; | 1155 | erp->function = dasd_3990_erp_data_check; |
1305 | 1156 | ||
@@ -1358,7 +1209,7 @@ static struct dasd_ccw_req * | |||
1358 | dasd_3990_erp_overrun(struct dasd_ccw_req * erp, char *sense) | 1209 | dasd_3990_erp_overrun(struct dasd_ccw_req * erp, char *sense) |
1359 | { | 1210 | { |
1360 | 1211 | ||
1361 | struct dasd_device *device = erp->device; | 1212 | struct dasd_device *device = erp->startdev; |
1362 | 1213 | ||
1363 | erp->function = dasd_3990_erp_overrun; | 1214 | erp->function = dasd_3990_erp_overrun; |
1364 | 1215 | ||
@@ -1387,7 +1238,7 @@ static struct dasd_ccw_req * | |||
1387 | dasd_3990_erp_inv_format(struct dasd_ccw_req * erp, char *sense) | 1238 | dasd_3990_erp_inv_format(struct dasd_ccw_req * erp, char *sense) |
1388 | { | 1239 | { |
1389 | 1240 | ||
1390 | struct dasd_device *device = erp->device; | 1241 | struct dasd_device *device = erp->startdev; |
1391 | 1242 | ||
1392 | erp->function = dasd_3990_erp_inv_format; | 1243 | erp->function = dasd_3990_erp_inv_format; |
1393 | 1244 | ||
@@ -1403,8 +1254,7 @@ dasd_3990_erp_inv_format(struct dasd_ccw_req * erp, char *sense) | |||
1403 | 1254 | ||
1404 | } else { | 1255 | } else { |
1405 | DEV_MESSAGE(KERN_ERR, device, "%s", | 1256 | DEV_MESSAGE(KERN_ERR, device, "%s", |
1406 | "Invalid Track Format - Fatal error should have " | 1257 | "Invalid Track Format - Fatal error"); |
1407 | "been handled within the interrupt handler"); | ||
1408 | 1258 | ||
1409 | erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED); | 1259 | erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED); |
1410 | } | 1260 | } |
@@ -1428,7 +1278,7 @@ static struct dasd_ccw_req * | |||
1428 | dasd_3990_erp_EOC(struct dasd_ccw_req * default_erp, char *sense) | 1278 | dasd_3990_erp_EOC(struct dasd_ccw_req * default_erp, char *sense) |
1429 | { | 1279 | { |
1430 | 1280 | ||
1431 | struct dasd_device *device = default_erp->device; | 1281 | struct dasd_device *device = default_erp->startdev; |
1432 | 1282 | ||
1433 | DEV_MESSAGE(KERN_ERR, device, "%s", | 1283 | DEV_MESSAGE(KERN_ERR, device, "%s", |
1434 | "End-of-Cylinder - must never happen"); | 1284 | "End-of-Cylinder - must never happen"); |
@@ -1453,7 +1303,7 @@ static struct dasd_ccw_req * | |||
1453 | dasd_3990_erp_env_data(struct dasd_ccw_req * erp, char *sense) | 1303 | dasd_3990_erp_env_data(struct dasd_ccw_req * erp, char *sense) |
1454 | { | 1304 | { |
1455 | 1305 | ||
1456 | struct dasd_device *device = erp->device; | 1306 | struct dasd_device *device = erp->startdev; |
1457 | 1307 | ||
1458 | erp->function = dasd_3990_erp_env_data; | 1308 | erp->function = dasd_3990_erp_env_data; |
1459 | 1309 | ||
@@ -1463,11 +1313,9 @@ dasd_3990_erp_env_data(struct dasd_ccw_req * erp, char *sense) | |||
1463 | 1313 | ||
1464 | /* don't retry on disabled interface */ | 1314 | /* don't retry on disabled interface */ |
1465 | if (sense[7] != 0x0F) { | 1315 | if (sense[7] != 0x0F) { |
1466 | |||
1467 | erp = dasd_3990_erp_action_4(erp, sense); | 1316 | erp = dasd_3990_erp_action_4(erp, sense); |
1468 | } else { | 1317 | } else { |
1469 | 1318 | erp->status = DASD_CQR_FILLED; | |
1470 | erp = dasd_3990_erp_cleanup(erp, DASD_CQR_IN_IO); | ||
1471 | } | 1319 | } |
1472 | 1320 | ||
1473 | return erp; | 1321 | return erp; |
@@ -1490,11 +1338,10 @@ static struct dasd_ccw_req * | |||
1490 | dasd_3990_erp_no_rec(struct dasd_ccw_req * default_erp, char *sense) | 1338 | dasd_3990_erp_no_rec(struct dasd_ccw_req * default_erp, char *sense) |
1491 | { | 1339 | { |
1492 | 1340 | ||
1493 | struct dasd_device *device = default_erp->device; | 1341 | struct dasd_device *device = default_erp->startdev; |
1494 | 1342 | ||
1495 | DEV_MESSAGE(KERN_ERR, device, "%s", | 1343 | DEV_MESSAGE(KERN_ERR, device, "%s", |
1496 | "No Record Found - Fatal error should " | 1344 | "No Record Found - Fatal error "); |
1497 | "have been handled within the interrupt handler"); | ||
1498 | 1345 | ||
1499 | return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED); | 1346 | return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED); |
1500 | 1347 | ||
@@ -1517,7 +1364,7 @@ static struct dasd_ccw_req * | |||
1517 | dasd_3990_erp_file_prot(struct dasd_ccw_req * erp) | 1364 | dasd_3990_erp_file_prot(struct dasd_ccw_req * erp) |
1518 | { | 1365 | { |
1519 | 1366 | ||
1520 | struct dasd_device *device = erp->device; | 1367 | struct dasd_device *device = erp->startdev; |
1521 | 1368 | ||
1522 | DEV_MESSAGE(KERN_ERR, device, "%s", "File Protected"); | 1369 | DEV_MESSAGE(KERN_ERR, device, "%s", "File Protected"); |
1523 | 1370 | ||
@@ -1526,6 +1373,43 @@ dasd_3990_erp_file_prot(struct dasd_ccw_req * erp) | |||
1526 | } /* end dasd_3990_erp_file_prot */ | 1373 | } /* end dasd_3990_erp_file_prot */ |
1527 | 1374 | ||
1528 | /* | 1375 | /* |
1376 | * DASD_3990_ERP_INSPECT_ALIAS | ||
1377 | * | ||
1378 | * DESCRIPTION | ||
1379 | * Checks if the original request was started on an alias device. | ||
1380 | * If yes, it modifies the original and the erp request so that | ||
1381 | * the erp request can be started on a base device. | ||
1382 | * | ||
1383 | * PARAMETER | ||
1384 | * erp pointer to the currently created default ERP | ||
1385 | * | ||
1386 | * RETURN VALUES | ||
1387 | * erp pointer to the modified ERP, or NULL | ||
1388 | */ | ||
1389 | |||
1390 | static struct dasd_ccw_req *dasd_3990_erp_inspect_alias( | ||
1391 | struct dasd_ccw_req *erp) | ||
1392 | { | ||
1393 | struct dasd_ccw_req *cqr = erp->refers; | ||
1394 | |||
1395 | if (cqr->block && | ||
1396 | (cqr->block->base != cqr->startdev)) { | ||
1397 | if (cqr->startdev->features & DASD_FEATURE_ERPLOG) { | ||
1398 | DEV_MESSAGE(KERN_ERR, cqr->startdev, | ||
1399 | "ERP on alias device for request %p," | ||
1400 | " recover on base device %s", cqr, | ||
1401 | cqr->block->base->cdev->dev.bus_id); | ||
1402 | } | ||
1403 | dasd_eckd_reset_ccw_to_base_io(cqr); | ||
1404 | erp->startdev = cqr->block->base; | ||
1405 | erp->function = dasd_3990_erp_inspect_alias; | ||
1406 | return erp; | ||
1407 | } else | ||
1408 | return NULL; | ||
1409 | } | ||
1410 | |||
1411 | |||
1412 | /* | ||
1529 | * DASD_3990_ERP_INSPECT_24 | 1413 | * DASD_3990_ERP_INSPECT_24 |
1530 | * | 1414 | * |
1531 | * DESCRIPTION | 1415 | * DESCRIPTION |
@@ -1623,7 +1507,7 @@ static struct dasd_ccw_req * | |||
1623 | dasd_3990_erp_action_10_32(struct dasd_ccw_req * erp, char *sense) | 1507 | dasd_3990_erp_action_10_32(struct dasd_ccw_req * erp, char *sense) |
1624 | { | 1508 | { |
1625 | 1509 | ||
1626 | struct dasd_device *device = erp->device; | 1510 | struct dasd_device *device = erp->startdev; |
1627 | 1511 | ||
1628 | erp->retries = 256; | 1512 | erp->retries = 256; |
1629 | erp->function = dasd_3990_erp_action_10_32; | 1513 | erp->function = dasd_3990_erp_action_10_32; |
@@ -1657,13 +1541,14 @@ static struct dasd_ccw_req * | |||
1657 | dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense) | 1541 | dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense) |
1658 | { | 1542 | { |
1659 | 1543 | ||
1660 | struct dasd_device *device = default_erp->device; | 1544 | struct dasd_device *device = default_erp->startdev; |
1661 | __u32 cpa = 0; | 1545 | __u32 cpa = 0; |
1662 | struct dasd_ccw_req *cqr; | 1546 | struct dasd_ccw_req *cqr; |
1663 | struct dasd_ccw_req *erp; | 1547 | struct dasd_ccw_req *erp; |
1664 | struct DE_eckd_data *DE_data; | 1548 | struct DE_eckd_data *DE_data; |
1549 | struct PFX_eckd_data *PFX_data; | ||
1665 | char *LO_data; /* LO_eckd_data_t */ | 1550 | char *LO_data; /* LO_eckd_data_t */ |
1666 | struct ccw1 *ccw; | 1551 | struct ccw1 *ccw, *oldccw; |
1667 | 1552 | ||
1668 | DEV_MESSAGE(KERN_DEBUG, device, "%s", | 1553 | DEV_MESSAGE(KERN_DEBUG, device, "%s", |
1669 | "Write not finished because of unexpected condition"); | 1554 | "Write not finished because of unexpected condition"); |
@@ -1702,8 +1587,8 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense) | |||
1702 | /* Build new ERP request including DE/LO */ | 1587 | /* Build new ERP request including DE/LO */ |
1703 | erp = dasd_alloc_erp_request((char *) &cqr->magic, | 1588 | erp = dasd_alloc_erp_request((char *) &cqr->magic, |
1704 | 2 + 1,/* DE/LO + TIC */ | 1589 | 2 + 1,/* DE/LO + TIC */ |
1705 | sizeof (struct DE_eckd_data) + | 1590 | sizeof(struct DE_eckd_data) + |
1706 | sizeof (struct LO_eckd_data), device); | 1591 | sizeof(struct LO_eckd_data), device); |
1707 | 1592 | ||
1708 | if (IS_ERR(erp)) { | 1593 | if (IS_ERR(erp)) { |
1709 | DEV_MESSAGE(KERN_ERR, device, "%s", "Unable to allocate ERP"); | 1594 | DEV_MESSAGE(KERN_ERR, device, "%s", "Unable to allocate ERP"); |
@@ -1712,10 +1597,16 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense) | |||
1712 | 1597 | ||
1713 | /* use original DE */ | 1598 | /* use original DE */ |
1714 | DE_data = erp->data; | 1599 | DE_data = erp->data; |
1715 | memcpy(DE_data, cqr->data, sizeof (struct DE_eckd_data)); | 1600 | oldccw = cqr->cpaddr; |
1601 | if (oldccw->cmd_code == DASD_ECKD_CCW_PFX) { | ||
1602 | PFX_data = cqr->data; | ||
1603 | memcpy(DE_data, &PFX_data->define_extend, | ||
1604 | sizeof(struct DE_eckd_data)); | ||
1605 | } else | ||
1606 | memcpy(DE_data, cqr->data, sizeof(struct DE_eckd_data)); | ||
1716 | 1607 | ||
1717 | /* create LO */ | 1608 | /* create LO */ |
1718 | LO_data = erp->data + sizeof (struct DE_eckd_data); | 1609 | LO_data = erp->data + sizeof(struct DE_eckd_data); |
1719 | 1610 | ||
1720 | if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) { | 1611 | if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) { |
1721 | 1612 | ||
@@ -1748,7 +1639,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense) | |||
1748 | 1639 | ||
1749 | /* create DE ccw */ | 1640 | /* create DE ccw */ |
1750 | ccw = erp->cpaddr; | 1641 | ccw = erp->cpaddr; |
1751 | memset(ccw, 0, sizeof (struct ccw1)); | 1642 | memset(ccw, 0, sizeof(struct ccw1)); |
1752 | ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT; | 1643 | ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT; |
1753 | ccw->flags = CCW_FLAG_CC; | 1644 | ccw->flags = CCW_FLAG_CC; |
1754 | ccw->count = 16; | 1645 | ccw->count = 16; |
@@ -1756,7 +1647,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense) | |||
1756 | 1647 | ||
1757 | /* create LO ccw */ | 1648 | /* create LO ccw */ |
1758 | ccw++; | 1649 | ccw++; |
1759 | memset(ccw, 0, sizeof (struct ccw1)); | 1650 | memset(ccw, 0, sizeof(struct ccw1)); |
1760 | ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD; | 1651 | ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD; |
1761 | ccw->flags = CCW_FLAG_CC; | 1652 | ccw->flags = CCW_FLAG_CC; |
1762 | ccw->count = 16; | 1653 | ccw->count = 16; |
@@ -1770,7 +1661,8 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense) | |||
1770 | /* fill erp related fields */ | 1661 | /* fill erp related fields */ |
1771 | erp->function = dasd_3990_erp_action_1B_32; | 1662 | erp->function = dasd_3990_erp_action_1B_32; |
1772 | erp->refers = default_erp->refers; | 1663 | erp->refers = default_erp->refers; |
1773 | erp->device = device; | 1664 | erp->startdev = device; |
1665 | erp->memdev = device; | ||
1774 | erp->magic = default_erp->magic; | 1666 | erp->magic = default_erp->magic; |
1775 | erp->expires = 0; | 1667 | erp->expires = 0; |
1776 | erp->retries = 256; | 1668 | erp->retries = 256; |
@@ -1803,7 +1695,7 @@ static struct dasd_ccw_req * | |||
1803 | dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense) | 1695 | dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense) |
1804 | { | 1696 | { |
1805 | 1697 | ||
1806 | struct dasd_device *device = previous_erp->device; | 1698 | struct dasd_device *device = previous_erp->startdev; |
1807 | __u32 cpa = 0; | 1699 | __u32 cpa = 0; |
1808 | struct dasd_ccw_req *cqr; | 1700 | struct dasd_ccw_req *cqr; |
1809 | struct dasd_ccw_req *erp; | 1701 | struct dasd_ccw_req *erp; |
@@ -1827,7 +1719,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense) | |||
1827 | DEV_MESSAGE(KERN_DEBUG, device, "%s", | 1719 | DEV_MESSAGE(KERN_DEBUG, device, "%s", |
1828 | "Imprecise ending is set - just retry"); | 1720 | "Imprecise ending is set - just retry"); |
1829 | 1721 | ||
1830 | previous_erp->status = DASD_CQR_QUEUED; | 1722 | previous_erp->status = DASD_CQR_FILLED; |
1831 | 1723 | ||
1832 | return previous_erp; | 1724 | return previous_erp; |
1833 | } | 1725 | } |
@@ -1850,7 +1742,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense) | |||
1850 | erp = previous_erp; | 1742 | erp = previous_erp; |
1851 | 1743 | ||
1852 | /* update the LO with the new returned sense data */ | 1744 | /* update the LO with the new returned sense data */ |
1853 | LO_data = erp->data + sizeof (struct DE_eckd_data); | 1745 | LO_data = erp->data + sizeof(struct DE_eckd_data); |
1854 | 1746 | ||
1855 | if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) { | 1747 | if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) { |
1856 | 1748 | ||
@@ -1889,7 +1781,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense) | |||
1889 | ccw++; /* addr of TIC ccw */ | 1781 | ccw++; /* addr of TIC ccw */ |
1890 | ccw->cda = cpa; | 1782 | ccw->cda = cpa; |
1891 | 1783 | ||
1892 | erp->status = DASD_CQR_QUEUED; | 1784 | erp->status = DASD_CQR_FILLED; |
1893 | 1785 | ||
1894 | return erp; | 1786 | return erp; |
1895 | 1787 | ||
@@ -1968,9 +1860,7 @@ dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense) | |||
1968 | * try further actions. */ | 1860 | * try further actions. */ |
1969 | 1861 | ||
1970 | erp->lpm = 0; | 1862 | erp->lpm = 0; |
1971 | 1863 | erp->status = DASD_CQR_NEED_ERP; | |
1972 | erp->status = DASD_CQR_ERROR; | ||
1973 | |||
1974 | } | 1864 | } |
1975 | } | 1865 | } |
1976 | 1866 | ||
@@ -2047,7 +1937,7 @@ dasd_3990_erp_compound_config(struct dasd_ccw_req * erp, char *sense) | |||
2047 | if ((sense[25] & DASD_SENSE_BIT_1) && (sense[26] & DASD_SENSE_BIT_2)) { | 1937 | if ((sense[25] & DASD_SENSE_BIT_1) && (sense[26] & DASD_SENSE_BIT_2)) { |
2048 | 1938 | ||
2049 | /* set to suspended duplex state then restart */ | 1939 | /* set to suspended duplex state then restart */ |
2050 | struct dasd_device *device = erp->device; | 1940 | struct dasd_device *device = erp->startdev; |
2051 | 1941 | ||
2052 | DEV_MESSAGE(KERN_ERR, device, "%s", | 1942 | DEV_MESSAGE(KERN_ERR, device, "%s", |
2053 | "Set device to suspended duplex state should be " | 1943 | "Set device to suspended duplex state should be " |
@@ -2081,28 +1971,26 @@ dasd_3990_erp_compound(struct dasd_ccw_req * erp, char *sense) | |||
2081 | { | 1971 | { |
2082 | 1972 | ||
2083 | if ((erp->function == dasd_3990_erp_compound_retry) && | 1973 | if ((erp->function == dasd_3990_erp_compound_retry) && |
2084 | (erp->status == DASD_CQR_ERROR)) { | 1974 | (erp->status == DASD_CQR_NEED_ERP)) { |
2085 | 1975 | ||
2086 | dasd_3990_erp_compound_path(erp, sense); | 1976 | dasd_3990_erp_compound_path(erp, sense); |
2087 | } | 1977 | } |
2088 | 1978 | ||
2089 | if ((erp->function == dasd_3990_erp_compound_path) && | 1979 | if ((erp->function == dasd_3990_erp_compound_path) && |
2090 | (erp->status == DASD_CQR_ERROR)) { | 1980 | (erp->status == DASD_CQR_NEED_ERP)) { |
2091 | 1981 | ||
2092 | erp = dasd_3990_erp_compound_code(erp, sense); | 1982 | erp = dasd_3990_erp_compound_code(erp, sense); |
2093 | } | 1983 | } |
2094 | 1984 | ||
2095 | if ((erp->function == dasd_3990_erp_compound_code) && | 1985 | if ((erp->function == dasd_3990_erp_compound_code) && |
2096 | (erp->status == DASD_CQR_ERROR)) { | 1986 | (erp->status == DASD_CQR_NEED_ERP)) { |
2097 | 1987 | ||
2098 | dasd_3990_erp_compound_config(erp, sense); | 1988 | dasd_3990_erp_compound_config(erp, sense); |
2099 | } | 1989 | } |
2100 | 1990 | ||
2101 | /* if no compound action ERP specified, the request failed */ | 1991 | /* if no compound action ERP specified, the request failed */ |
2102 | if (erp->status == DASD_CQR_ERROR) { | 1992 | if (erp->status == DASD_CQR_NEED_ERP) |
2103 | |||
2104 | erp->status = DASD_CQR_FAILED; | 1993 | erp->status = DASD_CQR_FAILED; |
2105 | } | ||
2106 | 1994 | ||
2107 | return erp; | 1995 | return erp; |
2108 | 1996 | ||
@@ -2127,7 +2015,7 @@ static struct dasd_ccw_req * | |||
2127 | dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense) | 2015 | dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense) |
2128 | { | 2016 | { |
2129 | 2017 | ||
2130 | struct dasd_device *device = erp->device; | 2018 | struct dasd_device *device = erp->startdev; |
2131 | 2019 | ||
2132 | erp->function = dasd_3990_erp_inspect_32; | 2020 | erp->function = dasd_3990_erp_inspect_32; |
2133 | 2021 | ||
@@ -2149,8 +2037,7 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense) | |||
2149 | 2037 | ||
2150 | case 0x01: /* fatal error */ | 2038 | case 0x01: /* fatal error */ |
2151 | DEV_MESSAGE(KERN_ERR, device, "%s", | 2039 | DEV_MESSAGE(KERN_ERR, device, "%s", |
2152 | "Fatal error should have been " | 2040 | "Retry not recommended - Fatal error"); |
2153 | "handled within the interrupt handler"); | ||
2154 | 2041 | ||
2155 | erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED); | 2042 | erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED); |
2156 | break; | 2043 | break; |
@@ -2253,6 +2140,11 @@ dasd_3990_erp_inspect(struct dasd_ccw_req * erp) | |||
2253 | /* already set up new ERP ! */ | 2140 | /* already set up new ERP ! */ |
2254 | char *sense = erp->refers->irb.ecw; | 2141 | char *sense = erp->refers->irb.ecw; |
2255 | 2142 | ||
2143 | /* if this problem occurred on an alias, retry on base */ | ||
2144 | erp_new = dasd_3990_erp_inspect_alias(erp); | ||
2145 | if (erp_new) | ||
2146 | return erp_new; | ||
2147 | |||
2256 | /* distinguish between 24 and 32 byte sense data */ | 2148 | /* distinguish between 24 and 32 byte sense data */ |
2257 | if (sense[27] & DASD_SENSE_BIT_0) { | 2149 | if (sense[27] & DASD_SENSE_BIT_0) { |
2258 | 2150 | ||
@@ -2287,13 +2179,13 @@ static struct dasd_ccw_req * | |||
2287 | dasd_3990_erp_add_erp(struct dasd_ccw_req * cqr) | 2179 | dasd_3990_erp_add_erp(struct dasd_ccw_req * cqr) |
2288 | { | 2180 | { |
2289 | 2181 | ||
2290 | struct dasd_device *device = cqr->device; | 2182 | struct dasd_device *device = cqr->startdev; |
2291 | struct ccw1 *ccw; | 2183 | struct ccw1 *ccw; |
2292 | 2184 | ||
2293 | /* allocate additional request block */ | 2185 | /* allocate additional request block */ |
2294 | struct dasd_ccw_req *erp; | 2186 | struct dasd_ccw_req *erp; |
2295 | 2187 | ||
2296 | erp = dasd_alloc_erp_request((char *) &cqr->magic, 2, 0, cqr->device); | 2188 | erp = dasd_alloc_erp_request((char *) &cqr->magic, 2, 0, device); |
2297 | if (IS_ERR(erp)) { | 2189 | if (IS_ERR(erp)) { |
2298 | if (cqr->retries <= 0) { | 2190 | if (cqr->retries <= 0) { |
2299 | DEV_MESSAGE(KERN_ERR, device, "%s", | 2191 | DEV_MESSAGE(KERN_ERR, device, "%s", |
@@ -2305,7 +2197,7 @@ dasd_3990_erp_add_erp(struct dasd_ccw_req * cqr) | |||
2305 | "Unable to allocate ERP request " | 2197 | "Unable to allocate ERP request " |
2306 | "(%i retries left)", | 2198 | "(%i retries left)", |
2307 | cqr->retries); | 2199 | cqr->retries); |
2308 | dasd_set_timer(device, (HZ << 3)); | 2200 | dasd_block_set_timer(device->block, (HZ << 3)); |
2309 | } | 2201 | } |
2310 | return cqr; | 2202 | return cqr; |
2311 | } | 2203 | } |
@@ -2319,7 +2211,9 @@ dasd_3990_erp_add_erp(struct dasd_ccw_req * cqr) | |||
2319 | ccw->cda = (long)(cqr->cpaddr); | 2211 | ccw->cda = (long)(cqr->cpaddr); |
2320 | erp->function = dasd_3990_erp_add_erp; | 2212 | erp->function = dasd_3990_erp_add_erp; |
2321 | erp->refers = cqr; | 2213 | erp->refers = cqr; |
2322 | erp->device = cqr->device; | 2214 | erp->startdev = device; |
2215 | erp->memdev = device; | ||
2216 | erp->block = cqr->block; | ||
2323 | erp->magic = cqr->magic; | 2217 | erp->magic = cqr->magic; |
2324 | erp->expires = 0; | 2218 | erp->expires = 0; |
2325 | erp->retries = 256; | 2219 | erp->retries = 256; |
@@ -2466,7 +2360,7 @@ static struct dasd_ccw_req * | |||
2466 | dasd_3990_erp_further_erp(struct dasd_ccw_req *erp) | 2360 | dasd_3990_erp_further_erp(struct dasd_ccw_req *erp) |
2467 | { | 2361 | { |
2468 | 2362 | ||
2469 | struct dasd_device *device = erp->device; | 2363 | struct dasd_device *device = erp->startdev; |
2470 | char *sense = erp->irb.ecw; | 2364 | char *sense = erp->irb.ecw; |
2471 | 2365 | ||
2472 | /* check for 24 byte sense ERP */ | 2366 | /* check for 24 byte sense ERP */ |
@@ -2557,7 +2451,7 @@ dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head, | |||
2557 | struct dasd_ccw_req *erp) | 2451 | struct dasd_ccw_req *erp) |
2558 | { | 2452 | { |
2559 | 2453 | ||
2560 | struct dasd_device *device = erp_head->device; | 2454 | struct dasd_device *device = erp_head->startdev; |
2561 | struct dasd_ccw_req *erp_done = erp_head; /* finished req */ | 2455 | struct dasd_ccw_req *erp_done = erp_head; /* finished req */ |
2562 | struct dasd_ccw_req *erp_free = NULL; /* req to be freed */ | 2456 | struct dasd_ccw_req *erp_free = NULL; /* req to be freed */ |
2563 | 2457 | ||
@@ -2569,13 +2463,13 @@ dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head, | |||
2569 | "original request was lost\n"); | 2463 | "original request was lost\n"); |
2570 | 2464 | ||
2571 | /* remove the request from the device queue */ | 2465 | /* remove the request from the device queue */ |
2572 | list_del(&erp_done->list); | 2466 | list_del(&erp_done->blocklist); |
2573 | 2467 | ||
2574 | erp_free = erp_done; | 2468 | erp_free = erp_done; |
2575 | erp_done = erp_done->refers; | 2469 | erp_done = erp_done->refers; |
2576 | 2470 | ||
2577 | /* free the finished erp request */ | 2471 | /* free the finished erp request */ |
2578 | dasd_free_erp_request(erp_free, erp_free->device); | 2472 | dasd_free_erp_request(erp_free, erp_free->memdev); |
2579 | 2473 | ||
2580 | } /* end while */ | 2474 | } /* end while */ |
2581 | 2475 | ||
@@ -2603,7 +2497,7 @@ dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head, | |||
2603 | erp->retries, erp); | 2497 | erp->retries, erp); |
2604 | 2498 | ||
2605 | /* handle the request again... */ | 2499 | /* handle the request again... */ |
2606 | erp->status = DASD_CQR_QUEUED; | 2500 | erp->status = DASD_CQR_FILLED; |
2607 | } | 2501 | } |
2608 | 2502 | ||
2609 | } else { | 2503 | } else { |
@@ -2620,7 +2514,7 @@ dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head, | |||
2620 | * DASD_3990_ERP_ACTION | 2514 | * DASD_3990_ERP_ACTION |
2621 | * | 2515 | * |
2622 | * DESCRIPTION | 2516 | * DESCRIPTION |
2623 | * controll routine for 3990 erp actions. | 2517 | * control routine for 3990 erp actions. |
2624 | * Has to be called with the queue lock (namely the s390_irq_lock) acquired. | 2518 | * Has to be called with the queue lock (namely the s390_irq_lock) acquired. |
2625 | * | 2519 | * |
2626 | * PARAMETER | 2520 | * PARAMETER |
@@ -2636,9 +2530,8 @@ dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head, | |||
2636 | struct dasd_ccw_req * | 2530 | struct dasd_ccw_req * |
2637 | dasd_3990_erp_action(struct dasd_ccw_req * cqr) | 2531 | dasd_3990_erp_action(struct dasd_ccw_req * cqr) |
2638 | { | 2532 | { |
2639 | |||
2640 | struct dasd_ccw_req *erp = NULL; | 2533 | struct dasd_ccw_req *erp = NULL; |
2641 | struct dasd_device *device = cqr->device; | 2534 | struct dasd_device *device = cqr->startdev; |
2642 | struct dasd_ccw_req *temp_erp = NULL; | 2535 | struct dasd_ccw_req *temp_erp = NULL; |
2643 | 2536 | ||
2644 | if (device->features & DASD_FEATURE_ERPLOG) { | 2537 | if (device->features & DASD_FEATURE_ERPLOG) { |
@@ -2704,10 +2597,11 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr) | |||
2704 | } | 2597 | } |
2705 | } | 2598 | } |
2706 | 2599 | ||
2707 | /* enqueue added ERP request */ | 2600 | /* enqueue ERP request if it's a new one */ |
2708 | if (erp->status == DASD_CQR_FILLED) { | 2601 | if (list_empty(&erp->blocklist)) { |
2709 | erp->status = DASD_CQR_QUEUED; | 2602 | cqr->status = DASD_CQR_IN_ERP; |
2710 | list_add(&erp->list, &device->ccw_queue); | 2603 | /* add erp request before the cqr */ |
2604 | list_add_tail(&erp->blocklist, &cqr->blocklist); | ||
2711 | } | 2605 | } |
2712 | 2606 | ||
2713 | return erp; | 2607 | return erp; |
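A hedged sketch of the calling side of dasd_3990_erp_action() (the wrapper name is hypothetical; the function and the dasd_ccw_req fields it manipulates are the ones shown in the hunk above):

static struct dasd_ccw_req *example_start_erp(struct dasd_ccw_req *cqr)
{
        /* build (or continue) error recovery for a failed request */
        struct dasd_ccw_req *erp = dasd_3990_erp_action(cqr);

        /*
         * If a new ERP request was built, it now sits on cqr->blocklist in
         * front of cqr, cqr is marked DASD_CQR_IN_ERP, and the ERP is left
         * in DASD_CQR_FILLED state so it can be started on erp->startdev
         * (the base device when the original request ran on an alias).
         */
        return erp;
}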
diff --git a/drivers/s390/block/dasd_9336_erp.c b/drivers/s390/block/dasd_9336_erp.c deleted file mode 100644 index 6e082688475a..000000000000 --- a/drivers/s390/block/dasd_9336_erp.c +++ /dev/null | |||
@@ -1,41 +0,0 @@ | |||
1 | /* | ||
2 | * File...........: linux/drivers/s390/block/dasd_9336_erp.c | ||
3 | * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> | ||
4 | * Bugreports.to..: <Linux390@de.ibm.com> | ||
5 | * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000 | ||
6 | * | ||
7 | */ | ||
8 | |||
9 | #define PRINTK_HEADER "dasd_erp(9336)" | ||
10 | |||
11 | #include "dasd_int.h" | ||
12 | |||
13 | |||
14 | /* | ||
15 | * DASD_9336_ERP_EXAMINE | ||
16 | * | ||
17 | * DESCRIPTION | ||
18 | * Checks only for fatal/no/recover error. | ||
19 | * A detailed examination of the sense data is done later outside | ||
20 | * the interrupt handler. | ||
21 | * | ||
22 | * The logic is based on the 'IBM 3880 Storage Control Reference' manual | ||
23 | * 'Chapter 7. 9336 Sense Data'. | ||
24 | * | ||
25 | * RETURN VALUES | ||
26 | * dasd_era_none no error | ||
27 | * dasd_era_fatal for all fatal (unrecoverable errors) | ||
28 | * dasd_era_recover for all others. | ||
29 | */ | ||
30 | dasd_era_t | ||
31 | dasd_9336_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb) | ||
32 | { | ||
33 | /* check for successful execution first */ | ||
34 | if (irb->scsw.cstat == 0x00 && | ||
35 | irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END)) | ||
36 | return dasd_era_none; | ||
37 | |||
38 | /* examine the 24 byte sense data */ | ||
39 | return dasd_era_recover; | ||
40 | |||
41 | } /* END dasd_9336_erp_examine */ | ||
diff --git a/drivers/s390/block/dasd_9343_erp.c b/drivers/s390/block/dasd_9343_erp.c deleted file mode 100644 index ddecb9808ed4..000000000000 --- a/drivers/s390/block/dasd_9343_erp.c +++ /dev/null | |||
@@ -1,21 +0,0 @@ | |||
1 | /* | ||
2 | * File...........: linux/drivers/s390/block/dasd_9345_erp.c | ||
3 | * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> | ||
4 | * Bugreports.to..: <Linux390@de.ibm.com> | ||
5 | * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000 | ||
6 | * | ||
7 | */ | ||
8 | |||
9 | #define PRINTK_HEADER "dasd_erp(9343)" | ||
10 | |||
11 | #include "dasd_int.h" | ||
12 | |||
13 | dasd_era_t | ||
14 | dasd_9343_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb) | ||
15 | { | ||
16 | if (irb->scsw.cstat == 0x00 && | ||
17 | irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END)) | ||
18 | return dasd_era_none; | ||
19 | |||
20 | return dasd_era_recover; | ||
21 | } | ||
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c new file mode 100644 index 000000000000..3a40bee9d358 --- /dev/null +++ b/drivers/s390/block/dasd_alias.c | |||
@@ -0,0 +1,903 @@ | |||
1 | /* | ||
2 | * PAV alias management for the DASD ECKD discipline | ||
3 | * | ||
4 | * Copyright IBM Corporation, 2007 | ||
5 | * Author(s): Stefan Weinhuber <wein@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #include <linux/list.h> | ||
9 | #include <asm/ebcdic.h> | ||
10 | #include "dasd_int.h" | ||
11 | #include "dasd_eckd.h" | ||
12 | |||
13 | #ifdef PRINTK_HEADER | ||
14 | #undef PRINTK_HEADER | ||
15 | #endif /* PRINTK_HEADER */ | ||
16 | #define PRINTK_HEADER "dasd(eckd):" | ||
17 | |||
18 | |||
19 | /* | ||
20 | * General concept of alias management: | ||
21 | * - PAV and DASD alias management is specific to the eckd discipline. | ||
22 | * - A device is connected to an lcu as long as the device exists. | ||
23 | * dasd_alias_make_device_known_to_lcu will be called when the | ||
24 | * device is checked by the eckd discipline and | ||
25 | * dasd_alias_disconnect_device_from_lcu will be called | ||
26 | * before the device is deleted. | ||
27 | * - The dasd_alias_add_device / dasd_alias_remove_device | ||
28 | * functions mark the point when a device is 'ready for service'. | ||
29 | * - A summary unit check is a rare occasion, but it is mandatory to | ||
30 | * support it. It requires some complex recovery actions before the | ||
31 | * devices can be used again (see dasd_alias_handle_summary_unit_check). | ||
32 | * - dasd_alias_get_start_dev will find an alias device that can be used | ||
33 | * instead of the base device and does some (very simple) load balancing. | ||
34 | * This is the function that gets called for each I/O, so when improving | ||
35 | * something, this function should get faster or better, the rest has just | ||
36 | * to be correct. | ||
37 | */ | ||
38 | |||
39 | |||
40 | static void summary_unit_check_handling_work(struct work_struct *); | ||
41 | static void lcu_update_work(struct work_struct *); | ||
42 | static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *); | ||
43 | |||
44 | static struct alias_root aliastree = { | ||
45 | .serverlist = LIST_HEAD_INIT(aliastree.serverlist), | ||
46 | .lock = __SPIN_LOCK_UNLOCKED(aliastree.lock), | ||
47 | }; | ||
48 | |||
49 | static struct alias_server *_find_server(struct dasd_uid *uid) | ||
50 | { | ||
51 | struct alias_server *pos; | ||
52 | list_for_each_entry(pos, &aliastree.serverlist, server) { | ||
53 | if (!strncmp(pos->uid.vendor, uid->vendor, | ||
54 | sizeof(uid->vendor)) | ||
55 | && !strncmp(pos->uid.serial, uid->serial, | ||
56 | sizeof(uid->serial))) | ||
57 | return pos; | ||
58 | } | ||
59 | return NULL; | ||
60 | } | ||
61 | |||
62 | static struct alias_lcu *_find_lcu(struct alias_server *server, | ||
63 | struct dasd_uid *uid) | ||
64 | { | ||
65 | struct alias_lcu *pos; | ||
66 | list_for_each_entry(pos, &server->lculist, lcu) { | ||
67 | if (pos->uid.ssid == uid->ssid) | ||
68 | return pos; | ||
69 | } | ||
70 | return NULL; | ||
71 | } | ||
72 | |||
73 | static struct alias_pav_group *_find_group(struct alias_lcu *lcu, | ||
74 | struct dasd_uid *uid) | ||
75 | { | ||
76 | struct alias_pav_group *pos; | ||
77 | __u8 search_unit_addr; | ||
78 | |||
79 | /* for hyper pav there is only one group */ | ||
80 | if (lcu->pav == HYPER_PAV) { | ||
81 | if (list_empty(&lcu->grouplist)) | ||
82 | return NULL; | ||
83 | else | ||
84 | return list_first_entry(&lcu->grouplist, | ||
85 | struct alias_pav_group, group); | ||
86 | } | ||
87 | |||
88 | /* for base pav we have to find the group that matches the base */ | ||
89 | if (uid->type == UA_BASE_DEVICE) | ||
90 | search_unit_addr = uid->real_unit_addr; | ||
91 | else | ||
92 | search_unit_addr = uid->base_unit_addr; | ||
93 | list_for_each_entry(pos, &lcu->grouplist, group) { | ||
94 | if (pos->uid.base_unit_addr == search_unit_addr) | ||
95 | return pos; | ||
96 | } | ||
97 | return NULL; | ||
98 | } | ||
99 | |||
100 | static struct alias_server *_allocate_server(struct dasd_uid *uid) | ||
101 | { | ||
102 | struct alias_server *server; | ||
103 | |||
104 | server = kzalloc(sizeof(*server), GFP_KERNEL); | ||
105 | if (!server) | ||
106 | return ERR_PTR(-ENOMEM); | ||
107 | memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor)); | ||
108 | memcpy(server->uid.serial, uid->serial, sizeof(uid->serial)); | ||
109 | INIT_LIST_HEAD(&server->server); | ||
110 | INIT_LIST_HEAD(&server->lculist); | ||
111 | return server; | ||
112 | } | ||
113 | |||
114 | static void _free_server(struct alias_server *server) | ||
115 | { | ||
116 | kfree(server); | ||
117 | } | ||
118 | |||
119 | static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid) | ||
120 | { | ||
121 | struct alias_lcu *lcu; | ||
122 | |||
123 | lcu = kzalloc(sizeof(*lcu), GFP_KERNEL); | ||
124 | if (!lcu) | ||
125 | return ERR_PTR(-ENOMEM); | ||
126 | lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA); | ||
127 | if (!lcu->uac) | ||
128 | goto out_err1; | ||
129 | lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA); | ||
130 | if (!lcu->rsu_cqr) | ||
131 | goto out_err2; | ||
132 | lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1), | ||
133 | GFP_KERNEL | GFP_DMA); | ||
134 | if (!lcu->rsu_cqr->cpaddr) | ||
135 | goto out_err3; | ||
136 | lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA); | ||
137 | if (!lcu->rsu_cqr->data) | ||
138 | goto out_err4; | ||
139 | |||
140 | memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor)); | ||
141 | memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial)); | ||
142 | lcu->uid.ssid = uid->ssid; | ||
143 | lcu->pav = NO_PAV; | ||
144 | lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING; | ||
145 | INIT_LIST_HEAD(&lcu->lcu); | ||
146 | INIT_LIST_HEAD(&lcu->inactive_devices); | ||
147 | INIT_LIST_HEAD(&lcu->active_devices); | ||
148 | INIT_LIST_HEAD(&lcu->grouplist); | ||
149 | INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work); | ||
150 | INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work); | ||
151 | spin_lock_init(&lcu->lock); | ||
152 | return lcu; | ||
153 | |||
154 | out_err4: | ||
155 | kfree(lcu->rsu_cqr->cpaddr); | ||
156 | out_err3: | ||
157 | kfree(lcu->rsu_cqr); | ||
158 | out_err2: | ||
159 | kfree(lcu->uac); | ||
160 | out_err1: | ||
161 | kfree(lcu); | ||
162 | return ERR_PTR(-ENOMEM); | ||
163 | } | ||
164 | |||
165 | static void _free_lcu(struct alias_lcu *lcu) | ||
166 | { | ||
167 | kfree(lcu->rsu_cqr->data); | ||
168 | kfree(lcu->rsu_cqr->cpaddr); | ||
169 | kfree(lcu->rsu_cqr); | ||
170 | kfree(lcu->uac); | ||
171 | kfree(lcu); | ||
172 | } | ||
173 | |||
174 | /* | ||
175 | * This is the function that will allocate all the server and lcu data, | ||
176 | * so this function must be called first for a new device. | ||
177 | * If the return value is 1, the lcu was already known before; if it | ||
178 | * is 0, this is a new lcu. | ||
179 | * Negative return code indicates that something went wrong (e.g. -ENOMEM) | ||
180 | */ | ||
181 | int dasd_alias_make_device_known_to_lcu(struct dasd_device *device) | ||
182 | { | ||
183 | struct dasd_eckd_private *private; | ||
184 | unsigned long flags; | ||
185 | struct alias_server *server, *newserver; | ||
186 | struct alias_lcu *lcu, *newlcu; | ||
187 | int is_lcu_known; | ||
188 | struct dasd_uid *uid; | ||
189 | |||
190 | private = (struct dasd_eckd_private *) device->private; | ||
191 | uid = &private->uid; | ||
192 | spin_lock_irqsave(&aliastree.lock, flags); | ||
193 | is_lcu_known = 1; | ||
194 | server = _find_server(uid); | ||
195 | if (!server) { | ||
196 | spin_unlock_irqrestore(&aliastree.lock, flags); | ||
197 | newserver = _allocate_server(uid); | ||
198 | if (IS_ERR(newserver)) | ||
199 | return PTR_ERR(newserver); | ||
200 | spin_lock_irqsave(&aliastree.lock, flags); | ||
201 | server = _find_server(uid); | ||
202 | if (!server) { | ||
203 | list_add(&newserver->server, &aliastree.serverlist); | ||
204 | server = newserver; | ||
205 | is_lcu_known = 0; | ||
206 | } else { | ||
207 | /* someone was faster */ | ||
208 | _free_server(newserver); | ||
209 | } | ||
210 | } | ||
211 | |||
212 | lcu = _find_lcu(server, uid); | ||
213 | if (!lcu) { | ||
214 | spin_unlock_irqrestore(&aliastree.lock, flags); | ||
215 | newlcu = _allocate_lcu(uid); | ||
216 | if (IS_ERR(newlcu)) | ||
217 | return PTR_ERR(newlcu); | ||
218 | spin_lock_irqsave(&aliastree.lock, flags); | ||
219 | lcu = _find_lcu(server, uid); | ||
220 | if (!lcu) { | ||
221 | list_add(&newlcu->lcu, &server->lculist); | ||
222 | lcu = newlcu; | ||
223 | is_lcu_known = 0; | ||
224 | } else { | ||
225 | /* someone was faster */ | ||
226 | _free_lcu(newlcu); | ||
227 | } | ||
228 | is_lcu_known = 0; | ||
229 | } | ||
230 | spin_lock(&lcu->lock); | ||
231 | list_add(&device->alias_list, &lcu->inactive_devices); | ||
232 | private->lcu = lcu; | ||
233 | spin_unlock(&lcu->lock); | ||
234 | spin_unlock_irqrestore(&aliastree.lock, flags); | ||
235 | |||
236 | return is_lcu_known; | ||
237 | } | ||
238 | |||
239 | /* | ||
240 | * This function removes a device from the scope of alias management. | ||
241 | * The complicated part is to make sure that it is not in use by | ||
242 | * any of the workers. If necessary cancel the work. | ||
243 | */ | ||
244 | void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device) | ||
245 | { | ||
246 | struct dasd_eckd_private *private; | ||
247 | unsigned long flags; | ||
248 | struct alias_lcu *lcu; | ||
249 | struct alias_server *server; | ||
250 | int was_pending; | ||
251 | |||
252 | private = (struct dasd_eckd_private *) device->private; | ||
253 | lcu = private->lcu; | ||
254 | spin_lock_irqsave(&lcu->lock, flags); | ||
255 | list_del_init(&device->alias_list); | ||
256 | /* make sure that the workers don't use this device */ | ||
257 | if (device == lcu->suc_data.device) { | ||
258 | spin_unlock_irqrestore(&lcu->lock, flags); | ||
259 | cancel_work_sync(&lcu->suc_data.worker); | ||
260 | spin_lock_irqsave(&lcu->lock, flags); | ||
261 | if (device == lcu->suc_data.device) | ||
262 | lcu->suc_data.device = NULL; | ||
263 | } | ||
264 | was_pending = 0; | ||
265 | if (device == lcu->ruac_data.device) { | ||
266 | spin_unlock_irqrestore(&lcu->lock, flags); | ||
267 | was_pending = 1; | ||
268 | cancel_delayed_work_sync(&lcu->ruac_data.dwork); | ||
269 | spin_lock_irqsave(&lcu->lock, flags); | ||
270 | if (device == lcu->ruac_data.device) | ||
271 | lcu->ruac_data.device = NULL; | ||
272 | } | ||
273 | private->lcu = NULL; | ||
274 | spin_unlock_irqrestore(&lcu->lock, flags); | ||
275 | |||
276 | spin_lock_irqsave(&aliastree.lock, flags); | ||
277 | spin_lock(&lcu->lock); | ||
278 | if (list_empty(&lcu->grouplist) && | ||
279 | list_empty(&lcu->active_devices) && | ||
280 | list_empty(&lcu->inactive_devices)) { | ||
281 | list_del(&lcu->lcu); | ||
282 | spin_unlock(&lcu->lock); | ||
283 | _free_lcu(lcu); | ||
284 | lcu = NULL; | ||
285 | } else { | ||
286 | if (was_pending) | ||
287 | _schedule_lcu_update(lcu, NULL); | ||
288 | spin_unlock(&lcu->lock); | ||
289 | } | ||
290 | server = _find_server(&private->uid); | ||
291 | if (server && list_empty(&server->lculist)) { | ||
292 | list_del(&server->server); | ||
293 | _free_server(server); | ||
294 | } | ||
295 | spin_unlock_irqrestore(&aliastree.lock, flags); | ||
296 | } | ||
297 | |||
298 | /* | ||
299 | * This function assumes that the unit address configuration stored | ||
300 | * in the lcu is up to date and will update the device uid before | ||
301 | * adding it to a pav group. | ||
302 | */ | ||
303 | static int _add_device_to_lcu(struct alias_lcu *lcu, | ||
304 | struct dasd_device *device) | ||
305 | { | ||
306 | |||
307 | struct dasd_eckd_private *private; | ||
308 | struct alias_pav_group *group; | ||
309 | struct dasd_uid *uid; | ||
310 | |||
311 | private = (struct dasd_eckd_private *) device->private; | ||
312 | uid = &private->uid; | ||
313 | uid->type = lcu->uac->unit[uid->real_unit_addr].ua_type; | ||
314 | uid->base_unit_addr = lcu->uac->unit[uid->real_unit_addr].base_ua; | ||
315 | dasd_set_uid(device->cdev, &private->uid); | ||
316 | |||
317 | /* if we have no PAV anyway, we don't need to bother with PAV groups */ | ||
318 | if (lcu->pav == NO_PAV) { | ||
319 | list_move(&device->alias_list, &lcu->active_devices); | ||
320 | return 0; | ||
321 | } | ||
322 | |||
323 | group = _find_group(lcu, uid); | ||
324 | if (!group) { | ||
325 | group = kzalloc(sizeof(*group), GFP_ATOMIC); | ||
326 | if (!group) | ||
327 | return -ENOMEM; | ||
328 | memcpy(group->uid.vendor, uid->vendor, sizeof(uid->vendor)); | ||
329 | memcpy(group->uid.serial, uid->serial, sizeof(uid->serial)); | ||
330 | group->uid.ssid = uid->ssid; | ||
331 | if (uid->type == UA_BASE_DEVICE) | ||
332 | group->uid.base_unit_addr = uid->real_unit_addr; | ||
333 | else | ||
334 | group->uid.base_unit_addr = uid->base_unit_addr; | ||
335 | INIT_LIST_HEAD(&group->group); | ||
336 | INIT_LIST_HEAD(&group->baselist); | ||
337 | INIT_LIST_HEAD(&group->aliaslist); | ||
338 | list_add(&group->group, &lcu->grouplist); | ||
339 | } | ||
340 | if (uid->type == UA_BASE_DEVICE) | ||
341 | list_move(&device->alias_list, &group->baselist); | ||
342 | else | ||
343 | list_move(&device->alias_list, &group->aliaslist); | ||
344 | private->pavgroup = group; | ||
345 | return 0; | ||
346 | } | ||
347 | |||
348 | static void _remove_device_from_lcu(struct alias_lcu *lcu, | ||
349 | struct dasd_device *device) | ||
350 | { | ||
351 | struct dasd_eckd_private *private; | ||
352 | struct alias_pav_group *group; | ||
353 | |||
354 | private = (struct dasd_eckd_private *) device->private; | ||
355 | list_move(&device->alias_list, &lcu->inactive_devices); | ||
356 | group = private->pavgroup; | ||
357 | if (!group) | ||
358 | return; | ||
359 | private->pavgroup = NULL; | ||
360 | if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) { | ||
361 | list_del(&group->group); | ||
362 | kfree(group); | ||
363 | return; | ||
364 | } | ||
365 | if (group->next == device) | ||
366 | group->next = NULL; | ||
367 | } | ||
368 | |||
369 | static int read_unit_address_configuration(struct dasd_device *device, | ||
370 | struct alias_lcu *lcu) | ||
371 | { | ||
372 | struct dasd_psf_prssd_data *prssdp; | ||
373 | struct dasd_ccw_req *cqr; | ||
374 | struct ccw1 *ccw; | ||
375 | int rc; | ||
376 | unsigned long flags; | ||
377 | |||
378 | cqr = dasd_kmalloc_request("ECKD", | ||
379 | 1 /* PSF */ + 1 /* RSSD */ , | ||
380 | (sizeof(struct dasd_psf_prssd_data)), | ||
381 | device); | ||
382 | if (IS_ERR(cqr)) | ||
383 | return PTR_ERR(cqr); | ||
384 | cqr->startdev = device; | ||
385 | cqr->memdev = device; | ||
386 | clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); | ||
387 | cqr->retries = 10; | ||
388 | cqr->expires = 20 * HZ; | ||
389 | |||
390 | /* Prepare for Read Subsystem Data */ | ||
391 | prssdp = (struct dasd_psf_prssd_data *) cqr->data; | ||
392 | memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data)); | ||
393 | prssdp->order = PSF_ORDER_PRSSD; | ||
394 | prssdp->suborder = 0x0e; /* Read unit address configuration */ | ||
395 | /* all other bytes of prssdp must be zero */ | ||
396 | |||
397 | ccw = cqr->cpaddr; | ||
398 | ccw->cmd_code = DASD_ECKD_CCW_PSF; | ||
399 | ccw->count = sizeof(struct dasd_psf_prssd_data); | ||
400 | ccw->flags |= CCW_FLAG_CC; | ||
401 | ccw->cda = (__u32)(addr_t) prssdp; | ||
402 | |||
403 | /* Read Subsystem Data - feature codes */ | ||
404 | memset(lcu->uac, 0, sizeof(*(lcu->uac))); | ||
405 | |||
406 | ccw++; | ||
407 | ccw->cmd_code = DASD_ECKD_CCW_RSSD; | ||
408 | ccw->count = sizeof(*(lcu->uac)); | ||
409 | ccw->cda = (__u32)(addr_t) lcu->uac; | ||
410 | |||
411 | cqr->buildclk = get_clock(); | ||
412 | cqr->status = DASD_CQR_FILLED; | ||
413 | |||
414 | /* need to unset flag here to detect race with summary unit check */ | ||
415 | spin_lock_irqsave(&lcu->lock, flags); | ||
416 | lcu->flags &= ~NEED_UAC_UPDATE; | ||
417 | spin_unlock_irqrestore(&lcu->lock, flags); | ||
418 | |||
419 | do { | ||
420 | rc = dasd_sleep_on(cqr); | ||
421 | } while (rc && (cqr->retries > 0)); | ||
422 | if (rc) { | ||
423 | spin_lock_irqsave(&lcu->lock, flags); | ||
424 | lcu->flags |= NEED_UAC_UPDATE; | ||
425 | spin_unlock_irqrestore(&lcu->lock, flags); | ||
426 | } | ||
427 | dasd_kfree_request(cqr, cqr->memdev); | ||
428 | return rc; | ||
429 | } | ||
430 | |||
431 | static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu) | ||
432 | { | ||
433 | unsigned long flags; | ||
434 | struct alias_pav_group *pavgroup, *tempgroup; | ||
435 | struct dasd_device *device, *tempdev; | ||
436 | int i, rc; | ||
437 | struct dasd_eckd_private *private; | ||
438 | |||
439 | spin_lock_irqsave(&lcu->lock, flags); | ||
440 | list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) { | ||
441 | list_for_each_entry_safe(device, tempdev, &pavgroup->baselist, | ||
442 | alias_list) { | ||
443 | list_move(&device->alias_list, &lcu->active_devices); | ||
444 | private = (struct dasd_eckd_private *) device->private; | ||
445 | private->pavgroup = NULL; | ||
446 | } | ||
447 | list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist, | ||
448 | alias_list) { | ||
449 | list_move(&device->alias_list, &lcu->active_devices); | ||
450 | private = (struct dasd_eckd_private *) device->private; | ||
451 | private->pavgroup = NULL; | ||
452 | } | ||
453 | list_del(&pavgroup->group); | ||
454 | kfree(pavgroup); | ||
455 | } | ||
456 | spin_unlock_irqrestore(&lcu->lock, flags); | ||
457 | |||
458 | rc = read_unit_address_configuration(refdev, lcu); | ||
459 | if (rc) | ||
460 | return rc; | ||
461 | |||
462 | spin_lock_irqsave(&lcu->lock, flags); | ||
463 | lcu->pav = NO_PAV; | ||
464 | for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) { | ||
465 | switch (lcu->uac->unit[i].ua_type) { | ||
466 | case UA_BASE_PAV_ALIAS: | ||
467 | lcu->pav = BASE_PAV; | ||
468 | break; | ||
469 | case UA_HYPER_PAV_ALIAS: | ||
470 | lcu->pav = HYPER_PAV; | ||
471 | break; | ||
472 | } | ||
473 | if (lcu->pav != NO_PAV) | ||
474 | break; | ||
475 | } | ||
476 | |||
477 | list_for_each_entry_safe(device, tempdev, &lcu->active_devices, | ||
478 | alias_list) { | ||
479 | _add_device_to_lcu(lcu, device); | ||
480 | } | ||
481 | spin_unlock_irqrestore(&lcu->lock, flags); | ||
482 | return 0; | ||
483 | } | ||
484 | |||
485 | static void lcu_update_work(struct work_struct *work) | ||
486 | { | ||
487 | struct alias_lcu *lcu; | ||
488 | struct read_uac_work_data *ruac_data; | ||
489 | struct dasd_device *device; | ||
490 | unsigned long flags; | ||
491 | int rc; | ||
492 | |||
493 | ruac_data = container_of(work, struct read_uac_work_data, dwork.work); | ||
494 | lcu = container_of(ruac_data, struct alias_lcu, ruac_data); | ||
495 | device = ruac_data->device; | ||
496 | rc = _lcu_update(device, lcu); | ||
497 | /* | ||
498 | * Need to check flags again, as there could have been another | ||
499 | * prepare_update or a new device while we were still | ||
500 | * processing the data | ||
501 | */ | ||
502 | spin_lock_irqsave(&lcu->lock, flags); | ||
503 | if (rc || (lcu->flags & NEED_UAC_UPDATE)) { | ||
504 | DEV_MESSAGE(KERN_WARNING, device, "could not update" | ||
505 | " alias data in lcu (rc = %d), retry later", rc); | ||
506 | schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ); | ||
507 | } else { | ||
508 | lcu->ruac_data.device = NULL; | ||
509 | lcu->flags &= ~UPDATE_PENDING; | ||
510 | } | ||
511 | spin_unlock_irqrestore(&lcu->lock, flags); | ||
512 | } | ||
513 | |||
514 | static int _schedule_lcu_update(struct alias_lcu *lcu, | ||
515 | struct dasd_device *device) | ||
516 | { | ||
517 | struct dasd_device *usedev = NULL; | ||
518 | struct alias_pav_group *group; | ||
519 | |||
520 | lcu->flags |= NEED_UAC_UPDATE; | ||
521 | if (lcu->ruac_data.device) { | ||
522 | /* already scheduled or running */ | ||
523 | return 0; | ||
524 | } | ||
525 | if (device && !list_empty(&device->alias_list)) | ||
526 | usedev = device; | ||
527 | |||
528 | if (!usedev && !list_empty(&lcu->grouplist)) { | ||
529 | group = list_first_entry(&lcu->grouplist, | ||
530 | struct alias_pav_group, group); | ||
531 | if (!list_empty(&group->baselist)) | ||
532 | usedev = list_first_entry(&group->baselist, | ||
533 | struct dasd_device, | ||
534 | alias_list); | ||
535 | else if (!list_empty(&group->aliaslist)) | ||
536 | usedev = list_first_entry(&group->aliaslist, | ||
537 | struct dasd_device, | ||
538 | alias_list); | ||
539 | } | ||
540 | if (!usedev && !list_empty(&lcu->active_devices)) { | ||
541 | usedev = list_first_entry(&lcu->active_devices, | ||
542 | struct dasd_device, alias_list); | ||
543 | } | ||
544 | /* | ||
545 | * if we haven't found a proper device yet, give up for now; the next | ||
546 | * device that will be set active will trigger an lcu update | ||
547 | */ | ||
548 | if (!usedev) | ||
549 | return -EINVAL; | ||
550 | lcu->ruac_data.device = usedev; | ||
551 | schedule_delayed_work(&lcu->ruac_data.dwork, 0); | ||
552 | return 0; | ||
553 | } | ||
554 | |||
555 | int dasd_alias_add_device(struct dasd_device *device) | ||
556 | { | ||
557 | struct dasd_eckd_private *private; | ||
558 | struct alias_lcu *lcu; | ||
559 | unsigned long flags; | ||
560 | int rc; | ||
561 | |||
562 | private = (struct dasd_eckd_private *) device->private; | ||
563 | lcu = private->lcu; | ||
564 | rc = 0; | ||
565 | spin_lock_irqsave(&lcu->lock, flags); | ||
566 | if (!(lcu->flags & UPDATE_PENDING)) { | ||
567 | rc = _add_device_to_lcu(lcu, device); | ||
568 | if (rc) | ||
569 | lcu->flags |= UPDATE_PENDING; | ||
570 | } | ||
571 | if (lcu->flags & UPDATE_PENDING) { | ||
572 | list_move(&device->alias_list, &lcu->active_devices); | ||
573 | _schedule_lcu_update(lcu, device); | ||
574 | } | ||
575 | spin_unlock_irqrestore(&lcu->lock, flags); | ||
576 | return rc; | ||
577 | } | ||
578 | |||
579 | int dasd_alias_remove_device(struct dasd_device *device) | ||
580 | { | ||
581 | struct dasd_eckd_private *private; | ||
582 | struct alias_lcu *lcu; | ||
583 | unsigned long flags; | ||
584 | |||
585 | private = (struct dasd_eckd_private *) device->private; | ||
586 | lcu = private->lcu; | ||
587 | spin_lock_irqsave(&lcu->lock, flags); | ||
588 | _remove_device_from_lcu(lcu, device); | ||
589 | spin_unlock_irqrestore(&lcu->lock, flags); | ||
590 | return 0; | ||
591 | } | ||
592 | |||
593 | struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device) | ||
594 | { | ||
595 | |||
596 | struct dasd_device *alias_device; | ||
597 | struct alias_pav_group *group; | ||
598 | struct alias_lcu *lcu; | ||
599 | struct dasd_eckd_private *private, *alias_priv; | ||
600 | unsigned long flags; | ||
601 | |||
602 | private = (struct dasd_eckd_private *) base_device->private; | ||
603 | group = private->pavgroup; | ||
604 | lcu = private->lcu; | ||
605 | if (!group || !lcu) | ||
606 | return NULL; | ||
607 | if (lcu->pav == NO_PAV || | ||
608 | lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING)) | ||
609 | return NULL; | ||
610 | |||
611 | spin_lock_irqsave(&lcu->lock, flags); | ||
612 | alias_device = group->next; | ||
613 | if (!alias_device) { | ||
614 | if (list_empty(&group->aliaslist)) { | ||
615 | spin_unlock_irqrestore(&lcu->lock, flags); | ||
616 | return NULL; | ||
617 | } else { | ||
618 | alias_device = list_first_entry(&group->aliaslist, | ||
619 | struct dasd_device, | ||
620 | alias_list); | ||
621 | } | ||
622 | } | ||
623 | if (list_is_last(&alias_device->alias_list, &group->aliaslist)) | ||
624 | group->next = list_first_entry(&group->aliaslist, | ||
625 | struct dasd_device, alias_list); | ||
626 | else | ||
627 | group->next = list_first_entry(&alias_device->alias_list, | ||
628 | struct dasd_device, alias_list); | ||
629 | spin_unlock_irqrestore(&lcu->lock, flags); | ||
630 | alias_priv = (struct dasd_eckd_private *) alias_device->private; | ||
631 | if ((alias_priv->count < private->count) && !alias_device->stopped) | ||
632 | return alias_device; | ||
633 | else | ||
634 | return NULL; | ||
635 | } | ||
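The round robin above only hands out an alias that is not stopped and whose usage count is below the base device's. A hedged sketch of the consuming side (the helper name is hypothetical; cqr->startdev is the field this patch uses throughout for the device a request is started on):

static void example_choose_startdev(struct dasd_ccw_req *cqr,
                                    struct dasd_device *base)
{
        /* prefer a free alias from the same PAV group, else use the base */
        struct dasd_device *startdev = dasd_alias_get_start_dev(base);

        if (!startdev)
                startdev = base;
        cqr->startdev = startdev;
}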
636 | |||
637 | /* | ||
638 | * Summary unit check handling depends on the way alias devices | ||
639 | * are handled, so it is done here rather than in dasd_eckd.c | ||
640 | */ | ||
641 | static int reset_summary_unit_check(struct alias_lcu *lcu, | ||
642 | struct dasd_device *device, | ||
643 | char reason) | ||
644 | { | ||
645 | struct dasd_ccw_req *cqr; | ||
646 | int rc = 0; | ||
647 | |||
648 | cqr = lcu->rsu_cqr; | ||
649 | strncpy((char *) &cqr->magic, "ECKD", 4); | ||
650 | ASCEBC((char *) &cqr->magic, 4); | ||
651 | cqr->cpaddr->cmd_code = DASD_ECKD_CCW_RSCK; | ||
652 | cqr->cpaddr->flags = 0; | ||
653 | cqr->cpaddr->count = 16; | ||
654 | cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; | ||
655 | ((char *)cqr->data)[0] = reason; | ||
656 | |||
657 | clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); | ||
658 | cqr->retries = 255; /* set retry counter to enable basic ERP */ | ||
659 | cqr->startdev = device; | ||
660 | cqr->memdev = device; | ||
661 | cqr->block = NULL; | ||
662 | cqr->expires = 5 * HZ; | ||
663 | cqr->buildclk = get_clock(); | ||
664 | cqr->status = DASD_CQR_FILLED; | ||
665 | |||
666 | rc = dasd_sleep_on_immediatly(cqr); | ||
667 | return rc; | ||
668 | } | ||
669 | |||
670 | static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu) | ||
671 | { | ||
672 | struct alias_pav_group *pavgroup; | ||
673 | struct dasd_device *device; | ||
674 | struct dasd_eckd_private *private; | ||
675 | |||
676 | /* active and inactive lists can contain alias as well as base devices */ | ||
677 | list_for_each_entry(device, &lcu->active_devices, alias_list) { | ||
678 | private = (struct dasd_eckd_private *) device->private; | ||
679 | if (private->uid.type != UA_BASE_DEVICE) | ||
680 | continue; | ||
681 | dasd_schedule_block_bh(device->block); | ||
682 | dasd_schedule_device_bh(device); | ||
683 | } | ||
684 | list_for_each_entry(device, &lcu->inactive_devices, alias_list) { | ||
685 | private = (struct dasd_eckd_private *) device->private; | ||
686 | if (private->uid.type != UA_BASE_DEVICE) | ||
687 | continue; | ||
688 | dasd_schedule_block_bh(device->block); | ||
689 | dasd_schedule_device_bh(device); | ||
690 | } | ||
691 | list_for_each_entry(pavgroup, &lcu->grouplist, group) { | ||
692 | list_for_each_entry(device, &pavgroup->baselist, alias_list) { | ||
693 | dasd_schedule_block_bh(device->block); | ||
694 | dasd_schedule_device_bh(device); | ||
695 | } | ||
696 | } | ||
697 | } | ||
698 | |||
699 | static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu) | ||
700 | { | ||
701 | struct alias_pav_group *pavgroup; | ||
702 | struct dasd_device *device, *temp; | ||
703 | struct dasd_eckd_private *private; | ||
704 | int rc; | ||
705 | unsigned long flags; | ||
706 | LIST_HEAD(active); | ||
707 | |||
708 | /* | ||
709 | * The problem here is that dasd_flush_device_queue may wait | ||
710 | * for termination of a request to complete. We can't keep | ||
711 | * the lcu lock during that time, so we must assume that | ||
712 | * the lists may have changed. | ||
713 | * Idea: first gather all active alias devices in a separate list, | ||
714 | * then flush the first element of this list unlocked, and afterwards | ||
715 | * check if it is still on the list before moving it to the | ||
716 | * active_devices list. | ||
717 | */ | ||
718 | |||
719 | spin_lock_irqsave(&lcu->lock, flags); | ||
720 | list_for_each_entry_safe(device, temp, &lcu->active_devices, | ||
721 | alias_list) { | ||
722 | private = (struct dasd_eckd_private *) device->private; | ||
723 | if (private->uid.type == UA_BASE_DEVICE) | ||
724 | continue; | ||
725 | list_move(&device->alias_list, &active); | ||
726 | } | ||
727 | |||
728 | list_for_each_entry(pavgroup, &lcu->grouplist, group) { | ||
729 | list_splice_init(&pavgroup->aliaslist, &active); | ||
730 | } | ||
731 | while (!list_empty(&active)) { | ||
732 | device = list_first_entry(&active, struct dasd_device, | ||
733 | alias_list); | ||
734 | spin_unlock_irqrestore(&lcu->lock, flags); | ||
735 | rc = dasd_flush_device_queue(device); | ||
736 | spin_lock_irqsave(&lcu->lock, flags); | ||
737 | /* | ||
738 | * only move device around if it wasn't moved away while we | ||
739 | * were waiting for the flush | ||
740 | */ | ||
741 | if (device == list_first_entry(&active, | ||
742 | struct dasd_device, alias_list)) | ||
743 | list_move(&device->alias_list, &lcu->active_devices); | ||
744 | } | ||
745 | spin_unlock_irqrestore(&lcu->lock, flags); | ||
746 | } | ||
747 | |||
748 | /* | ||
749 | * This function is called in interrupt context, so the | ||
750 | * cdev lock for device is already locked! | ||
751 | */ | ||
752 | static void _stop_all_devices_on_lcu(struct alias_lcu *lcu, | ||
753 | struct dasd_device *device) | ||
754 | { | ||
755 | struct alias_pav_group *pavgroup; | ||
756 | struct dasd_device *pos; | ||
757 | |||
758 | list_for_each_entry(pos, &lcu->active_devices, alias_list) { | ||
759 | if (pos != device) | ||
760 | spin_lock(get_ccwdev_lock(pos->cdev)); | ||
761 | pos->stopped |= DASD_STOPPED_SU; | ||
762 | if (pos != device) | ||
763 | spin_unlock(get_ccwdev_lock(pos->cdev)); | ||
764 | } | ||
765 | list_for_each_entry(pos, &lcu->inactive_devices, alias_list) { | ||
766 | if (pos != device) | ||
767 | spin_lock(get_ccwdev_lock(pos->cdev)); | ||
768 | pos->stopped |= DASD_STOPPED_SU; | ||
769 | if (pos != device) | ||
770 | spin_unlock(get_ccwdev_lock(pos->cdev)); | ||
771 | } | ||
772 | list_for_each_entry(pavgroup, &lcu->grouplist, group) { | ||
773 | list_for_each_entry(pos, &pavgroup->baselist, alias_list) { | ||
774 | if (pos != device) | ||
775 | spin_lock(get_ccwdev_lock(pos->cdev)); | ||
776 | pos->stopped |= DASD_STOPPED_SU; | ||
777 | if (pos != device) | ||
778 | spin_unlock(get_ccwdev_lock(pos->cdev)); | ||
779 | } | ||
780 | list_for_each_entry(pos, &pavgroup->aliaslist, alias_list) { | ||
781 | if (pos != device) | ||
782 | spin_lock(get_ccwdev_lock(pos->cdev)); | ||
783 | pos->stopped |= DASD_STOPPED_SU; | ||
784 | if (pos != device) | ||
785 | spin_unlock(get_ccwdev_lock(pos->cdev)); | ||
786 | } | ||
787 | } | ||
788 | } | ||
789 | |||
790 | static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu) | ||
791 | { | ||
792 | struct alias_pav_group *pavgroup; | ||
793 | struct dasd_device *device; | ||
794 | unsigned long flags; | ||
795 | |||
796 | list_for_each_entry(device, &lcu->active_devices, alias_list) { | ||
797 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | ||
798 | device->stopped &= ~DASD_STOPPED_SU; | ||
799 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | ||
800 | } | ||
801 | |||
802 | list_for_each_entry(device, &lcu->inactive_devices, alias_list) { | ||
803 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | ||
804 | device->stopped &= ~DASD_STOPPED_SU; | ||
805 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | ||
806 | } | ||
807 | |||
808 | list_for_each_entry(pavgroup, &lcu->grouplist, group) { | ||
809 | list_for_each_entry(device, &pavgroup->baselist, alias_list) { | ||
810 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | ||
811 | device->stopped &= ~DASD_STOPPED_SU; | ||
812 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), | ||
813 | flags); | ||
814 | } | ||
815 | list_for_each_entry(device, &pavgroup->aliaslist, alias_list) { | ||
816 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | ||
817 | device->stopped &= ~DASD_STOPPED_SU; | ||
818 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), | ||
819 | flags); | ||
820 | } | ||
821 | } | ||
822 | } | ||
823 | |||
824 | static void summary_unit_check_handling_work(struct work_struct *work) | ||
825 | { | ||
826 | struct alias_lcu *lcu; | ||
827 | struct summary_unit_check_work_data *suc_data; | ||
828 | unsigned long flags; | ||
829 | struct dasd_device *device; | ||
830 | |||
831 | suc_data = container_of(work, struct summary_unit_check_work_data, | ||
832 | worker); | ||
833 | lcu = container_of(suc_data, struct alias_lcu, suc_data); | ||
834 | device = suc_data->device; | ||
835 | |||
836 | /* 1. flush alias devices */ | ||
837 | flush_all_alias_devices_on_lcu(lcu); | ||
838 | |||
839 | /* 2. reset summary unit check */ | ||
840 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | ||
841 | device->stopped &= ~(DASD_STOPPED_SU | DASD_STOPPED_PENDING); | ||
842 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | ||
843 | reset_summary_unit_check(lcu, device, suc_data->reason); | ||
844 | |||
845 | spin_lock_irqsave(&lcu->lock, flags); | ||
846 | _unstop_all_devices_on_lcu(lcu); | ||
847 | _restart_all_base_devices_on_lcu(lcu); | ||
848 | /* 3. read new alias configuration */ | ||
849 | _schedule_lcu_update(lcu, device); | ||
850 | lcu->suc_data.device = NULL; | ||
851 | spin_unlock_irqrestore(&lcu->lock, flags); | ||
852 | } | ||
853 | |||
854 | /* | ||
855 | * note: this will be called from int handler context (cdev locked) | ||
856 | */ | ||
857 | void dasd_alias_handle_summary_unit_check(struct dasd_device *device, | ||
858 | struct irb *irb) | ||
859 | { | ||
860 | struct alias_lcu *lcu; | ||
861 | char reason; | ||
862 | struct dasd_eckd_private *private; | ||
863 | |||
864 | private = (struct dasd_eckd_private *) device->private; | ||
865 | |||
866 | reason = irb->ecw[8]; | ||
867 | DEV_MESSAGE(KERN_WARNING, device, "%s %x", | ||
868 | "eckd handle summary unit check: reason", reason); | ||
869 | |||
870 | lcu = private->lcu; | ||
871 | if (!lcu) { | ||
872 | DEV_MESSAGE(KERN_WARNING, device, "%s", | ||
873 | "device not ready to handle summary" | ||
874 | " unit check (no lcu structure)"); | ||
875 | return; | ||
876 | } | ||
877 | spin_lock(&lcu->lock); | ||
878 | _stop_all_devices_on_lcu(lcu, device); | ||
879 | /* prepare for lcu_update */ | ||
880 | private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING; | ||
881 | /* If this device is about to be removed, just return and wait for | ||
882 | * the next interrupt on a different device | ||
883 | */ | ||
884 | if (list_empty(&device->alias_list)) { | ||
885 | DEV_MESSAGE(KERN_WARNING, device, "%s", | ||
886 | "device is in offline processing," | ||
887 | " don't do summary unit check handling"); | ||
888 | spin_unlock(&lcu->lock); | ||
889 | return; | ||
890 | } | ||
891 | if (lcu->suc_data.device) { | ||
892 | /* already scheduled or running */ | ||
893 | DEV_MESSAGE(KERN_WARNING, device, "%s", | ||
894 | "previous instance of summary unit check worker" | ||
895 | " still pending"); | ||
896 | spin_unlock(&lcu->lock); | ||
897 | return; | ||
898 | } | ||
899 | lcu->suc_data.reason = reason; | ||
900 | lcu->suc_data.device = device; | ||
901 | spin_unlock(&lcu->lock); | ||
902 | schedule_work(&lcu->suc_data.worker); | ||
903 | } | ||
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 0c67258fb9ec..f4fb40257348 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c | |||
@@ -49,22 +49,6 @@ struct dasd_devmap { | |||
49 | }; | 49 | }; |
50 | 50 | ||
51 | /* | 51 | /* |
52 | * dasd_server_ssid_map contains a globally unique storage server subsystem ID. | ||
53 | * dasd_server_ssid_list contains the list of all subsystem IDs accessed by | ||
54 | * the DASD device driver. | ||
55 | */ | ||
56 | struct dasd_server_ssid_map { | ||
57 | struct list_head list; | ||
58 | struct system_id { | ||
59 | char vendor[4]; | ||
60 | char serial[15]; | ||
61 | __u16 ssid; | ||
62 | } sid; | ||
63 | }; | ||
64 | |||
65 | static struct list_head dasd_server_ssid_list; | ||
66 | |||
67 | /* | ||
68 | * Parameter parsing functions for dasd= parameter. The syntax is: | 52 | * Parameter parsing functions for dasd= parameter. The syntax is: |
69 | * <devno> : (0x)?[0-9a-fA-F]+ | 53 | * <devno> : (0x)?[0-9a-fA-F]+ |
70 | * <busid> : [0-0a-f]\.[0-9a-f]\.(0x)?[0-9a-fA-F]+ | 54 | * <busid> : [0-0a-f]\.[0-9a-f]\.(0x)?[0-9a-fA-F]+ |
@@ -721,8 +705,9 @@ dasd_ro_store(struct device *dev, struct device_attribute *attr, | |||
721 | devmap->features &= ~DASD_FEATURE_READONLY; | 705 | devmap->features &= ~DASD_FEATURE_READONLY; |
722 | if (devmap->device) | 706 | if (devmap->device) |
723 | devmap->device->features = devmap->features; | 707 | devmap->device->features = devmap->features; |
724 | if (devmap->device && devmap->device->gdp) | 708 | if (devmap->device && devmap->device->block |
725 | set_disk_ro(devmap->device->gdp, val); | 709 | && devmap->device->block->gdp) |
710 | set_disk_ro(devmap->device->block->gdp, val); | ||
726 | spin_unlock(&dasd_devmap_lock); | 711 | spin_unlock(&dasd_devmap_lock); |
727 | return count; | 712 | return count; |
728 | } | 713 | } |
@@ -893,12 +878,16 @@ dasd_alias_show(struct device *dev, struct device_attribute *attr, char *buf) | |||
893 | 878 | ||
894 | devmap = dasd_find_busid(dev->bus_id); | 879 | devmap = dasd_find_busid(dev->bus_id); |
895 | spin_lock(&dasd_devmap_lock); | 880 | spin_lock(&dasd_devmap_lock); |
896 | if (!IS_ERR(devmap)) | 881 | if (IS_ERR(devmap) || strlen(devmap->uid.vendor) == 0) { |
897 | alias = devmap->uid.alias; | 882 | spin_unlock(&dasd_devmap_lock); |
883 | return sprintf(buf, "0\n"); | ||
884 | } | ||
885 | if (devmap->uid.type == UA_BASE_PAV_ALIAS || | ||
886 | devmap->uid.type == UA_HYPER_PAV_ALIAS) | ||
887 | alias = 1; | ||
898 | else | 888 | else |
899 | alias = 0; | 889 | alias = 0; |
900 | spin_unlock(&dasd_devmap_lock); | 890 | spin_unlock(&dasd_devmap_lock); |
901 | |||
902 | return sprintf(buf, alias ? "1\n" : "0\n"); | 891 | return sprintf(buf, alias ? "1\n" : "0\n"); |
903 | } | 892 | } |
904 | 893 | ||
@@ -930,19 +919,36 @@ static ssize_t | |||
930 | dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf) | 919 | dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf) |
931 | { | 920 | { |
932 | struct dasd_devmap *devmap; | 921 | struct dasd_devmap *devmap; |
933 | char uid[UID_STRLEN]; | 922 | char uid_string[UID_STRLEN]; |
923 | char ua_string[3]; | ||
924 | struct dasd_uid *uid; | ||
934 | 925 | ||
935 | devmap = dasd_find_busid(dev->bus_id); | 926 | devmap = dasd_find_busid(dev->bus_id); |
936 | spin_lock(&dasd_devmap_lock); | 927 | spin_lock(&dasd_devmap_lock); |
937 | if (!IS_ERR(devmap) && strlen(devmap->uid.vendor) > 0) | 928 | if (IS_ERR(devmap) || strlen(devmap->uid.vendor) == 0) { |
938 | snprintf(uid, sizeof(uid), "%s.%s.%04x.%02x", | 929 | spin_unlock(&dasd_devmap_lock); |
939 | devmap->uid.vendor, devmap->uid.serial, | 930 | return sprintf(buf, "\n"); |
940 | devmap->uid.ssid, devmap->uid.unit_addr); | 931 | } |
941 | else | 932 | uid = &devmap->uid; |
942 | uid[0] = 0; | 933 | switch (uid->type) { |
934 | case UA_BASE_DEVICE: | ||
935 | sprintf(ua_string, "%02x", uid->real_unit_addr); | ||
936 | break; | ||
937 | case UA_BASE_PAV_ALIAS: | ||
938 | sprintf(ua_string, "%02x", uid->base_unit_addr); | ||
939 | break; | ||
940 | case UA_HYPER_PAV_ALIAS: | ||
941 | sprintf(ua_string, "xx"); | ||
942 | break; | ||
943 | default: | ||
944 | /* should not happen, treat like base device */ | ||
945 | sprintf(ua_string, "%02x", uid->real_unit_addr); | ||
946 | break; | ||
947 | } | ||
948 | snprintf(uid_string, sizeof(uid_string), "%s.%s.%04x.%s", | ||
949 | uid->vendor, uid->serial, uid->ssid, ua_string); | ||
943 | spin_unlock(&dasd_devmap_lock); | 950 | spin_unlock(&dasd_devmap_lock); |
944 | 951 | return snprintf(buf, PAGE_SIZE, "%s\n", uid_string); | |
945 | return snprintf(buf, PAGE_SIZE, "%s\n", uid); | ||
946 | } | 952 | } |
947 | 953 | ||
948 | static DEVICE_ATTR(uid, 0444, dasd_uid_show, NULL); | 954 | static DEVICE_ATTR(uid, 0444, dasd_uid_show, NULL); |
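
As an illustration of the uid attribute format introduced above, here is a minimal user-space sketch (not part of the patch; struct and function names are invented, only the fields visible in this hunk are modeled) of how the vendor.serial.ssid.unit-address string is put together for the three unit-address types:

#include <stdio.h>

enum ua_type { UA_BASE_DEVICE, UA_BASE_PAV_ALIAS, UA_HYPER_PAV_ALIAS };

struct uid_example {
	char vendor[4];
	char serial[15];
	unsigned short ssid;
	unsigned char real_unit_addr;
	unsigned char base_unit_addr;
	enum ua_type type;
};

/* Build "vendor.serial.ssid.ua"; hyper PAV aliases print "xx" instead of
 * a unit address, PAV aliases print the unit address of their base device. */
static void format_uid(const struct uid_example *uid, char *buf, size_t len)
{
	char ua[3];

	switch (uid->type) {
	case UA_BASE_PAV_ALIAS:
		snprintf(ua, sizeof(ua), "%02x", (unsigned int) uid->base_unit_addr);
		break;
	case UA_HYPER_PAV_ALIAS:
		snprintf(ua, sizeof(ua), "xx");
		break;
	case UA_BASE_DEVICE:
	default:
		snprintf(ua, sizeof(ua), "%02x", (unsigned int) uid->real_unit_addr);
		break;
	}
	snprintf(buf, len, "%s.%s.%04x.%s",
		 uid->vendor, uid->serial, (unsigned int) uid->ssid, ua);
}

int main(void)
{
	struct uid_example uid = { "IBM", "000000012345", 0x1234, 0x2a, 0, UA_BASE_DEVICE };
	char buf[64];

	format_uid(&uid, buf, sizeof(buf));
	printf("%s\n", buf);	/* prints IBM.000000012345.1234.2a */
	return 0;
}
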
@@ -1040,39 +1046,16 @@ int | |||
1040 | dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid) | 1046 | dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid) |
1041 | { | 1047 | { |
1042 | struct dasd_devmap *devmap; | 1048 | struct dasd_devmap *devmap; |
1043 | struct dasd_server_ssid_map *srv, *tmp; | ||
1044 | 1049 | ||
1045 | devmap = dasd_find_busid(cdev->dev.bus_id); | 1050 | devmap = dasd_find_busid(cdev->dev.bus_id); |
1046 | if (IS_ERR(devmap)) | 1051 | if (IS_ERR(devmap)) |
1047 | return PTR_ERR(devmap); | 1052 | return PTR_ERR(devmap); |
1048 | 1053 | ||
1049 | /* generate entry for server_ssid_map */ | ||
1050 | srv = (struct dasd_server_ssid_map *) | ||
1051 | kzalloc(sizeof(struct dasd_server_ssid_map), GFP_KERNEL); | ||
1052 | if (!srv) | ||
1053 | return -ENOMEM; | ||
1054 | strncpy(srv->sid.vendor, uid->vendor, sizeof(srv->sid.vendor) - 1); | ||
1055 | strncpy(srv->sid.serial, uid->serial, sizeof(srv->sid.serial) - 1); | ||
1056 | srv->sid.ssid = uid->ssid; | ||
1057 | |||
1058 | /* server is already contained ? */ | ||
1059 | spin_lock(&dasd_devmap_lock); | 1054 | spin_lock(&dasd_devmap_lock); |
1060 | devmap->uid = *uid; | 1055 | devmap->uid = *uid; |
1061 | list_for_each_entry(tmp, &dasd_server_ssid_list, list) { | ||
1062 | if (!memcmp(&srv->sid, &tmp->sid, | ||
1063 | sizeof(struct system_id))) { | ||
1064 | kfree(srv); | ||
1065 | srv = NULL; | ||
1066 | break; | ||
1067 | } | ||
1068 | } | ||
1069 | |||
1070 | /* add servermap to serverlist */ | ||
1071 | if (srv) | ||
1072 | list_add(&srv->list, &dasd_server_ssid_list); | ||
1073 | spin_unlock(&dasd_devmap_lock); | 1056 | spin_unlock(&dasd_devmap_lock); |
1074 | 1057 | ||
1075 | return (srv ? 1 : 0); | 1058 | return 0; |
1076 | } | 1059 | } |
1077 | EXPORT_SYMBOL_GPL(dasd_set_uid); | 1060 | EXPORT_SYMBOL_GPL(dasd_set_uid); |
1078 | 1061 | ||
@@ -1138,9 +1121,6 @@ dasd_devmap_init(void) | |||
1138 | dasd_max_devindex = 0; | 1121 | dasd_max_devindex = 0; |
1139 | for (i = 0; i < 256; i++) | 1122 | for (i = 0; i < 256; i++) |
1140 | INIT_LIST_HEAD(&dasd_hashlists[i]); | 1123 | INIT_LIST_HEAD(&dasd_hashlists[i]); |
1141 | |||
1142 | /* Initialize servermap structure. */ | ||
1143 | INIT_LIST_HEAD(&dasd_server_ssid_list); | ||
1144 | return 0; | 1124 | return 0; |
1145 | } | 1125 | } |
1146 | 1126 | ||
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index 571320ab9e1a..d91df38ee4f7 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c | |||
@@ -142,7 +142,7 @@ dasd_diag_erp(struct dasd_device *device) | |||
142 | int rc; | 142 | int rc; |
143 | 143 | ||
144 | mdsk_term_io(device); | 144 | mdsk_term_io(device); |
145 | rc = mdsk_init_io(device, device->bp_block, 0, NULL); | 145 | rc = mdsk_init_io(device, device->block->bp_block, 0, NULL); |
146 | if (rc) | 146 | if (rc) |
147 | DEV_MESSAGE(KERN_WARNING, device, "DIAG ERP unsuccessful, " | 147 | DEV_MESSAGE(KERN_WARNING, device, "DIAG ERP unsuccessful, " |
148 | "rc=%d", rc); | 148 | "rc=%d", rc); |
@@ -158,11 +158,11 @@ dasd_start_diag(struct dasd_ccw_req * cqr) | |||
158 | struct dasd_diag_req *dreq; | 158 | struct dasd_diag_req *dreq; |
159 | int rc; | 159 | int rc; |
160 | 160 | ||
161 | device = cqr->device; | 161 | device = cqr->startdev; |
162 | if (cqr->retries < 0) { | 162 | if (cqr->retries < 0) { |
163 | DEV_MESSAGE(KERN_WARNING, device, "DIAG start_IO: request %p " | 163 | DEV_MESSAGE(KERN_WARNING, device, "DIAG start_IO: request %p " |
164 | "- no retry left)", cqr); | 164 | "- no retry left)", cqr); |
165 | cqr->status = DASD_CQR_FAILED; | 165 | cqr->status = DASD_CQR_ERROR; |
166 | return -EIO; | 166 | return -EIO; |
167 | } | 167 | } |
168 | private = (struct dasd_diag_private *) device->private; | 168 | private = (struct dasd_diag_private *) device->private; |
@@ -184,7 +184,7 @@ dasd_start_diag(struct dasd_ccw_req * cqr) | |||
184 | switch (rc) { | 184 | switch (rc) { |
185 | case 0: /* Synchronous I/O finished successfully */ | 185 | case 0: /* Synchronous I/O finished successfully */ |
186 | cqr->stopclk = get_clock(); | 186 | cqr->stopclk = get_clock(); |
187 | cqr->status = DASD_CQR_DONE; | 187 | cqr->status = DASD_CQR_SUCCESS; |
188 | /* Indicate to calling function that only a dasd_schedule_bh() | 188 | /* Indicate to calling function that only a dasd_schedule_bh() |
189 | and no timer is needed */ | 189 | and no timer is needed */ |
190 | rc = -EACCES; | 190 | rc = -EACCES; |
@@ -209,12 +209,12 @@ dasd_diag_term_IO(struct dasd_ccw_req * cqr) | |||
209 | { | 209 | { |
210 | struct dasd_device *device; | 210 | struct dasd_device *device; |
211 | 211 | ||
212 | device = cqr->device; | 212 | device = cqr->startdev; |
213 | mdsk_term_io(device); | 213 | mdsk_term_io(device); |
214 | mdsk_init_io(device, device->bp_block, 0, NULL); | 214 | mdsk_init_io(device, device->block->bp_block, 0, NULL); |
215 | cqr->status = DASD_CQR_CLEAR; | 215 | cqr->status = DASD_CQR_CLEAR_PENDING; |
216 | cqr->stopclk = get_clock(); | 216 | cqr->stopclk = get_clock(); |
217 | dasd_schedule_bh(device); | 217 | dasd_schedule_device_bh(device); |
218 | return 0; | 218 | return 0; |
219 | } | 219 | } |
220 | 220 | ||
@@ -247,7 +247,7 @@ dasd_ext_handler(__u16 code) | |||
247 | return; | 247 | return; |
248 | } | 248 | } |
249 | cqr = (struct dasd_ccw_req *) ip; | 249 | cqr = (struct dasd_ccw_req *) ip; |
250 | device = (struct dasd_device *) cqr->device; | 250 | device = (struct dasd_device *) cqr->startdev; |
251 | if (strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { | 251 | if (strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { |
252 | DEV_MESSAGE(KERN_WARNING, device, | 252 | DEV_MESSAGE(KERN_WARNING, device, |
253 | " magic number of dasd_ccw_req 0x%08X doesn't" | 253 | " magic number of dasd_ccw_req 0x%08X doesn't" |
@@ -260,10 +260,10 @@ dasd_ext_handler(__u16 code) | |||
260 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | 260 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); |
261 | 261 | ||
262 | /* Check for a pending clear operation */ | 262 | /* Check for a pending clear operation */ |
263 | if (cqr->status == DASD_CQR_CLEAR) { | 263 | if (cqr->status == DASD_CQR_CLEAR_PENDING) { |
264 | cqr->status = DASD_CQR_QUEUED; | 264 | cqr->status = DASD_CQR_CLEARED; |
265 | dasd_clear_timer(device); | 265 | dasd_device_clear_timer(device); |
266 | dasd_schedule_bh(device); | 266 | dasd_schedule_device_bh(device); |
267 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | 267 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); |
268 | return; | 268 | return; |
269 | } | 269 | } |
@@ -272,11 +272,11 @@ dasd_ext_handler(__u16 code) | |||
272 | 272 | ||
273 | expires = 0; | 273 | expires = 0; |
274 | if (status == 0) { | 274 | if (status == 0) { |
275 | cqr->status = DASD_CQR_DONE; | 275 | cqr->status = DASD_CQR_SUCCESS; |
276 | /* Start first request on queue if possible -> fast_io. */ | 276 | /* Start first request on queue if possible -> fast_io. */ |
277 | if (!list_empty(&device->ccw_queue)) { | 277 | if (!list_empty(&device->ccw_queue)) { |
278 | next = list_entry(device->ccw_queue.next, | 278 | next = list_entry(device->ccw_queue.next, |
279 | struct dasd_ccw_req, list); | 279 | struct dasd_ccw_req, devlist); |
280 | if (next->status == DASD_CQR_QUEUED) { | 280 | if (next->status == DASD_CQR_QUEUED) { |
281 | rc = dasd_start_diag(next); | 281 | rc = dasd_start_diag(next); |
282 | if (rc == 0) | 282 | if (rc == 0) |
@@ -296,10 +296,10 @@ dasd_ext_handler(__u16 code) | |||
296 | } | 296 | } |
297 | 297 | ||
298 | if (expires != 0) | 298 | if (expires != 0) |
299 | dasd_set_timer(device, expires); | 299 | dasd_device_set_timer(device, expires); |
300 | else | 300 | else |
301 | dasd_clear_timer(device); | 301 | dasd_device_clear_timer(device); |
302 | dasd_schedule_bh(device); | 302 | dasd_schedule_device_bh(device); |
303 | 303 | ||
304 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | 304 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); |
305 | } | 305 | } |
@@ -309,6 +309,7 @@ dasd_ext_handler(__u16 code) | |||
309 | static int | 309 | static int |
310 | dasd_diag_check_device(struct dasd_device *device) | 310 | dasd_diag_check_device(struct dasd_device *device) |
311 | { | 311 | { |
312 | struct dasd_block *block; | ||
312 | struct dasd_diag_private *private; | 313 | struct dasd_diag_private *private; |
313 | struct dasd_diag_characteristics *rdc_data; | 314 | struct dasd_diag_characteristics *rdc_data; |
314 | struct dasd_diag_bio bio; | 315 | struct dasd_diag_bio bio; |
@@ -328,6 +329,16 @@ dasd_diag_check_device(struct dasd_device *device) | |||
328 | ccw_device_get_id(device->cdev, &private->dev_id); | 329 | ccw_device_get_id(device->cdev, &private->dev_id); |
329 | device->private = (void *) private; | 330 | device->private = (void *) private; |
330 | } | 331 | } |
332 | block = dasd_alloc_block(); | ||
333 | if (IS_ERR(block)) { | ||
334 | DEV_MESSAGE(KERN_WARNING, device, "%s", | ||
335 | "could not allocate dasd block structure"); | ||
336 | kfree(device->private); | ||
337 | return PTR_ERR(block); | ||
338 | } | ||
339 | device->block = block; | ||
340 | block->base = device; | ||
341 | |||
331 | /* Read Device Characteristics */ | 342 | /* Read Device Characteristics */ |
332 | rdc_data = (void *) &(private->rdc_data); | 343 | rdc_data = (void *) &(private->rdc_data); |
333 | rdc_data->dev_nr = private->dev_id.devno; | 344 | rdc_data->dev_nr = private->dev_id.devno; |
@@ -409,14 +420,14 @@ dasd_diag_check_device(struct dasd_device *device) | |||
409 | sizeof(DASD_DIAG_CMS1)) == 0) { | 420 | sizeof(DASD_DIAG_CMS1)) == 0) { |
410 | /* get formatted blocksize from label block */ | 421 | /* get formatted blocksize from label block */ |
411 | bsize = (unsigned int) label->block_size; | 422 | bsize = (unsigned int) label->block_size; |
412 | device->blocks = (unsigned long) label->block_count; | 423 | block->blocks = (unsigned long) label->block_count; |
413 | } else | 424 | } else |
414 | device->blocks = end_block; | 425 | block->blocks = end_block; |
415 | device->bp_block = bsize; | 426 | block->bp_block = bsize; |
416 | device->s2b_shift = 0; /* bits to shift 512 to get a block */ | 427 | block->s2b_shift = 0; /* bits to shift 512 to get a block */ |
417 | for (sb = 512; sb < bsize; sb = sb << 1) | 428 | for (sb = 512; sb < bsize; sb = sb << 1) |
418 | device->s2b_shift++; | 429 | block->s2b_shift++; |
419 | rc = mdsk_init_io(device, device->bp_block, 0, NULL); | 430 | rc = mdsk_init_io(device, block->bp_block, 0, NULL); |
420 | if (rc) { | 431 | if (rc) { |
421 | DEV_MESSAGE(KERN_WARNING, device, "DIAG initialization " | 432 | DEV_MESSAGE(KERN_WARNING, device, "DIAG initialization " |
422 | "failed (rc=%d)", rc); | 433 | "failed (rc=%d)", rc); |
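
For reference, the s2b_shift value set in the hunk above is simply log2(blocksize/512), the shift that converts 512-byte sectors into device blocks; a stand-alone sketch of the same loop (illustrative only):

#include <stdio.h>

/* Return log2(bp_block / 512), mirroring the shift loop above. */
static unsigned int sectors_to_block_shift(unsigned int bp_block)
{
	unsigned int shift = 0;
	unsigned int sb;

	for (sb = 512; sb < bp_block; sb <<= 1)
		shift++;
	return shift;
}

int main(void)
{
	printf("%u %u %u\n",
	       sectors_to_block_shift(512),	/* 0 */
	       sectors_to_block_shift(2048),	/* 2 */
	       sectors_to_block_shift(4096));	/* 3 */
	return 0;
}
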
@@ -424,9 +435,9 @@ dasd_diag_check_device(struct dasd_device *device) | |||
424 | } else { | 435 | } else { |
425 | DEV_MESSAGE(KERN_INFO, device, | 436 | DEV_MESSAGE(KERN_INFO, device, |
426 | "(%ld B/blk): %ldkB", | 437 | "(%ld B/blk): %ldkB", |
427 | (unsigned long) device->bp_block, | 438 | (unsigned long) block->bp_block, |
428 | (unsigned long) (device->blocks << | 439 | (unsigned long) (block->blocks << |
429 | device->s2b_shift) >> 1); | 440 | block->s2b_shift) >> 1); |
430 | } | 441 | } |
431 | out: | 442 | out: |
432 | free_page((long) label); | 443 | free_page((long) label); |
@@ -436,22 +447,16 @@ out: | |||
436 | /* Fill in virtual disk geometry for device. Return zero on success, non-zero | 447 | /* Fill in virtual disk geometry for device. Return zero on success, non-zero |
437 | * otherwise. */ | 448 | * otherwise. */ |
438 | static int | 449 | static int |
439 | dasd_diag_fill_geometry(struct dasd_device *device, struct hd_geometry *geo) | 450 | dasd_diag_fill_geometry(struct dasd_block *block, struct hd_geometry *geo) |
440 | { | 451 | { |
441 | if (dasd_check_blocksize(device->bp_block) != 0) | 452 | if (dasd_check_blocksize(block->bp_block) != 0) |
442 | return -EINVAL; | 453 | return -EINVAL; |
443 | geo->cylinders = (device->blocks << device->s2b_shift) >> 10; | 454 | geo->cylinders = (block->blocks << block->s2b_shift) >> 10; |
444 | geo->heads = 16; | 455 | geo->heads = 16; |
445 | geo->sectors = 128 >> device->s2b_shift; | 456 | geo->sectors = 128 >> block->s2b_shift; |
446 | return 0; | 457 | return 0; |
447 | } | 458 | } |
448 | 459 | ||
449 | static dasd_era_t | ||
450 | dasd_diag_examine_error(struct dasd_ccw_req * cqr, struct irb * stat) | ||
451 | { | ||
452 | return dasd_era_fatal; | ||
453 | } | ||
454 | |||
455 | static dasd_erp_fn_t | 460 | static dasd_erp_fn_t |
456 | dasd_diag_erp_action(struct dasd_ccw_req * cqr) | 461 | dasd_diag_erp_action(struct dasd_ccw_req * cqr) |
457 | { | 462 | { |
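
The dasd_diag_fill_geometry() change above reports a purely virtual geometry; a stand-alone sketch of the same arithmetic (struct and function names invented for the example):

#include <stdio.h>

struct fake_geo { unsigned int cylinders, heads, sectors; };

/* Mirror of dasd_diag_fill_geometry() above: 16 heads, 128 >> s2b_shift
 * sectors per track, and cylinders taken as the total number of 512-byte
 * sectors (blocks << s2b_shift) divided by 1024. */
static void fake_fill_geometry(unsigned long blocks, unsigned int s2b_shift,
			       struct fake_geo *geo)
{
	geo->cylinders = (blocks << s2b_shift) >> 10;
	geo->heads = 16;
	geo->sectors = 128 >> s2b_shift;
}

int main(void)
{
	struct fake_geo geo;

	fake_fill_geometry(600000, 3, &geo);	/* 600000 blocks of 4kB */
	printf("C/H/S = %u/%u/%u\n", geo.cylinders, geo.heads, geo.sectors);
	return 0;
}
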
@@ -466,8 +471,9 @@ dasd_diag_erp_postaction(struct dasd_ccw_req * cqr) | |||
466 | 471 | ||
467 | /* Create DASD request from block device request. Return pointer to new | 472 | /* Create DASD request from block device request. Return pointer to new |
468 | * request on success, ERR_PTR otherwise. */ | 473 | * request on success, ERR_PTR otherwise. */ |
469 | static struct dasd_ccw_req * | 474 | static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev, |
470 | dasd_diag_build_cp(struct dasd_device * device, struct request *req) | 475 | struct dasd_block *block, |
476 | struct request *req) | ||
471 | { | 477 | { |
472 | struct dasd_ccw_req *cqr; | 478 | struct dasd_ccw_req *cqr; |
473 | struct dasd_diag_req *dreq; | 479 | struct dasd_diag_req *dreq; |
@@ -486,17 +492,17 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req) | |||
486 | rw_cmd = MDSK_WRITE_REQ; | 492 | rw_cmd = MDSK_WRITE_REQ; |
487 | else | 493 | else |
488 | return ERR_PTR(-EINVAL); | 494 | return ERR_PTR(-EINVAL); |
489 | blksize = device->bp_block; | 495 | blksize = block->bp_block; |
490 | /* Calculate record id of first and last block. */ | 496 | /* Calculate record id of first and last block. */ |
491 | first_rec = req->sector >> device->s2b_shift; | 497 | first_rec = req->sector >> block->s2b_shift; |
492 | last_rec = (req->sector + req->nr_sectors - 1) >> device->s2b_shift; | 498 | last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift; |
493 | /* Check struct bio and count the number of blocks for the request. */ | 499 | /* Check struct bio and count the number of blocks for the request. */ |
494 | count = 0; | 500 | count = 0; |
495 | rq_for_each_segment(bv, req, iter) { | 501 | rq_for_each_segment(bv, req, iter) { |
496 | if (bv->bv_len & (blksize - 1)) | 502 | if (bv->bv_len & (blksize - 1)) |
497 | /* Fba can only do full blocks. */ | 503 | /* Fba can only do full blocks. */ |
498 | return ERR_PTR(-EINVAL); | 504 | return ERR_PTR(-EINVAL); |
499 | count += bv->bv_len >> (device->s2b_shift + 9); | 505 | count += bv->bv_len >> (block->s2b_shift + 9); |
500 | } | 506 | } |
501 | /* Paranoia. */ | 507 | /* Paranoia. */ |
502 | if (count != last_rec - first_rec + 1) | 508 | if (count != last_rec - first_rec + 1) |
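
The request checks above map 512-byte sectors to device records and reject partial blocks; a compact user-space sketch of that mapping (names invented, same arithmetic):

#include <stdio.h>

/* Map a block-layer request (in 512-byte sectors) to device records:
 * shift sectors to blocks and require that the segment byte count adds
 * up to exactly last_rec - first_rec + 1 full blocks. */
static int request_to_records(unsigned long sector, unsigned long nr_sectors,
			      unsigned int s2b_shift, unsigned int blksize,
			      unsigned long seg_bytes)
{
	unsigned long first_rec = sector >> s2b_shift;
	unsigned long last_rec = (sector + nr_sectors - 1) >> s2b_shift;
	unsigned long count;

	if (seg_bytes & (blksize - 1))
		return -1;		/* only full blocks allowed */
	count = seg_bytes >> (s2b_shift + 9);
	if (count != last_rec - first_rec + 1)
		return -1;		/* paranoia check, as in the driver */
	return (int) count;
}

int main(void)
{
	/* 4kB blocks (s2b_shift = 3): sectors 24..39 span records 3..4 */
	printf("%d\n", request_to_records(24, 16, 3, 4096, 2 * 4096)); /* 2 */
	return 0;
}
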
@@ -505,7 +511,7 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req) | |||
505 | datasize = sizeof(struct dasd_diag_req) + | 511 | datasize = sizeof(struct dasd_diag_req) + |
506 | count*sizeof(struct dasd_diag_bio); | 512 | count*sizeof(struct dasd_diag_bio); |
507 | cqr = dasd_smalloc_request(dasd_diag_discipline.name, 0, | 513 | cqr = dasd_smalloc_request(dasd_diag_discipline.name, 0, |
508 | datasize, device); | 514 | datasize, memdev); |
509 | if (IS_ERR(cqr)) | 515 | if (IS_ERR(cqr)) |
510 | return cqr; | 516 | return cqr; |
511 | 517 | ||
@@ -529,7 +535,9 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req) | |||
529 | cqr->buildclk = get_clock(); | 535 | cqr->buildclk = get_clock(); |
530 | if (req->cmd_flags & REQ_FAILFAST) | 536 | if (req->cmd_flags & REQ_FAILFAST) |
531 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); | 537 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); |
532 | cqr->device = device; | 538 | cqr->startdev = memdev; |
539 | cqr->memdev = memdev; | ||
540 | cqr->block = block; | ||
533 | cqr->expires = DIAG_TIMEOUT; | 541 | cqr->expires = DIAG_TIMEOUT; |
534 | cqr->status = DASD_CQR_FILLED; | 542 | cqr->status = DASD_CQR_FILLED; |
535 | return cqr; | 543 | return cqr; |
@@ -543,10 +551,15 @@ dasd_diag_free_cp(struct dasd_ccw_req *cqr, struct request *req) | |||
543 | int status; | 551 | int status; |
544 | 552 | ||
545 | status = cqr->status == DASD_CQR_DONE; | 553 | status = cqr->status == DASD_CQR_DONE; |
546 | dasd_sfree_request(cqr, cqr->device); | 554 | dasd_sfree_request(cqr, cqr->memdev); |
547 | return status; | 555 | return status; |
548 | } | 556 | } |
549 | 557 | ||
558 | static void dasd_diag_handle_terminated_request(struct dasd_ccw_req *cqr) | ||
559 | { | ||
560 | cqr->status = DASD_CQR_FILLED; | ||
561 | }; | ||
562 | |||
550 | /* Fill in IOCTL data for device. */ | 563 | /* Fill in IOCTL data for device. */ |
551 | static int | 564 | static int |
552 | dasd_diag_fill_info(struct dasd_device * device, | 565 | dasd_diag_fill_info(struct dasd_device * device, |
@@ -583,7 +596,7 @@ static struct dasd_discipline dasd_diag_discipline = { | |||
583 | .fill_geometry = dasd_diag_fill_geometry, | 596 | .fill_geometry = dasd_diag_fill_geometry, |
584 | .start_IO = dasd_start_diag, | 597 | .start_IO = dasd_start_diag, |
585 | .term_IO = dasd_diag_term_IO, | 598 | .term_IO = dasd_diag_term_IO, |
586 | .examine_error = dasd_diag_examine_error, | 599 | .handle_terminated_request = dasd_diag_handle_terminated_request, |
587 | .erp_action = dasd_diag_erp_action, | 600 | .erp_action = dasd_diag_erp_action, |
588 | .erp_postaction = dasd_diag_erp_postaction, | 601 | .erp_postaction = dasd_diag_erp_postaction, |
589 | .build_cp = dasd_diag_build_cp, | 602 | .build_cp = dasd_diag_build_cp, |
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 44adf8496bda..61f16937c1e0 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c | |||
@@ -52,16 +52,6 @@ MODULE_LICENSE("GPL"); | |||
52 | 52 | ||
53 | static struct dasd_discipline dasd_eckd_discipline; | 53 | static struct dasd_discipline dasd_eckd_discipline; |
54 | 54 | ||
55 | struct dasd_eckd_private { | ||
56 | struct dasd_eckd_characteristics rdc_data; | ||
57 | struct dasd_eckd_confdata conf_data; | ||
58 | struct dasd_eckd_path path_data; | ||
59 | struct eckd_count count_area[5]; | ||
60 | int init_cqr_status; | ||
61 | int uses_cdl; | ||
62 | struct attrib_data_t attrib; /* e.g. cache operations */ | ||
63 | }; | ||
64 | |||
65 | /* The ccw bus type uses this table to find devices that it sends to | 55 | /* The ccw bus type uses this table to find devices that it sends to |
66 | * dasd_eckd_probe */ | 56 | * dasd_eckd_probe */ |
67 | static struct ccw_device_id dasd_eckd_ids[] = { | 57 | static struct ccw_device_id dasd_eckd_ids[] = { |
@@ -188,7 +178,7 @@ check_XRC (struct ccw1 *de_ccw, | |||
188 | if (rc == -ENOSYS || rc == -EACCES) | 178 | if (rc == -ENOSYS || rc == -EACCES) |
189 | rc = 0; | 179 | rc = 0; |
190 | 180 | ||
191 | de_ccw->count = sizeof (struct DE_eckd_data); | 181 | de_ccw->count = sizeof(struct DE_eckd_data); |
192 | de_ccw->flags |= CCW_FLAG_SLI; | 182 | de_ccw->flags |= CCW_FLAG_SLI; |
193 | return rc; | 183 | return rc; |
194 | } | 184 | } |
@@ -208,7 +198,7 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk, | |||
208 | ccw->count = 16; | 198 | ccw->count = 16; |
209 | ccw->cda = (__u32) __pa(data); | 199 | ccw->cda = (__u32) __pa(data); |
210 | 200 | ||
211 | memset(data, 0, sizeof (struct DE_eckd_data)); | 201 | memset(data, 0, sizeof(struct DE_eckd_data)); |
212 | switch (cmd) { | 202 | switch (cmd) { |
213 | case DASD_ECKD_CCW_READ_HOME_ADDRESS: | 203 | case DASD_ECKD_CCW_READ_HOME_ADDRESS: |
214 | case DASD_ECKD_CCW_READ_RECORD_ZERO: | 204 | case DASD_ECKD_CCW_READ_RECORD_ZERO: |
@@ -280,6 +270,132 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk, | |||
280 | return rc; | 270 | return rc; |
281 | } | 271 | } |
282 | 272 | ||
273 | static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata, | ||
274 | struct dasd_device *device) | ||
275 | { | ||
276 | struct dasd_eckd_private *private; | ||
277 | int rc; | ||
278 | |||
279 | private = (struct dasd_eckd_private *) device->private; | ||
280 | if (!private->rdc_data.facilities.XRC_supported) | ||
281 | return 0; | ||
282 | |||
283 | /* switch on System Time Stamp - needed for XRC Support */ | ||
284 | pfxdata->define_extend.ga_extended |= 0x08; /* 'Time Stamp Valid' */ | ||
285 | pfxdata->define_extend.ga_extended |= 0x02; /* 'Extended Parameter' */ | ||
286 | pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */ | ||
287 | |||
288 | rc = get_sync_clock(&pfxdata->define_extend.ep_sys_time); | ||
289 | /* Ignore return code if sync clock is switched off. */ | ||
290 | if (rc == -ENOSYS || rc == -EACCES) | ||
291 | rc = 0; | ||
292 | return rc; | ||
293 | } | ||
294 | |||
295 | static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata, int trk, | ||
296 | int totrk, int cmd, struct dasd_device *basedev, | ||
297 | struct dasd_device *startdev) | ||
298 | { | ||
299 | struct dasd_eckd_private *basepriv, *startpriv; | ||
300 | struct DE_eckd_data *data; | ||
301 | struct ch_t geo, beg, end; | ||
302 | int rc = 0; | ||
303 | |||
304 | basepriv = (struct dasd_eckd_private *) basedev->private; | ||
305 | startpriv = (struct dasd_eckd_private *) startdev->private; | ||
306 | data = &pfxdata->define_extend; | ||
307 | |||
308 | ccw->cmd_code = DASD_ECKD_CCW_PFX; | ||
309 | ccw->flags = 0; | ||
310 | ccw->count = sizeof(*pfxdata); | ||
311 | ccw->cda = (__u32) __pa(pfxdata); | ||
312 | |||
313 | memset(pfxdata, 0, sizeof(*pfxdata)); | ||
314 | /* prefix data */ | ||
315 | pfxdata->format = 0; | ||
316 | pfxdata->base_address = basepriv->conf_data.ned1.unit_addr; | ||
317 | pfxdata->base_lss = basepriv->conf_data.ned1.ID; | ||
318 | pfxdata->validity.define_extend = 1; | ||
319 | |||
320 | /* private uid is kept up to date, conf_data may be outdated */ | ||
321 | if (startpriv->uid.type != UA_BASE_DEVICE) { | ||
322 | pfxdata->validity.verify_base = 1; | ||
323 | if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) | ||
324 | pfxdata->validity.hyper_pav = 1; | ||
325 | } | ||
326 | |||
327 | /* define extend data (mostly)*/ | ||
328 | switch (cmd) { | ||
329 | case DASD_ECKD_CCW_READ_HOME_ADDRESS: | ||
330 | case DASD_ECKD_CCW_READ_RECORD_ZERO: | ||
331 | case DASD_ECKD_CCW_READ: | ||
332 | case DASD_ECKD_CCW_READ_MT: | ||
333 | case DASD_ECKD_CCW_READ_CKD: | ||
334 | case DASD_ECKD_CCW_READ_CKD_MT: | ||
335 | case DASD_ECKD_CCW_READ_KD: | ||
336 | case DASD_ECKD_CCW_READ_KD_MT: | ||
337 | case DASD_ECKD_CCW_READ_COUNT: | ||
338 | data->mask.perm = 0x1; | ||
339 | data->attributes.operation = basepriv->attrib.operation; | ||
340 | break; | ||
341 | case DASD_ECKD_CCW_WRITE: | ||
342 | case DASD_ECKD_CCW_WRITE_MT: | ||
343 | case DASD_ECKD_CCW_WRITE_KD: | ||
344 | case DASD_ECKD_CCW_WRITE_KD_MT: | ||
345 | data->mask.perm = 0x02; | ||
346 | data->attributes.operation = basepriv->attrib.operation; | ||
347 | rc = check_XRC_on_prefix(pfxdata, basedev); | ||
348 | break; | ||
349 | case DASD_ECKD_CCW_WRITE_CKD: | ||
350 | case DASD_ECKD_CCW_WRITE_CKD_MT: | ||
351 | data->attributes.operation = DASD_BYPASS_CACHE; | ||
352 | rc = check_XRC_on_prefix(pfxdata, basedev); | ||
353 | break; | ||
354 | case DASD_ECKD_CCW_ERASE: | ||
355 | case DASD_ECKD_CCW_WRITE_HOME_ADDRESS: | ||
356 | case DASD_ECKD_CCW_WRITE_RECORD_ZERO: | ||
357 | data->mask.perm = 0x3; | ||
358 | data->mask.auth = 0x1; | ||
359 | data->attributes.operation = DASD_BYPASS_CACHE; | ||
360 | rc = check_XRC_on_prefix(pfxdata, basedev); | ||
361 | break; | ||
362 | default: | ||
363 | DEV_MESSAGE(KERN_ERR, basedev, "unknown opcode 0x%x", cmd); | ||
364 | break; | ||
365 | } | ||
366 | |||
367 | data->attributes.mode = 0x3; /* ECKD */ | ||
368 | |||
369 | if ((basepriv->rdc_data.cu_type == 0x2105 || | ||
370 | basepriv->rdc_data.cu_type == 0x2107 || | ||
371 | basepriv->rdc_data.cu_type == 0x1750) | ||
372 | && !(basepriv->uses_cdl && trk < 2)) | ||
373 | data->ga_extended |= 0x40; /* Regular Data Format Mode */ | ||
374 | |||
375 | geo.cyl = basepriv->rdc_data.no_cyl; | ||
376 | geo.head = basepriv->rdc_data.trk_per_cyl; | ||
377 | beg.cyl = trk / geo.head; | ||
378 | beg.head = trk % geo.head; | ||
379 | end.cyl = totrk / geo.head; | ||
380 | end.head = totrk % geo.head; | ||
381 | |||
382 | /* check for sequential prestage - enhance cylinder range */ | ||
383 | if (data->attributes.operation == DASD_SEQ_PRESTAGE || | ||
384 | data->attributes.operation == DASD_SEQ_ACCESS) { | ||
385 | |||
386 | if (end.cyl + basepriv->attrib.nr_cyl < geo.cyl) | ||
387 | end.cyl += basepriv->attrib.nr_cyl; | ||
388 | else | ||
389 | end.cyl = (geo.cyl - 1); | ||
390 | } | ||
391 | |||
392 | data->beg_ext.cyl = beg.cyl; | ||
393 | data->beg_ext.head = beg.head; | ||
394 | data->end_ext.cyl = end.cyl; | ||
395 | data->end_ext.head = end.head; | ||
396 | return rc; | ||
397 | } | ||
398 | |||
283 | static void | 399 | static void |
284 | locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk, | 400 | locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk, |
285 | int rec_on_trk, int no_rec, int cmd, | 401 | int rec_on_trk, int no_rec, int cmd, |
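
The begin/end extent computation in the new prefix() helper above splits a linear track number into cylinder/head and, for sequential access, pushes the end cylinder out by nr_cyl; a stand-alone sketch of that arithmetic (illustrative only):

#include <stdio.h>

struct chs { unsigned int cyl, head; };

/* Split linear track numbers into cylinder/head using tracks-per-cylinder;
 * for sequential prestage, extend the end cylinder by seq_nr_cyl but clamp
 * it to the last cylinder of the device. */
static void extent_range(unsigned int trk, unsigned int totrk,
			 unsigned int no_cyl, unsigned int trk_per_cyl,
			 unsigned int seq_nr_cyl,	/* 0 = no prestage */
			 struct chs *beg, struct chs *end)
{
	beg->cyl = trk / trk_per_cyl;
	beg->head = trk % trk_per_cyl;
	end->cyl = totrk / trk_per_cyl;
	end->head = totrk % trk_per_cyl;

	if (seq_nr_cyl) {
		if (end->cyl + seq_nr_cyl < no_cyl)
			end->cyl += seq_nr_cyl;
		else
			end->cyl = no_cyl - 1;
	}
}

int main(void)
{
	struct chs beg, end;

	extent_range(31, 47, 10017, 15, 2, &beg, &end);
	printf("beg %u/%u end %u/%u\n", beg.cyl, beg.head, end.cyl, end.head);
	/* beg 2/1 end 5/2: end cylinder 3 extended by 2 for prestage */
	return 0;
}
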
@@ -300,7 +416,7 @@ locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk, | |||
300 | ccw->count = 16; | 416 | ccw->count = 16; |
301 | ccw->cda = (__u32) __pa(data); | 417 | ccw->cda = (__u32) __pa(data); |
302 | 418 | ||
303 | memset(data, 0, sizeof (struct LO_eckd_data)); | 419 | memset(data, 0, sizeof(struct LO_eckd_data)); |
304 | sector = 0; | 420 | sector = 0; |
305 | if (rec_on_trk) { | 421 | if (rec_on_trk) { |
306 | switch (private->rdc_data.dev_type) { | 422 | switch (private->rdc_data.dev_type) { |
@@ -441,12 +557,15 @@ dasd_eckd_generate_uid(struct dasd_device *device, struct dasd_uid *uid) | |||
441 | sizeof(uid->serial) - 1); | 557 | sizeof(uid->serial) - 1); |
442 | EBCASC(uid->serial, sizeof(uid->serial) - 1); | 558 | EBCASC(uid->serial, sizeof(uid->serial) - 1); |
443 | uid->ssid = confdata->neq.subsystemID; | 559 | uid->ssid = confdata->neq.subsystemID; |
444 | if (confdata->ned2.sneq.flags == 0x40) { | 560 | uid->real_unit_addr = confdata->ned1.unit_addr; |
445 | uid->alias = 1; | 561 | if (confdata->ned2.sneq.flags == 0x40 && |
446 | uid->unit_addr = confdata->ned2.sneq.base_unit_addr; | 562 | confdata->ned2.sneq.format == 0x0001) { |
447 | } else | 563 | uid->type = confdata->ned2.sneq.sua_flags; |
448 | uid->unit_addr = confdata->ned1.unit_addr; | 564 | if (uid->type == UA_BASE_PAV_ALIAS) |
449 | 565 | uid->base_unit_addr = confdata->ned2.sneq.base_unit_addr; | |
566 | } else { | ||
567 | uid->type = UA_BASE_DEVICE; | ||
568 | } | ||
450 | return 0; | 569 | return 0; |
451 | } | 570 | } |
452 | 571 | ||
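
The new uid classification above takes the unit-address type from the SNEQ only when its flags are 0x40 and its format is 0x0001; a sketch of that decision (the enum values here are illustrative, the real constants live in the driver headers):

#include <stdio.h>

enum ua { UA_BASE_DEVICE = 0, UA_BASE_PAV_ALIAS = 1, UA_HYPER_PAV_ALIAS = 2 };

/* Only a self-description record with flags 0x40 and format 0x0001 carries
 * a PAV unit-address type; anything else is treated as a base device. */
static enum ua classify_unit_addr(unsigned char sneq_flags,
				  unsigned short sneq_format,
				  unsigned char sua_flags)
{
	if (sneq_flags == 0x40 && sneq_format == 0x0001)
		return (enum ua) sua_flags;
	return UA_BASE_DEVICE;
}

int main(void)
{
	printf("%d %d\n",
	       classify_unit_addr(0x40, 0x0001, UA_BASE_PAV_ALIAS),	/* 1 */
	       classify_unit_addr(0x00, 0x0000, UA_HYPER_PAV_ALIAS));	/* 0 */
	return 0;
}
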
@@ -470,7 +589,9 @@ static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device, | |||
470 | ccw->cda = (__u32)(addr_t)rcd_buffer; | 589 | ccw->cda = (__u32)(addr_t)rcd_buffer; |
471 | ccw->count = ciw->count; | 590 | ccw->count = ciw->count; |
472 | 591 | ||
473 | cqr->device = device; | 592 | cqr->startdev = device; |
593 | cqr->memdev = device; | ||
594 | cqr->block = NULL; | ||
474 | cqr->expires = 10*HZ; | 595 | cqr->expires = 10*HZ; |
475 | cqr->lpm = lpm; | 596 | cqr->lpm = lpm; |
476 | clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); | 597 | clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); |
@@ -511,7 +632,7 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device, | |||
511 | /* | 632 | /* |
512 | * on success we update the user input parms | 633 | * on success we update the user input parms |
513 | */ | 634 | */ |
514 | dasd_sfree_request(cqr, cqr->device); | 635 | dasd_sfree_request(cqr, cqr->memdev); |
515 | if (ret) | 636 | if (ret) |
516 | goto out_error; | 637 | goto out_error; |
517 | 638 | ||
@@ -557,19 +678,19 @@ dasd_eckd_read_conf(struct dasd_device *device) | |||
557 | "data retrieved"); | 678 | "data retrieved"); |
558 | continue; /* no error */ | 679 | continue; /* no error */ |
559 | } | 680 | } |
560 | if (conf_len != sizeof (struct dasd_eckd_confdata)) { | 681 | if (conf_len != sizeof(struct dasd_eckd_confdata)) { |
561 | MESSAGE(KERN_WARNING, | 682 | MESSAGE(KERN_WARNING, |
562 | "sizes of configuration data mismatch" | 683 | "sizes of configuration data mismatch" |
563 | "%d (read) vs %ld (expected)", | 684 | "%d (read) vs %ld (expected)", |
564 | conf_len, | 685 | conf_len, |
565 | sizeof (struct dasd_eckd_confdata)); | 686 | sizeof(struct dasd_eckd_confdata)); |
566 | kfree(conf_data); | 687 | kfree(conf_data); |
567 | continue; /* no error */ | 688 | continue; /* no error */ |
568 | } | 689 | } |
569 | /* save first valid configuration data */ | 690 | /* save first valid configuration data */ |
570 | if (!conf_data_saved){ | 691 | if (!conf_data_saved){ |
571 | memcpy(&private->conf_data, conf_data, | 692 | memcpy(&private->conf_data, conf_data, |
572 | sizeof (struct dasd_eckd_confdata)); | 693 | sizeof(struct dasd_eckd_confdata)); |
573 | conf_data_saved++; | 694 | conf_data_saved++; |
574 | } | 695 | } |
575 | switch (((char *)conf_data)[242] & 0x07){ | 696 | switch (((char *)conf_data)[242] & 0x07){ |
@@ -586,39 +707,104 @@ dasd_eckd_read_conf(struct dasd_device *device) | |||
586 | return 0; | 707 | return 0; |
587 | } | 708 | } |
588 | 709 | ||
710 | static int dasd_eckd_read_features(struct dasd_device *device) | ||
711 | { | ||
712 | struct dasd_psf_prssd_data *prssdp; | ||
713 | struct dasd_rssd_features *features; | ||
714 | struct dasd_ccw_req *cqr; | ||
715 | struct ccw1 *ccw; | ||
716 | int rc; | ||
717 | struct dasd_eckd_private *private; | ||
718 | |||
719 | private = (struct dasd_eckd_private *) device->private; | ||
720 | cqr = dasd_smalloc_request(dasd_eckd_discipline.name, | ||
721 | 1 /* PSF */ + 1 /* RSSD */ , | ||
722 | (sizeof(struct dasd_psf_prssd_data) + | ||
723 | sizeof(struct dasd_rssd_features)), | ||
724 | device); | ||
725 | if (IS_ERR(cqr)) { | ||
726 | DEV_MESSAGE(KERN_WARNING, device, "%s", | ||
727 | "Could not allocate initialization request"); | ||
728 | return PTR_ERR(cqr); | ||
729 | } | ||
730 | cqr->startdev = device; | ||
731 | cqr->memdev = device; | ||
732 | cqr->block = NULL; | ||
733 | clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); | ||
734 | cqr->retries = 5; | ||
735 | cqr->expires = 10 * HZ; | ||
736 | |||
737 | /* Prepare for Read Subsystem Data */ | ||
738 | prssdp = (struct dasd_psf_prssd_data *) cqr->data; | ||
739 | memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data)); | ||
740 | prssdp->order = PSF_ORDER_PRSSD; | ||
741 | prssdp->suborder = 0x41; /* Read Feature Codes */ | ||
742 | /* all other bytes of prssdp must be zero */ | ||
743 | |||
744 | ccw = cqr->cpaddr; | ||
745 | ccw->cmd_code = DASD_ECKD_CCW_PSF; | ||
746 | ccw->count = sizeof(struct dasd_psf_prssd_data); | ||
747 | ccw->flags |= CCW_FLAG_CC; | ||
748 | ccw->cda = (__u32)(addr_t) prssdp; | ||
749 | |||
750 | /* Read Subsystem Data - feature codes */ | ||
751 | features = (struct dasd_rssd_features *) (prssdp + 1); | ||
752 | memset(features, 0, sizeof(struct dasd_rssd_features)); | ||
753 | |||
754 | ccw++; | ||
755 | ccw->cmd_code = DASD_ECKD_CCW_RSSD; | ||
756 | ccw->count = sizeof(struct dasd_rssd_features); | ||
757 | ccw->cda = (__u32)(addr_t) features; | ||
758 | |||
759 | cqr->buildclk = get_clock(); | ||
760 | cqr->status = DASD_CQR_FILLED; | ||
761 | rc = dasd_sleep_on(cqr); | ||
762 | if (rc == 0) { | ||
763 | prssdp = (struct dasd_psf_prssd_data *) cqr->data; | ||
764 | features = (struct dasd_rssd_features *) (prssdp + 1); | ||
765 | memcpy(&private->features, features, | ||
766 | sizeof(struct dasd_rssd_features)); | ||
767 | } | ||
768 | dasd_sfree_request(cqr, cqr->memdev); | ||
769 | return rc; | ||
770 | } | ||
771 | |||
772 | |||
589 | /* | 773 | /* |
590 | * Build CP for Perform Subsystem Function - SSC. | 774 | * Build CP for Perform Subsystem Function - SSC. |
591 | */ | 775 | */ |
592 | static struct dasd_ccw_req * | 776 | static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device) |
593 | dasd_eckd_build_psf_ssc(struct dasd_device *device) | ||
594 | { | 777 | { |
595 | struct dasd_ccw_req *cqr; | 778 | struct dasd_ccw_req *cqr; |
596 | struct dasd_psf_ssc_data *psf_ssc_data; | 779 | struct dasd_psf_ssc_data *psf_ssc_data; |
597 | struct ccw1 *ccw; | 780 | struct ccw1 *ccw; |
598 | 781 | ||
599 | cqr = dasd_smalloc_request("ECKD", 1 /* PSF */ , | 782 | cqr = dasd_smalloc_request("ECKD", 1 /* PSF */ , |
600 | sizeof(struct dasd_psf_ssc_data), | 783 | sizeof(struct dasd_psf_ssc_data), |
601 | device); | 784 | device); |
602 | 785 | ||
603 | if (IS_ERR(cqr)) { | 786 | if (IS_ERR(cqr)) { |
604 | DEV_MESSAGE(KERN_WARNING, device, "%s", | 787 | DEV_MESSAGE(KERN_WARNING, device, "%s", |
605 | "Could not allocate PSF-SSC request"); | 788 | "Could not allocate PSF-SSC request"); |
606 | return cqr; | 789 | return cqr; |
607 | } | 790 | } |
608 | psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data; | 791 | psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data; |
609 | psf_ssc_data->order = PSF_ORDER_SSC; | 792 | psf_ssc_data->order = PSF_ORDER_SSC; |
610 | psf_ssc_data->suborder = 0x08; | 793 | psf_ssc_data->suborder = 0x88; |
611 | 794 | psf_ssc_data->reserved[0] = 0x88; | |
612 | ccw = cqr->cpaddr; | 795 | |
613 | ccw->cmd_code = DASD_ECKD_CCW_PSF; | 796 | ccw = cqr->cpaddr; |
614 | ccw->cda = (__u32)(addr_t)psf_ssc_data; | 797 | ccw->cmd_code = DASD_ECKD_CCW_PSF; |
615 | ccw->count = 66; | 798 | ccw->cda = (__u32)(addr_t)psf_ssc_data; |
616 | 799 | ccw->count = 66; | |
617 | cqr->device = device; | 800 | |
618 | cqr->expires = 10*HZ; | 801 | cqr->startdev = device; |
619 | cqr->buildclk = get_clock(); | 802 | cqr->memdev = device; |
620 | cqr->status = DASD_CQR_FILLED; | 803 | cqr->block = NULL; |
621 | return cqr; | 804 | cqr->expires = 10*HZ; |
805 | cqr->buildclk = get_clock(); | ||
806 | cqr->status = DASD_CQR_FILLED; | ||
807 | return cqr; | ||
622 | } | 808 | } |
623 | 809 | ||
624 | /* | 810 | /* |
@@ -629,28 +815,28 @@ dasd_eckd_build_psf_ssc(struct dasd_device *device) | |||
629 | static int | 815 | static int |
630 | dasd_eckd_psf_ssc(struct dasd_device *device) | 816 | dasd_eckd_psf_ssc(struct dasd_device *device) |
631 | { | 817 | { |
632 | struct dasd_ccw_req *cqr; | 818 | struct dasd_ccw_req *cqr; |
633 | int rc; | 819 | int rc; |
634 | 820 | ||
635 | cqr = dasd_eckd_build_psf_ssc(device); | 821 | cqr = dasd_eckd_build_psf_ssc(device); |
636 | if (IS_ERR(cqr)) | 822 | if (IS_ERR(cqr)) |
637 | return PTR_ERR(cqr); | 823 | return PTR_ERR(cqr); |
638 | 824 | ||
639 | rc = dasd_sleep_on(cqr); | 825 | rc = dasd_sleep_on(cqr); |
640 | if (!rc) | 826 | if (!rc) |
641 | /* trigger CIO to reprobe devices */ | 827 | /* trigger CIO to reprobe devices */ |
642 | css_schedule_reprobe(); | 828 | css_schedule_reprobe(); |
643 | dasd_sfree_request(cqr, cqr->device); | 829 | dasd_sfree_request(cqr, cqr->memdev); |
644 | return rc; | 830 | return rc; |
645 | } | 831 | } |
646 | 832 | ||
647 | /* | 833 | /* |
648 | * Validate storage server of the current device. | 834 | * Validate storage server of the current device. |
649 | */ | 835 | */ |
650 | static int | 836 | static int dasd_eckd_validate_server(struct dasd_device *device) |
651 | dasd_eckd_validate_server(struct dasd_device *device, struct dasd_uid *uid) | ||
652 | { | 837 | { |
653 | int rc; | 838 | int rc; |
839 | struct dasd_eckd_private *private; | ||
654 | 840 | ||
655 | /* Currently PAV is the only reason to 'validate' server on LPAR */ | 841 | /* Currently PAV is the only reason to 'validate' server on LPAR */ |
656 | if (dasd_nopav || MACHINE_IS_VM) | 842 | if (dasd_nopav || MACHINE_IS_VM) |
@@ -659,9 +845,11 @@ dasd_eckd_validate_server(struct dasd_device *device, struct dasd_uid *uid) | |||
659 | rc = dasd_eckd_psf_ssc(device); | 845 | rc = dasd_eckd_psf_ssc(device); |
660 | /* maybe the requested feature is not available on the server, | 846 | /* maybe the requested feature is not available on the server, |
661 | * therefore just report the error and go ahead */ | 847 | * therefore just report the error and go ahead */ |
848 | private = (struct dasd_eckd_private *) device->private; | ||
662 | DEV_MESSAGE(KERN_INFO, device, | 849 | DEV_MESSAGE(KERN_INFO, device, |
663 | "PSF-SSC on storage subsystem %s.%s.%04x returned rc=%d", | 850 | "PSF-SSC on storage subsystem %s.%s.%04x returned rc=%d", |
664 | uid->vendor, uid->serial, uid->ssid, rc); | 851 | private->uid.vendor, private->uid.serial, |
852 | private->uid.ssid, rc); | ||
665 | /* RE-Read Configuration Data */ | 853 | /* RE-Read Configuration Data */ |
666 | return dasd_eckd_read_conf(device); | 854 | return dasd_eckd_read_conf(device); |
667 | } | 855 | } |
@@ -674,9 +862,9 @@ static int | |||
674 | dasd_eckd_check_characteristics(struct dasd_device *device) | 862 | dasd_eckd_check_characteristics(struct dasd_device *device) |
675 | { | 863 | { |
676 | struct dasd_eckd_private *private; | 864 | struct dasd_eckd_private *private; |
677 | struct dasd_uid uid; | 865 | struct dasd_block *block; |
678 | void *rdc_data; | 866 | void *rdc_data; |
679 | int rc; | 867 | int is_known, rc; |
680 | 868 | ||
681 | private = (struct dasd_eckd_private *) device->private; | 869 | private = (struct dasd_eckd_private *) device->private; |
682 | if (private == NULL) { | 870 | if (private == NULL) { |
@@ -699,27 +887,54 @@ dasd_eckd_check_characteristics(struct dasd_device *device) | |||
699 | /* Read Configuration Data */ | 887 | /* Read Configuration Data */ |
700 | rc = dasd_eckd_read_conf(device); | 888 | rc = dasd_eckd_read_conf(device); |
701 | if (rc) | 889 | if (rc) |
702 | return rc; | 890 | goto out_err1; |
703 | 891 | ||
704 | /* Generate device unique id and register in devmap */ | 892 | /* Generate device unique id and register in devmap */ |
705 | rc = dasd_eckd_generate_uid(device, &uid); | 893 | rc = dasd_eckd_generate_uid(device, &private->uid); |
706 | if (rc) | 894 | if (rc) |
707 | return rc; | 895 | goto out_err1; |
708 | rc = dasd_set_uid(device->cdev, &uid); | 896 | dasd_set_uid(device->cdev, &private->uid); |
709 | if (rc == 1) /* new server found */ | 897 | |
710 | rc = dasd_eckd_validate_server(device, &uid); | 898 | if (private->uid.type == UA_BASE_DEVICE) { |
899 | block = dasd_alloc_block(); | ||
900 | if (IS_ERR(block)) { | ||
901 | DEV_MESSAGE(KERN_WARNING, device, "%s", | ||
902 | "could not allocate dasd block structure"); | ||
903 | rc = PTR_ERR(block); | ||
904 | goto out_err1; | ||
905 | } | ||
906 | device->block = block; | ||
907 | block->base = device; | ||
908 | } | ||
909 | |||
910 | /* register lcu with alias handling, enable PAV if this is a new lcu */ | ||
911 | is_known = dasd_alias_make_device_known_to_lcu(device); | ||
912 | if (is_known < 0) { | ||
913 | rc = is_known; | ||
914 | goto out_err2; | ||
915 | } | ||
916 | if (!is_known) { | ||
917 | /* new lcu found */ | ||
918 | rc = dasd_eckd_validate_server(device); /* will switch pav on */ | ||
919 | if (rc) | ||
920 | goto out_err3; | ||
921 | } | ||
922 | |||
923 | /* Read Feature Codes */ | ||
924 | rc = dasd_eckd_read_features(device); | ||
711 | if (rc) | 925 | if (rc) |
712 | return rc; | 926 | goto out_err3; |
713 | 927 | ||
714 | /* Read Device Characteristics */ | 928 | /* Read Device Characteristics */ |
715 | rdc_data = (void *) &(private->rdc_data); | 929 | rdc_data = (void *) &(private->rdc_data); |
716 | memset(rdc_data, 0, sizeof(rdc_data)); | 930 | memset(rdc_data, 0, sizeof(rdc_data)); |
717 | rc = dasd_generic_read_dev_chars(device, "ECKD", &rdc_data, 64); | 931 | rc = dasd_generic_read_dev_chars(device, "ECKD", &rdc_data, 64); |
718 | if (rc) | 932 | if (rc) { |
719 | DEV_MESSAGE(KERN_WARNING, device, | 933 | DEV_MESSAGE(KERN_WARNING, device, |
720 | "Read device characteristics returned " | 934 | "Read device characteristics returned " |
721 | "rc=%d", rc); | 935 | "rc=%d", rc); |
722 | 936 | goto out_err3; | |
937 | } | ||
723 | DEV_MESSAGE(KERN_INFO, device, | 938 | DEV_MESSAGE(KERN_INFO, device, |
724 | "%04X/%02X(CU:%04X/%02X) Cyl:%d Head:%d Sec:%d", | 939 | "%04X/%02X(CU:%04X/%02X) Cyl:%d Head:%d Sec:%d", |
725 | private->rdc_data.dev_type, | 940 | private->rdc_data.dev_type, |
@@ -729,9 +944,24 @@ dasd_eckd_check_characteristics(struct dasd_device *device) | |||
729 | private->rdc_data.no_cyl, | 944 | private->rdc_data.no_cyl, |
730 | private->rdc_data.trk_per_cyl, | 945 | private->rdc_data.trk_per_cyl, |
731 | private->rdc_data.sec_per_trk); | 946 | private->rdc_data.sec_per_trk); |
947 | return 0; | ||
948 | |||
949 | out_err3: | ||
950 | dasd_alias_disconnect_device_from_lcu(device); | ||
951 | out_err2: | ||
952 | dasd_free_block(device->block); | ||
953 | device->block = NULL; | ||
954 | out_err1: | ||
955 | kfree(device->private); | ||
956 | device->private = NULL; | ||
732 | return rc; | 957 | return rc; |
733 | } | 958 | } |
734 | 959 | ||
960 | static void dasd_eckd_uncheck_device(struct dasd_device *device) | ||
961 | { | ||
962 | dasd_alias_disconnect_device_from_lcu(device); | ||
963 | } | ||
964 | |||
735 | static struct dasd_ccw_req * | 965 | static struct dasd_ccw_req * |
736 | dasd_eckd_analysis_ccw(struct dasd_device *device) | 966 | dasd_eckd_analysis_ccw(struct dasd_device *device) |
737 | { | 967 | { |
@@ -755,7 +985,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device) | |||
755 | /* Define extent for the first 3 tracks. */ | 985 | /* Define extent for the first 3 tracks. */ |
756 | define_extent(ccw++, cqr->data, 0, 2, | 986 | define_extent(ccw++, cqr->data, 0, 2, |
757 | DASD_ECKD_CCW_READ_COUNT, device); | 987 | DASD_ECKD_CCW_READ_COUNT, device); |
758 | LO_data = cqr->data + sizeof (struct DE_eckd_data); | 988 | LO_data = cqr->data + sizeof(struct DE_eckd_data); |
759 | /* Locate record for the first 4 records on track 0. */ | 989 | /* Locate record for the first 4 records on track 0. */ |
760 | ccw[-1].flags |= CCW_FLAG_CC; | 990 | ccw[-1].flags |= CCW_FLAG_CC; |
761 | locate_record(ccw++, LO_data++, 0, 0, 4, | 991 | locate_record(ccw++, LO_data++, 0, 0, 4, |
@@ -783,7 +1013,9 @@ dasd_eckd_analysis_ccw(struct dasd_device *device) | |||
783 | ccw->count = 8; | 1013 | ccw->count = 8; |
784 | ccw->cda = (__u32)(addr_t) count_data; | 1014 | ccw->cda = (__u32)(addr_t) count_data; |
785 | 1015 | ||
786 | cqr->device = device; | 1016 | cqr->block = NULL; |
1017 | cqr->startdev = device; | ||
1018 | cqr->memdev = device; | ||
787 | cqr->retries = 0; | 1019 | cqr->retries = 0; |
788 | cqr->buildclk = get_clock(); | 1020 | cqr->buildclk = get_clock(); |
789 | cqr->status = DASD_CQR_FILLED; | 1021 | cqr->status = DASD_CQR_FILLED; |
@@ -803,7 +1035,7 @@ dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr, void *data) | |||
803 | struct dasd_eckd_private *private; | 1035 | struct dasd_eckd_private *private; |
804 | struct dasd_device *device; | 1036 | struct dasd_device *device; |
805 | 1037 | ||
806 | device = init_cqr->device; | 1038 | device = init_cqr->startdev; |
807 | private = (struct dasd_eckd_private *) device->private; | 1039 | private = (struct dasd_eckd_private *) device->private; |
808 | private->init_cqr_status = init_cqr->status; | 1040 | private->init_cqr_status = init_cqr->status; |
809 | dasd_sfree_request(init_cqr, device); | 1041 | dasd_sfree_request(init_cqr, device); |
@@ -811,13 +1043,13 @@ dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr, void *data) | |||
811 | } | 1043 | } |
812 | 1044 | ||
813 | static int | 1045 | static int |
814 | dasd_eckd_start_analysis(struct dasd_device *device) | 1046 | dasd_eckd_start_analysis(struct dasd_block *block) |
815 | { | 1047 | { |
816 | struct dasd_eckd_private *private; | 1048 | struct dasd_eckd_private *private; |
817 | struct dasd_ccw_req *init_cqr; | 1049 | struct dasd_ccw_req *init_cqr; |
818 | 1050 | ||
819 | private = (struct dasd_eckd_private *) device->private; | 1051 | private = (struct dasd_eckd_private *) block->base->private; |
820 | init_cqr = dasd_eckd_analysis_ccw(device); | 1052 | init_cqr = dasd_eckd_analysis_ccw(block->base); |
821 | if (IS_ERR(init_cqr)) | 1053 | if (IS_ERR(init_cqr)) |
822 | return PTR_ERR(init_cqr); | 1054 | return PTR_ERR(init_cqr); |
823 | init_cqr->callback = dasd_eckd_analysis_callback; | 1055 | init_cqr->callback = dasd_eckd_analysis_callback; |
@@ -828,13 +1060,15 @@ dasd_eckd_start_analysis(struct dasd_device *device) | |||
828 | } | 1060 | } |
829 | 1061 | ||
830 | static int | 1062 | static int |
831 | dasd_eckd_end_analysis(struct dasd_device *device) | 1063 | dasd_eckd_end_analysis(struct dasd_block *block) |
832 | { | 1064 | { |
1065 | struct dasd_device *device; | ||
833 | struct dasd_eckd_private *private; | 1066 | struct dasd_eckd_private *private; |
834 | struct eckd_count *count_area; | 1067 | struct eckd_count *count_area; |
835 | unsigned int sb, blk_per_trk; | 1068 | unsigned int sb, blk_per_trk; |
836 | int status, i; | 1069 | int status, i; |
837 | 1070 | ||
1071 | device = block->base; | ||
838 | private = (struct dasd_eckd_private *) device->private; | 1072 | private = (struct dasd_eckd_private *) device->private; |
839 | status = private->init_cqr_status; | 1073 | status = private->init_cqr_status; |
840 | private->init_cqr_status = -1; | 1074 | private->init_cqr_status = -1; |
@@ -846,7 +1080,7 @@ dasd_eckd_end_analysis(struct dasd_device *device) | |||
846 | 1080 | ||
847 | private->uses_cdl = 1; | 1081 | private->uses_cdl = 1; |
848 | /* Calculate number of blocks/records per track. */ | 1082 | /* Calculate number of blocks/records per track. */ |
849 | blk_per_trk = recs_per_track(&private->rdc_data, 0, device->bp_block); | 1083 | blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block); |
850 | /* Check Track 0 for Compatible Disk Layout */ | 1084 | /* Check Track 0 for Compatible Disk Layout */ |
851 | count_area = NULL; | 1085 | count_area = NULL; |
852 | for (i = 0; i < 3; i++) { | 1086 | for (i = 0; i < 3; i++) { |
@@ -876,56 +1110,65 @@ dasd_eckd_end_analysis(struct dasd_device *device) | |||
876 | if (count_area != NULL && count_area->kl == 0) { | 1110 | if (count_area != NULL && count_area->kl == 0) { |
877 | /* we found nothing violating our disk layout */ | 1111 | /* we found nothing violating our disk layout */ |
878 | if (dasd_check_blocksize(count_area->dl) == 0) | 1112 | if (dasd_check_blocksize(count_area->dl) == 0) |
879 | device->bp_block = count_area->dl; | 1113 | block->bp_block = count_area->dl; |
880 | } | 1114 | } |
881 | if (device->bp_block == 0) { | 1115 | if (block->bp_block == 0) { |
882 | DEV_MESSAGE(KERN_WARNING, device, "%s", | 1116 | DEV_MESSAGE(KERN_WARNING, device, "%s", |
883 | "Volume has incompatible disk layout"); | 1117 | "Volume has incompatible disk layout"); |
884 | return -EMEDIUMTYPE; | 1118 | return -EMEDIUMTYPE; |
885 | } | 1119 | } |
886 | device->s2b_shift = 0; /* bits to shift 512 to get a block */ | 1120 | block->s2b_shift = 0; /* bits to shift 512 to get a block */ |
887 | for (sb = 512; sb < device->bp_block; sb = sb << 1) | 1121 | for (sb = 512; sb < block->bp_block; sb = sb << 1) |
888 | device->s2b_shift++; | 1122 | block->s2b_shift++; |
889 | 1123 | ||
890 | blk_per_trk = recs_per_track(&private->rdc_data, 0, device->bp_block); | 1124 | blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block); |
891 | device->blocks = (private->rdc_data.no_cyl * | 1125 | block->blocks = (private->rdc_data.no_cyl * |
892 | private->rdc_data.trk_per_cyl * | 1126 | private->rdc_data.trk_per_cyl * |
893 | blk_per_trk); | 1127 | blk_per_trk); |
894 | 1128 | ||
895 | DEV_MESSAGE(KERN_INFO, device, | 1129 | DEV_MESSAGE(KERN_INFO, device, |
896 | "(%dkB blks): %dkB at %dkB/trk %s", | 1130 | "(%dkB blks): %dkB at %dkB/trk %s", |
897 | (device->bp_block >> 10), | 1131 | (block->bp_block >> 10), |
898 | ((private->rdc_data.no_cyl * | 1132 | ((private->rdc_data.no_cyl * |
899 | private->rdc_data.trk_per_cyl * | 1133 | private->rdc_data.trk_per_cyl * |
900 | blk_per_trk * (device->bp_block >> 9)) >> 1), | 1134 | blk_per_trk * (block->bp_block >> 9)) >> 1), |
901 | ((blk_per_trk * device->bp_block) >> 10), | 1135 | ((blk_per_trk * block->bp_block) >> 10), |
902 | private->uses_cdl ? | 1136 | private->uses_cdl ? |
903 | "compatible disk layout" : "linux disk layout"); | 1137 | "compatible disk layout" : "linux disk layout"); |
904 | 1138 | ||
905 | return 0; | 1139 | return 0; |
906 | } | 1140 | } |
907 | 1141 | ||
908 | static int | 1142 | static int dasd_eckd_do_analysis(struct dasd_block *block) |
909 | dasd_eckd_do_analysis(struct dasd_device *device) | ||
910 | { | 1143 | { |
911 | struct dasd_eckd_private *private; | 1144 | struct dasd_eckd_private *private; |
912 | 1145 | ||
913 | private = (struct dasd_eckd_private *) device->private; | 1146 | private = (struct dasd_eckd_private *) block->base->private; |
914 | if (private->init_cqr_status < 0) | 1147 | if (private->init_cqr_status < 0) |
915 | return dasd_eckd_start_analysis(device); | 1148 | return dasd_eckd_start_analysis(block); |
916 | else | 1149 | else |
917 | return dasd_eckd_end_analysis(device); | 1150 | return dasd_eckd_end_analysis(block); |
918 | } | 1151 | } |
919 | 1152 | ||
1153 | static int dasd_eckd_ready_to_online(struct dasd_device *device) | ||
1154 | { | ||
1155 | return dasd_alias_add_device(device); | ||
1156 | }; | ||
1157 | |||
1158 | static int dasd_eckd_online_to_ready(struct dasd_device *device) | ||
1159 | { | ||
1160 | return dasd_alias_remove_device(device); | ||
1161 | }; | ||
1162 | |||
920 | static int | 1163 | static int |
921 | dasd_eckd_fill_geometry(struct dasd_device *device, struct hd_geometry *geo) | 1164 | dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo) |
922 | { | 1165 | { |
923 | struct dasd_eckd_private *private; | 1166 | struct dasd_eckd_private *private; |
924 | 1167 | ||
925 | private = (struct dasd_eckd_private *) device->private; | 1168 | private = (struct dasd_eckd_private *) block->base->private; |
926 | if (dasd_check_blocksize(device->bp_block) == 0) { | 1169 | if (dasd_check_blocksize(block->bp_block) == 0) { |
927 | geo->sectors = recs_per_track(&private->rdc_data, | 1170 | geo->sectors = recs_per_track(&private->rdc_data, |
928 | 0, device->bp_block); | 1171 | 0, block->bp_block); |
929 | } | 1172 | } |
930 | geo->cylinders = private->rdc_data.no_cyl; | 1173 | geo->cylinders = private->rdc_data.no_cyl; |
931 | geo->heads = private->rdc_data.trk_per_cyl; | 1174 | geo->heads = private->rdc_data.trk_per_cyl; |
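
The capacity figures printed at the end of dasd_eckd_end_analysis above follow from cylinders x tracks/cylinder x blocks/track; a stand-alone sketch with example numbers (the numbers are made up, only the arithmetic matches):

#include <stdio.h>

/* Total blocks = cylinders * tracks/cylinder * blocks/track; the kB
 * figures are derived from the block size counted in 512-byte halves. */
int main(void)
{
	unsigned int no_cyl = 10017, trk_per_cyl = 15, blk_per_trk = 12;
	unsigned int bp_block = 4096;
	unsigned long blocks, total_kb, trk_kb;

	blocks = (unsigned long) no_cyl * trk_per_cyl * blk_per_trk;
	total_kb = (blocks * (bp_block >> 9)) >> 1;
	trk_kb = ((unsigned long) blk_per_trk * bp_block) >> 10;

	printf("%lu blocks, %lukB total, %lukB per track\n",
	       blocks, total_kb, trk_kb);
	return 0;
}
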
@@ -1037,7 +1280,7 @@ dasd_eckd_format_device(struct dasd_device * device, | |||
1037 | locate_record(ccw++, (struct LO_eckd_data *) data, | 1280 | locate_record(ccw++, (struct LO_eckd_data *) data, |
1038 | fdata->start_unit, 0, rpt + 1, | 1281 | fdata->start_unit, 0, rpt + 1, |
1039 | DASD_ECKD_CCW_WRITE_RECORD_ZERO, device, | 1282 | DASD_ECKD_CCW_WRITE_RECORD_ZERO, device, |
1040 | device->bp_block); | 1283 | device->block->bp_block); |
1041 | data += sizeof(struct LO_eckd_data); | 1284 | data += sizeof(struct LO_eckd_data); |
1042 | break; | 1285 | break; |
1043 | case 0x04: /* Invalidate track. */ | 1286 | case 0x04: /* Invalidate track. */ |
@@ -1110,43 +1353,28 @@ dasd_eckd_format_device(struct dasd_device * device, | |||
1110 | ccw++; | 1353 | ccw++; |
1111 | } | 1354 | } |
1112 | } | 1355 | } |
1113 | fcp->device = device; | 1356 | fcp->startdev = device; |
1114 | fcp->retries = 2; /* set retry counter to enable ERP */ | 1357 | fcp->memdev = device; |
1358 | clear_bit(DASD_CQR_FLAGS_USE_ERP, &fcp->flags); | ||
1359 | fcp->retries = 5; /* set retry counter to enable default ERP */ | ||
1115 | fcp->buildclk = get_clock(); | 1360 | fcp->buildclk = get_clock(); |
1116 | fcp->status = DASD_CQR_FILLED; | 1361 | fcp->status = DASD_CQR_FILLED; |
1117 | return fcp; | 1362 | return fcp; |
1118 | } | 1363 | } |
1119 | 1364 | ||
1120 | static dasd_era_t | 1365 | static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr) |
1121 | dasd_eckd_examine_error(struct dasd_ccw_req * cqr, struct irb * irb) | ||
1122 | { | 1366 | { |
1123 | struct dasd_device *device = (struct dasd_device *) cqr->device; | 1367 | cqr->status = DASD_CQR_FILLED; |
1124 | struct ccw_device *cdev = device->cdev; | 1368 | if (cqr->block && (cqr->startdev != cqr->block->base)) { |
1125 | 1369 | dasd_eckd_reset_ccw_to_base_io(cqr); | |
1126 | if (irb->scsw.cstat == 0x00 && | 1370 | cqr->startdev = cqr->block->base; |
1127 | irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END)) | ||
1128 | return dasd_era_none; | ||
1129 | |||
1130 | switch (cdev->id.cu_type) { | ||
1131 | case 0x3990: | ||
1132 | case 0x2105: | ||
1133 | case 0x2107: | ||
1134 | case 0x1750: | ||
1135 | return dasd_3990_erp_examine(cqr, irb); | ||
1136 | case 0x9343: | ||
1137 | return dasd_9343_erp_examine(cqr, irb); | ||
1138 | case 0x3880: | ||
1139 | default: | ||
1140 | DEV_MESSAGE(KERN_WARNING, device, "%s", | ||
1141 | "default (unknown CU type) - RECOVERABLE return"); | ||
1142 | return dasd_era_recover; | ||
1143 | } | 1371 | } |
1144 | } | 1372 | }; |
1145 | 1373 | ||
1146 | static dasd_erp_fn_t | 1374 | static dasd_erp_fn_t |
1147 | dasd_eckd_erp_action(struct dasd_ccw_req * cqr) | 1375 | dasd_eckd_erp_action(struct dasd_ccw_req * cqr) |
1148 | { | 1376 | { |
1149 | struct dasd_device *device = (struct dasd_device *) cqr->device; | 1377 | struct dasd_device *device = (struct dasd_device *) cqr->startdev; |
1150 | struct ccw_device *cdev = device->cdev; | 1378 | struct ccw_device *cdev = device->cdev; |
1151 | 1379 | ||
1152 | switch (cdev->id.cu_type) { | 1380 | switch (cdev->id.cu_type) { |
@@ -1168,8 +1396,37 @@ dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr) | |||
1168 | return dasd_default_erp_postaction; | 1396 | return dasd_default_erp_postaction; |
1169 | } | 1397 | } |
1170 | 1398 | ||
1171 | static struct dasd_ccw_req * | 1399 | |
1172 | dasd_eckd_build_cp(struct dasd_device * device, struct request *req) | 1400 | static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device, |
1401 | struct irb *irb) | ||
1402 | { | ||
1403 | char mask; | ||
1404 | |||
1405 | /* first of all check for state change pending interrupt */ | ||
1406 | mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; | ||
1407 | if ((irb->scsw.dstat & mask) == mask) { | ||
1408 | dasd_generic_handle_state_change(device); | ||
1409 | return; | ||
1410 | } | ||
1411 | |||
1412 | /* summary unit check */ | ||
1413 | if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && irb->ecw[7] == 0x0D) { | ||
1414 | dasd_alias_handle_summary_unit_check(device, irb); | ||
1415 | return; | ||
1416 | } | ||
1417 | |||
1418 | /* just report other unsolicited interrupts */ | ||
1419 | DEV_MESSAGE(KERN_DEBUG, device, "%s", | ||
1420 | "unsolicited interrupt received"); | ||
1421 | device->discipline->dump_sense(device, NULL, irb); | ||
1422 | dasd_schedule_device_bh(device); | ||
1423 | |||
1424 | return; | ||
1425 | }; | ||
1426 | |||
1427 | static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev, | ||
1428 | struct dasd_block *block, | ||
1429 | struct request *req) | ||
1173 | { | 1430 | { |
1174 | struct dasd_eckd_private *private; | 1431 | struct dasd_eckd_private *private; |
1175 | unsigned long *idaws; | 1432 | unsigned long *idaws; |
@@ -1185,8 +1442,11 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req) | |||
1185 | sector_t first_trk, last_trk; | 1442 | sector_t first_trk, last_trk; |
1186 | unsigned int first_offs, last_offs; | 1443 | unsigned int first_offs, last_offs; |
1187 | unsigned char cmd, rcmd; | 1444 | unsigned char cmd, rcmd; |
1445 | int use_prefix; | ||
1446 | struct dasd_device *basedev; | ||
1188 | 1447 | ||
1189 | private = (struct dasd_eckd_private *) device->private; | 1448 | basedev = block->base; |
1449 | private = (struct dasd_eckd_private *) basedev->private; | ||
1190 | if (rq_data_dir(req) == READ) | 1450 | if (rq_data_dir(req) == READ) |
1191 | cmd = DASD_ECKD_CCW_READ_MT; | 1451 | cmd = DASD_ECKD_CCW_READ_MT; |
1192 | else if (rq_data_dir(req) == WRITE) | 1452 | else if (rq_data_dir(req) == WRITE) |
@@ -1194,13 +1454,13 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req) | |||
1194 | else | 1454 | else |
1195 | return ERR_PTR(-EINVAL); | 1455 | return ERR_PTR(-EINVAL); |
1196 | /* Calculate number of blocks/records per track. */ | 1456 | /* Calculate number of blocks/records per track. */ |
1197 | blksize = device->bp_block; | 1457 | blksize = block->bp_block; |
1198 | blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); | 1458 | blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); |
1199 | /* Calculate record id of first and last block. */ | 1459 | /* Calculate record id of first and last block. */ |
1200 | first_rec = first_trk = req->sector >> device->s2b_shift; | 1460 | first_rec = first_trk = req->sector >> block->s2b_shift; |
1201 | first_offs = sector_div(first_trk, blk_per_trk); | 1461 | first_offs = sector_div(first_trk, blk_per_trk); |
1202 | last_rec = last_trk = | 1462 | last_rec = last_trk = |
1203 | (req->sector + req->nr_sectors - 1) >> device->s2b_shift; | 1463 | (req->sector + req->nr_sectors - 1) >> block->s2b_shift; |
1204 | last_offs = sector_div(last_trk, blk_per_trk); | 1464 | last_offs = sector_div(last_trk, blk_per_trk); |
1205 | /* Check struct bio and count the number of blocks for the request. */ | 1465 | /* Check struct bio and count the number of blocks for the request. */ |
1206 | count = 0; | 1466 | count = 0; |
@@ -1209,20 +1469,33 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req) | |||
1209 | if (bv->bv_len & (blksize - 1)) | 1469 | if (bv->bv_len & (blksize - 1)) |
1210 | /* Eckd can only do full blocks. */ | 1470 | /* Eckd can only do full blocks. */ |
1211 | return ERR_PTR(-EINVAL); | 1471 | return ERR_PTR(-EINVAL); |
1212 | count += bv->bv_len >> (device->s2b_shift + 9); | 1472 | count += bv->bv_len >> (block->s2b_shift + 9); |
1213 | #if defined(CONFIG_64BIT) | 1473 | #if defined(CONFIG_64BIT) |
1214 | if (idal_is_needed (page_address(bv->bv_page), bv->bv_len)) | 1474 | if (idal_is_needed (page_address(bv->bv_page), bv->bv_len)) |
1215 | cidaw += bv->bv_len >> (device->s2b_shift + 9); | 1475 | cidaw += bv->bv_len >> (block->s2b_shift + 9); |
1216 | #endif | 1476 | #endif |
1217 | } | 1477 | } |
1218 | /* Paranoia. */ | 1478 | /* Paranoia. */ |
1219 | if (count != last_rec - first_rec + 1) | 1479 | if (count != last_rec - first_rec + 1) |
1220 | return ERR_PTR(-EINVAL); | 1480 | return ERR_PTR(-EINVAL); |
1221 | /* 1x define extent + 1x locate record + number of blocks */ | 1481 | |
1222 | cplength = 2 + count; | 1482 | /* use the prefix command if available */ |
1223 | /* 1x define extent + 1x locate record + cidaws*sizeof(long) */ | 1483 | use_prefix = private->features.feature[8] & 0x01; |
1224 | datasize = sizeof(struct DE_eckd_data) + sizeof(struct LO_eckd_data) + | 1484 | if (use_prefix) { |
1225 | cidaw * sizeof(unsigned long); | 1485 | /* 1x prefix + number of blocks */ |
1486 | cplength = 2 + count; | ||
1487 | /* 1x prefix + cidaws*sizeof(long) */ | ||
1488 | datasize = sizeof(struct PFX_eckd_data) + | ||
1489 | sizeof(struct LO_eckd_data) + | ||
1490 | cidaw * sizeof(unsigned long); | ||
1491 | } else { | ||
1492 | /* 1x define extent + 1x locate record + number of blocks */ | ||
1493 | cplength = 2 + count; | ||
1494 | /* 1x define extent + 1x locate record + cidaws*sizeof(long) */ | ||
1495 | datasize = sizeof(struct DE_eckd_data) + | ||
1496 | sizeof(struct LO_eckd_data) + | ||
1497 | cidaw * sizeof(unsigned long); | ||
1498 | } | ||
1226 | /* Find out the number of additional locate record ccws for cdl. */ | 1499 | /* Find out the number of additional locate record ccws for cdl. */ |
1227 | if (private->uses_cdl && first_rec < 2*blk_per_trk) { | 1500 | if (private->uses_cdl && first_rec < 2*blk_per_trk) { |
1228 | if (last_rec >= 2*blk_per_trk) | 1501 | if (last_rec >= 2*blk_per_trk) |
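The sizing block in the hunk above chooses between the new prefix layout and the classic define extent plus locate record layout, depending on whether the storage server advertises the prefix command (feature byte 8, bit 0 of the Read Subsystem Data feature map). A small stand-alone sketch of that calculation; SZ_PFX, SZ_DE and SZ_LO are placeholder sizes, not the real packed struct sizes, and cp_datasize is illustrative only.

#include <stdio.h>

#define SZ_PFX 64	/* stand-in for sizeof(struct PFX_eckd_data) */
#define SZ_DE  16	/* stand-in for sizeof(struct DE_eckd_data)  */
#define SZ_LO  16	/* stand-in for sizeof(struct LO_eckd_data)  */

/* both variants reserve room for locate record data and the IDAW list */
static size_t cp_datasize(int use_prefix, int cidaw)
{
	size_t fixed = use_prefix ? SZ_PFX : SZ_DE;

	return fixed + SZ_LO + cidaw * sizeof(unsigned long);
}

int main(void)
{
	printf("prefix variant:        %zu bytes\n", cp_datasize(1, 8));
	printf("define extent variant: %zu bytes\n", cp_datasize(0, 8));
	return 0;
}

In both variants the channel program length stays at 2 + count CCWs; only the size of the parameter area in cqr->data differs.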
@@ -1232,26 +1505,42 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req) | |||
1232 | } | 1505 | } |
1233 | /* Allocate the ccw request. */ | 1506 | /* Allocate the ccw request. */ |
1234 | cqr = dasd_smalloc_request(dasd_eckd_discipline.name, | 1507 | cqr = dasd_smalloc_request(dasd_eckd_discipline.name, |
1235 | cplength, datasize, device); | 1508 | cplength, datasize, startdev); |
1236 | if (IS_ERR(cqr)) | 1509 | if (IS_ERR(cqr)) |
1237 | return cqr; | 1510 | return cqr; |
1238 | ccw = cqr->cpaddr; | 1511 | ccw = cqr->cpaddr; |
1239 | /* First ccw is define extent. */ | 1512 | /* First ccw is define extent or prefix. */ |
1240 | if (define_extent(ccw++, cqr->data, first_trk, | 1513 | if (use_prefix) { |
1241 | last_trk, cmd, device) == -EAGAIN) { | 1514 | if (prefix(ccw++, cqr->data, first_trk, |
1242 | /* Clock not in sync and XRC is enabled. Try again later. */ | 1515 | last_trk, cmd, basedev, startdev) == -EAGAIN) { |
1243 | dasd_sfree_request(cqr, device); | 1516 | /* Clock not in sync and XRC is enabled. |
1244 | return ERR_PTR(-EAGAIN); | 1517 | * Try again later. |
1518 | */ | ||
1519 | dasd_sfree_request(cqr, startdev); | ||
1520 | return ERR_PTR(-EAGAIN); | ||
1521 | } | ||
1522 | idaws = (unsigned long *) (cqr->data + | ||
1523 | sizeof(struct PFX_eckd_data)); | ||
1524 | } else { | ||
1525 | if (define_extent(ccw++, cqr->data, first_trk, | ||
1526 | last_trk, cmd, startdev) == -EAGAIN) { | ||
1527 | /* Clock not in sync and XRC is enabled. | ||
1528 | * Try again later. | ||
1529 | */ | ||
1530 | dasd_sfree_request(cqr, startdev); | ||
1531 | return ERR_PTR(-EAGAIN); | ||
1532 | } | ||
1533 | idaws = (unsigned long *) (cqr->data + | ||
1534 | sizeof(struct DE_eckd_data)); | ||
1245 | } | 1535 | } |
1246 | /* Build locate_record+read/write/ccws. */ | 1536 | /* Build locate_record+read/write/ccws. */ |
1247 | idaws = (unsigned long *) (cqr->data + sizeof(struct DE_eckd_data)); | ||
1248 | LO_data = (struct LO_eckd_data *) (idaws + cidaw); | 1537 | LO_data = (struct LO_eckd_data *) (idaws + cidaw); |
1249 | recid = first_rec; | 1538 | recid = first_rec; |
1250 | if (private->uses_cdl == 0 || recid > 2*blk_per_trk) { | 1539 | if (private->uses_cdl == 0 || recid > 2*blk_per_trk) { |
1251 | /* Only standard blocks so there is just one locate record. */ | 1540 | /* Only standard blocks so there is just one locate record. */ |
1252 | ccw[-1].flags |= CCW_FLAG_CC; | 1541 | ccw[-1].flags |= CCW_FLAG_CC; |
1253 | locate_record(ccw++, LO_data++, first_trk, first_offs + 1, | 1542 | locate_record(ccw++, LO_data++, first_trk, first_offs + 1, |
1254 | last_rec - recid + 1, cmd, device, blksize); | 1543 | last_rec - recid + 1, cmd, basedev, blksize); |
1255 | } | 1544 | } |
1256 | rq_for_each_segment(bv, req, iter) { | 1545 | rq_for_each_segment(bv, req, iter) { |
1257 | dst = page_address(bv->bv_page) + bv->bv_offset; | 1546 | dst = page_address(bv->bv_page) + bv->bv_offset; |
@@ -1281,7 +1570,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req) | |||
1281 | ccw[-1].flags |= CCW_FLAG_CC; | 1570 | ccw[-1].flags |= CCW_FLAG_CC; |
1282 | locate_record(ccw++, LO_data++, | 1571 | locate_record(ccw++, LO_data++, |
1283 | trkid, recoffs + 1, | 1572 | trkid, recoffs + 1, |
1284 | 1, rcmd, device, count); | 1573 | 1, rcmd, basedev, count); |
1285 | } | 1574 | } |
1286 | /* Locate record for standard blocks ? */ | 1575 | /* Locate record for standard blocks ? */ |
1287 | if (private->uses_cdl && recid == 2*blk_per_trk) { | 1576 | if (private->uses_cdl && recid == 2*blk_per_trk) { |
@@ -1289,7 +1578,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req) | |||
1289 | locate_record(ccw++, LO_data++, | 1578 | locate_record(ccw++, LO_data++, |
1290 | trkid, recoffs + 1, | 1579 | trkid, recoffs + 1, |
1291 | last_rec - recid + 1, | 1580 | last_rec - recid + 1, |
1292 | cmd, device, count); | 1581 | cmd, basedev, count); |
1293 | } | 1582 | } |
1294 | /* Read/write ccw. */ | 1583 | /* Read/write ccw. */ |
1295 | ccw[-1].flags |= CCW_FLAG_CC; | 1584 | ccw[-1].flags |= CCW_FLAG_CC; |
@@ -1310,7 +1599,9 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req) | |||
1310 | } | 1599 | } |
1311 | if (req->cmd_flags & REQ_FAILFAST) | 1600 | if (req->cmd_flags & REQ_FAILFAST) |
1312 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); | 1601 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); |
1313 | cqr->device = device; | 1602 | cqr->startdev = startdev; |
1603 | cqr->memdev = startdev; | ||
1604 | cqr->block = block; | ||
1314 | cqr->expires = 5 * 60 * HZ; /* 5 minutes */ | 1605 | cqr->expires = 5 * 60 * HZ; /* 5 minutes */ |
1315 | cqr->lpm = private->path_data.ppm; | 1606 | cqr->lpm = private->path_data.ppm; |
1316 | cqr->retries = 256; | 1607 | cqr->retries = 256; |
@@ -1333,10 +1624,10 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req) | |||
1333 | 1624 | ||
1334 | if (!dasd_page_cache) | 1625 | if (!dasd_page_cache) |
1335 | goto out; | 1626 | goto out; |
1336 | private = (struct dasd_eckd_private *) cqr->device->private; | 1627 | private = (struct dasd_eckd_private *) cqr->block->base->private; |
1337 | blksize = cqr->device->bp_block; | 1628 | blksize = cqr->block->bp_block; |
1338 | blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); | 1629 | blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); |
1339 | recid = req->sector >> cqr->device->s2b_shift; | 1630 | recid = req->sector >> cqr->block->s2b_shift; |
1340 | ccw = cqr->cpaddr; | 1631 | ccw = cqr->cpaddr; |
1341 | /* Skip over define extent & locate record. */ | 1632 | /* Skip over define extent & locate record. */ |
1342 | ccw++; | 1633 | ccw++; |
@@ -1367,10 +1658,71 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req) | |||
1367 | } | 1658 | } |
1368 | out: | 1659 | out: |
1369 | status = cqr->status == DASD_CQR_DONE; | 1660 | status = cqr->status == DASD_CQR_DONE; |
1370 | dasd_sfree_request(cqr, cqr->device); | 1661 | dasd_sfree_request(cqr, cqr->memdev); |
1371 | return status; | 1662 | return status; |
1372 | } | 1663 | } |
1373 | 1664 | ||
1665 | /* | ||
1666 | * Modify ccw chain in cqr so it can be started on a base device. | ||
1667 | * | ||
1668 | * Note that this is not enough to restart the cqr! | ||
1669 | * Either reset cqr->startdev as well (summary unit check handling) | ||
1670 | * or restart via separate cqr (as in ERP handling). | ||
1671 | */ | ||
1672 | void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr) | ||
1673 | { | ||
1674 | struct ccw1 *ccw; | ||
1675 | struct PFX_eckd_data *pfxdata; | ||
1676 | |||
1677 | ccw = cqr->cpaddr; | ||
1678 | pfxdata = cqr->data; | ||
1679 | |||
1680 | if (ccw->cmd_code == DASD_ECKD_CCW_PFX) { | ||
1681 | pfxdata->validity.verify_base = 0; | ||
1682 | pfxdata->validity.hyper_pav = 0; | ||
1683 | } | ||
1684 | } | ||
1685 | |||
1686 | #define DASD_ECKD_CHANQ_MAX_SIZE 4 | ||
1687 | |||
1688 | static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base, | ||
1689 | struct dasd_block *block, | ||
1690 | struct request *req) | ||
1691 | { | ||
1692 | struct dasd_eckd_private *private; | ||
1693 | struct dasd_device *startdev; | ||
1694 | unsigned long flags; | ||
1695 | struct dasd_ccw_req *cqr; | ||
1696 | |||
1697 | startdev = dasd_alias_get_start_dev(base); | ||
1698 | if (!startdev) | ||
1699 | startdev = base; | ||
1700 | private = (struct dasd_eckd_private *) startdev->private; | ||
1701 | if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE) | ||
1702 | return ERR_PTR(-EBUSY); | ||
1703 | |||
1704 | spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags); | ||
1705 | private->count++; | ||
1706 | cqr = dasd_eckd_build_cp(startdev, block, req); | ||
1707 | if (IS_ERR(cqr)) | ||
1708 | private->count--; | ||
1709 | spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags); | ||
1710 | return cqr; | ||
1711 | } | ||
1712 | |||
1713 | static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr, | ||
1714 | struct request *req) | ||
1715 | { | ||
1716 | struct dasd_eckd_private *private; | ||
1717 | unsigned long flags; | ||
1718 | |||
1719 | spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags); | ||
1720 | private = (struct dasd_eckd_private *) cqr->memdev->private; | ||
1721 | private->count--; | ||
1722 | spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags); | ||
1723 | return dasd_eckd_free_cp(cqr, req); | ||
1724 | } | ||
1725 | |||
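The build/free pair above keeps a per-startdev request count so that no more than DASD_ECKD_CHANQ_MAX_SIZE channel programs are outstanding against one base or alias device at a time; the count is adjusted under the ccw device lock and a full queue is reported to the caller as -EBUSY. A user-space sketch of that bounded-counter pattern, assuming a pthread mutex in place of the ccw device lock (the real code additionally performs the threshold test before taking the lock); build and free_req are illustrative helpers.

/* compile with -pthread */
#include <pthread.h>
#include <stdio.h>

#define CHANQ_MAX 4			/* mirrors DASD_ECKD_CHANQ_MAX_SIZE */

struct startdev {
	pthread_mutex_t lock;		/* stands in for the ccw device lock */
	int count;			/* requests currently built for this device */
};

static int build(struct startdev *d)
{
	int rc = 0;

	pthread_mutex_lock(&d->lock);
	if (d->count >= CHANQ_MAX)
		rc = -1;		/* caller would see -EBUSY and retry later */
	else
		d->count++;
	pthread_mutex_unlock(&d->lock);
	return rc;
}

static void free_req(struct startdev *d)
{
	pthread_mutex_lock(&d->lock);
	d->count--;
	pthread_mutex_unlock(&d->lock);
}

int main(void)
{
	struct startdev d = { PTHREAD_MUTEX_INITIALIZER, 0 };
	int i;

	for (i = 0; i < 6; i++)
		printf("build %d -> %d\n", i, build(&d));	/* last two fail */
	free_req(&d);
	printf("after free: %d\n", build(&d));			/* succeeds again */
	return 0;
}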
1374 | static int | 1726 | static int |
1375 | dasd_eckd_fill_info(struct dasd_device * device, | 1727 | dasd_eckd_fill_info(struct dasd_device * device, |
1376 | struct dasd_information2_t * info) | 1728 | struct dasd_information2_t * info) |
@@ -1384,9 +1736,9 @@ dasd_eckd_fill_info(struct dasd_device * device, | |||
1384 | info->characteristics_size = sizeof(struct dasd_eckd_characteristics); | 1736 | info->characteristics_size = sizeof(struct dasd_eckd_characteristics); |
1385 | memcpy(info->characteristics, &private->rdc_data, | 1737 | memcpy(info->characteristics, &private->rdc_data, |
1386 | sizeof(struct dasd_eckd_characteristics)); | 1738 | sizeof(struct dasd_eckd_characteristics)); |
1387 | info->confdata_size = sizeof (struct dasd_eckd_confdata); | 1739 | info->confdata_size = sizeof(struct dasd_eckd_confdata); |
1388 | memcpy(info->configuration_data, &private->conf_data, | 1740 | memcpy(info->configuration_data, &private->conf_data, |
1389 | sizeof (struct dasd_eckd_confdata)); | 1741 | sizeof(struct dasd_eckd_confdata)); |
1390 | return 0; | 1742 | return 0; |
1391 | } | 1743 | } |
1392 | 1744 | ||
@@ -1419,7 +1771,8 @@ dasd_eckd_release(struct dasd_device *device) | |||
1419 | cqr->cpaddr->flags |= CCW_FLAG_SLI; | 1771 | cqr->cpaddr->flags |= CCW_FLAG_SLI; |
1420 | cqr->cpaddr->count = 32; | 1772 | cqr->cpaddr->count = 32; |
1421 | cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; | 1773 | cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; |
1422 | cqr->device = device; | 1774 | cqr->startdev = device; |
1775 | cqr->memdev = device; | ||
1423 | clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); | 1776 | clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); |
1424 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); | 1777 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); |
1425 | cqr->retries = 2; /* set retry counter to enable basic ERP */ | 1778 | cqr->retries = 2; /* set retry counter to enable basic ERP */ |
@@ -1429,7 +1782,7 @@ dasd_eckd_release(struct dasd_device *device) | |||
1429 | 1782 | ||
1430 | rc = dasd_sleep_on_immediatly(cqr); | 1783 | rc = dasd_sleep_on_immediatly(cqr); |
1431 | 1784 | ||
1432 | dasd_sfree_request(cqr, cqr->device); | 1785 | dasd_sfree_request(cqr, cqr->memdev); |
1433 | return rc; | 1786 | return rc; |
1434 | } | 1787 | } |
1435 | 1788 | ||
@@ -1459,7 +1812,8 @@ dasd_eckd_reserve(struct dasd_device *device) | |||
1459 | cqr->cpaddr->flags |= CCW_FLAG_SLI; | 1812 | cqr->cpaddr->flags |= CCW_FLAG_SLI; |
1460 | cqr->cpaddr->count = 32; | 1813 | cqr->cpaddr->count = 32; |
1461 | cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; | 1814 | cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; |
1462 | cqr->device = device; | 1815 | cqr->startdev = device; |
1816 | cqr->memdev = device; | ||
1463 | clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); | 1817 | clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); |
1464 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); | 1818 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); |
1465 | cqr->retries = 2; /* set retry counter to enable basic ERP */ | 1819 | cqr->retries = 2; /* set retry counter to enable basic ERP */ |
@@ -1469,7 +1823,7 @@ dasd_eckd_reserve(struct dasd_device *device) | |||
1469 | 1823 | ||
1470 | rc = dasd_sleep_on_immediatly(cqr); | 1824 | rc = dasd_sleep_on_immediatly(cqr); |
1471 | 1825 | ||
1472 | dasd_sfree_request(cqr, cqr->device); | 1826 | dasd_sfree_request(cqr, cqr->memdev); |
1473 | return rc; | 1827 | return rc; |
1474 | } | 1828 | } |
1475 | 1829 | ||
@@ -1498,7 +1852,8 @@ dasd_eckd_steal_lock(struct dasd_device *device) | |||
1498 | cqr->cpaddr->flags |= CCW_FLAG_SLI; | 1852 | cqr->cpaddr->flags |= CCW_FLAG_SLI; |
1499 | cqr->cpaddr->count = 32; | 1853 | cqr->cpaddr->count = 32; |
1500 | cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; | 1854 | cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; |
1501 | cqr->device = device; | 1855 | cqr->startdev = device; |
1856 | cqr->memdev = device; | ||
1502 | clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); | 1857 | clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); |
1503 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); | 1858 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); |
1504 | cqr->retries = 2; /* set retry counter to enable basic ERP */ | 1859 | cqr->retries = 2; /* set retry counter to enable basic ERP */ |
@@ -1508,7 +1863,7 @@ dasd_eckd_steal_lock(struct dasd_device *device) | |||
1508 | 1863 | ||
1509 | rc = dasd_sleep_on_immediatly(cqr); | 1864 | rc = dasd_sleep_on_immediatly(cqr); |
1510 | 1865 | ||
1511 | dasd_sfree_request(cqr, cqr->device); | 1866 | dasd_sfree_request(cqr, cqr->memdev); |
1512 | return rc; | 1867 | return rc; |
1513 | } | 1868 | } |
1514 | 1869 | ||
@@ -1526,52 +1881,52 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp) | |||
1526 | 1881 | ||
1527 | cqr = dasd_smalloc_request(dasd_eckd_discipline.name, | 1882 | cqr = dasd_smalloc_request(dasd_eckd_discipline.name, |
1528 | 1 /* PSF */ + 1 /* RSSD */ , | 1883 | 1 /* PSF */ + 1 /* RSSD */ , |
1529 | (sizeof (struct dasd_psf_prssd_data) + | 1884 | (sizeof(struct dasd_psf_prssd_data) + |
1530 | sizeof (struct dasd_rssd_perf_stats_t)), | 1885 | sizeof(struct dasd_rssd_perf_stats_t)), |
1531 | device); | 1886 | device); |
1532 | if (IS_ERR(cqr)) { | 1887 | if (IS_ERR(cqr)) { |
1533 | DEV_MESSAGE(KERN_WARNING, device, "%s", | 1888 | DEV_MESSAGE(KERN_WARNING, device, "%s", |
1534 | "Could not allocate initialization request"); | 1889 | "Could not allocate initialization request"); |
1535 | return PTR_ERR(cqr); | 1890 | return PTR_ERR(cqr); |
1536 | } | 1891 | } |
1537 | cqr->device = device; | 1892 | cqr->startdev = device; |
1893 | cqr->memdev = device; | ||
1538 | cqr->retries = 0; | 1894 | cqr->retries = 0; |
1539 | cqr->expires = 10 * HZ; | 1895 | cqr->expires = 10 * HZ; |
1540 | 1896 | ||
1541 | /* Prepare for Read Subsystem Data */ | 1897 | /* Prepare for Read Subsystem Data */ |
1542 | prssdp = (struct dasd_psf_prssd_data *) cqr->data; | 1898 | prssdp = (struct dasd_psf_prssd_data *) cqr->data; |
1543 | memset(prssdp, 0, sizeof (struct dasd_psf_prssd_data)); | 1899 | memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data)); |
1544 | prssdp->order = PSF_ORDER_PRSSD; | 1900 | prssdp->order = PSF_ORDER_PRSSD; |
1545 | prssdp->suborder = 0x01; /* Perfomance Statistics */ | 1901 | prssdp->suborder = 0x01; /* Performance Statistics */ |
1546 | prssdp->varies[1] = 0x01; /* Perf Statistics for the Subsystem */ | 1902 | prssdp->varies[1] = 0x01; /* Perf Statistics for the Subsystem */ |
1547 | 1903 | ||
1548 | ccw = cqr->cpaddr; | 1904 | ccw = cqr->cpaddr; |
1549 | ccw->cmd_code = DASD_ECKD_CCW_PSF; | 1905 | ccw->cmd_code = DASD_ECKD_CCW_PSF; |
1550 | ccw->count = sizeof (struct dasd_psf_prssd_data); | 1906 | ccw->count = sizeof(struct dasd_psf_prssd_data); |
1551 | ccw->flags |= CCW_FLAG_CC; | 1907 | ccw->flags |= CCW_FLAG_CC; |
1552 | ccw->cda = (__u32)(addr_t) prssdp; | 1908 | ccw->cda = (__u32)(addr_t) prssdp; |
1553 | 1909 | ||
1554 | /* Read Subsystem Data - Performance Statistics */ | 1910 | /* Read Subsystem Data - Performance Statistics */ |
1555 | stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1); | 1911 | stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1); |
1556 | memset(stats, 0, sizeof (struct dasd_rssd_perf_stats_t)); | 1912 | memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t)); |
1557 | 1913 | ||
1558 | ccw++; | 1914 | ccw++; |
1559 | ccw->cmd_code = DASD_ECKD_CCW_RSSD; | 1915 | ccw->cmd_code = DASD_ECKD_CCW_RSSD; |
1560 | ccw->count = sizeof (struct dasd_rssd_perf_stats_t); | 1916 | ccw->count = sizeof(struct dasd_rssd_perf_stats_t); |
1561 | ccw->cda = (__u32)(addr_t) stats; | 1917 | ccw->cda = (__u32)(addr_t) stats; |
1562 | 1918 | ||
1563 | cqr->buildclk = get_clock(); | 1919 | cqr->buildclk = get_clock(); |
1564 | cqr->status = DASD_CQR_FILLED; | 1920 | cqr->status = DASD_CQR_FILLED; |
1565 | rc = dasd_sleep_on(cqr); | 1921 | rc = dasd_sleep_on(cqr); |
1566 | if (rc == 0) { | 1922 | if (rc == 0) { |
1567 | /* Prepare for Read Subsystem Data */ | ||
1568 | prssdp = (struct dasd_psf_prssd_data *) cqr->data; | 1923 | prssdp = (struct dasd_psf_prssd_data *) cqr->data; |
1569 | stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1); | 1924 | stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1); |
1570 | if (copy_to_user(argp, stats, | 1925 | if (copy_to_user(argp, stats, |
1571 | sizeof(struct dasd_rssd_perf_stats_t))) | 1926 | sizeof(struct dasd_rssd_perf_stats_t))) |
1572 | rc = -EFAULT; | 1927 | rc = -EFAULT; |
1573 | } | 1928 | } |
1574 | dasd_sfree_request(cqr, cqr->device); | 1929 | dasd_sfree_request(cqr, cqr->memdev); |
1575 | return rc; | 1930 | return rc; |
1576 | } | 1931 | } |
1577 | 1932 | ||
@@ -1594,7 +1949,7 @@ dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp) | |||
1594 | 1949 | ||
1595 | rc = 0; | 1950 | rc = 0; |
1596 | if (copy_to_user(argp, (long *) &attrib, | 1951 | if (copy_to_user(argp, (long *) &attrib, |
1597 | sizeof (struct attrib_data_t))) | 1952 | sizeof(struct attrib_data_t))) |
1598 | rc = -EFAULT; | 1953 | rc = -EFAULT; |
1599 | 1954 | ||
1600 | return rc; | 1955 | return rc; |
@@ -1627,8 +1982,10 @@ dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp) | |||
1627 | } | 1982 | } |
1628 | 1983 | ||
1629 | static int | 1984 | static int |
1630 | dasd_eckd_ioctl(struct dasd_device *device, unsigned int cmd, void __user *argp) | 1985 | dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp) |
1631 | { | 1986 | { |
1987 | struct dasd_device *device = block->base; | ||
1988 | |||
1632 | switch (cmd) { | 1989 | switch (cmd) { |
1633 | case BIODASDGATTR: | 1990 | case BIODASDGATTR: |
1634 | return dasd_eckd_get_attrib(device, argp); | 1991 | return dasd_eckd_get_attrib(device, argp); |
@@ -1685,9 +2042,8 @@ dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page) | |||
1685 | * Print sense data and related channel program. | 2042 | * Print sense data and related channel program. |
1686 | * Parts are printed because printk buffer is only 1024 bytes. | 2043 | * Parts are printed because printk buffer is only 1024 bytes. |
1687 | */ | 2044 | */ |
1688 | static void | 2045 | static void dasd_eckd_dump_sense(struct dasd_device *device, |
1689 | dasd_eckd_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, | 2046 | struct dasd_ccw_req *req, struct irb *irb) |
1690 | struct irb *irb) | ||
1691 | { | 2047 | { |
1692 | char *page; | 2048 | char *page; |
1693 | struct ccw1 *first, *last, *fail, *from, *to; | 2049 | struct ccw1 *first, *last, *fail, *from, *to; |
@@ -1743,37 +2099,40 @@ dasd_eckd_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, | |||
1743 | } | 2099 | } |
1744 | printk("%s", page); | 2100 | printk("%s", page); |
1745 | 2101 | ||
1746 | /* dump the Channel Program (max 140 Bytes per line) */ | 2102 | if (req) { |
1747 | /* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */ | 2103 | /* req == NULL for unsolicited interrupts */ |
1748 | first = req->cpaddr; | 2104 | /* dump the Channel Program (max 140 Bytes per line) */ |
1749 | for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++); | 2105 | /* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */ |
1750 | to = min(first + 6, last); | 2106 | first = req->cpaddr; |
1751 | len = sprintf(page, KERN_ERR PRINTK_HEADER | 2107 | for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++); |
1752 | " Related CP in req: %p\n", req); | 2108 | to = min(first + 6, last); |
1753 | dasd_eckd_dump_ccw_range(first, to, page + len); | 2109 | len = sprintf(page, KERN_ERR PRINTK_HEADER |
1754 | printk("%s", page); | 2110 | " Related CP in req: %p\n", req); |
2111 | dasd_eckd_dump_ccw_range(first, to, page + len); | ||
2112 | printk("%s", page); | ||
1755 | 2113 | ||
1756 | /* print failing CCW area (maximum 4) */ | 2114 | /* print failing CCW area (maximum 4) */ |
1757 | /* scsw->cda is either valid or zero */ | 2115 | /* scsw->cda is either valid or zero */ |
1758 | len = 0; | 2116 | len = 0; |
1759 | from = ++to; | 2117 | from = ++to; |
1760 | fail = (struct ccw1 *)(addr_t) irb->scsw.cpa; /* failing CCW */ | 2118 | fail = (struct ccw1 *)(addr_t) irb->scsw.cpa; /* failing CCW */ |
1761 | if (from < fail - 2) { | 2119 | if (from < fail - 2) { |
1762 | from = fail - 2; /* there is a gap - print header */ | 2120 | from = fail - 2; /* there is a gap - print header */ |
1763 | len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n"); | 2121 | len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n"); |
1764 | } | 2122 | } |
1765 | to = min(fail + 1, last); | 2123 | to = min(fail + 1, last); |
1766 | len += dasd_eckd_dump_ccw_range(from, to, page + len); | 2124 | len += dasd_eckd_dump_ccw_range(from, to, page + len); |
1767 | 2125 | ||
1768 | /* print last CCWs (maximum 2) */ | 2126 | /* print last CCWs (maximum 2) */ |
1769 | from = max(from, ++to); | 2127 | from = max(from, ++to); |
1770 | if (from < last - 1) { | 2128 | if (from < last - 1) { |
1771 | from = last - 1; /* there is a gap - print header */ | 2129 | from = last - 1; /* there is a gap - print header */ |
1772 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n"); | 2130 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n"); |
2131 | } | ||
2132 | len += dasd_eckd_dump_ccw_range(from, last, page + len); | ||
2133 | if (len > 0) | ||
2134 | printk("%s", page); | ||
1773 | } | 2135 | } |
1774 | len += dasd_eckd_dump_ccw_range(from, last, page + len); | ||
1775 | if (len > 0) | ||
1776 | printk("%s", page); | ||
1777 | free_page((unsigned long) page); | 2136 | free_page((unsigned long) page); |
1778 | } | 2137 | } |
1779 | 2138 | ||
@@ -1796,16 +2155,20 @@ static struct dasd_discipline dasd_eckd_discipline = { | |||
1796 | .ebcname = "ECKD", | 2155 | .ebcname = "ECKD", |
1797 | .max_blocks = 240, | 2156 | .max_blocks = 240, |
1798 | .check_device = dasd_eckd_check_characteristics, | 2157 | .check_device = dasd_eckd_check_characteristics, |
2158 | .uncheck_device = dasd_eckd_uncheck_device, | ||
1799 | .do_analysis = dasd_eckd_do_analysis, | 2159 | .do_analysis = dasd_eckd_do_analysis, |
2160 | .ready_to_online = dasd_eckd_ready_to_online, | ||
2161 | .online_to_ready = dasd_eckd_online_to_ready, | ||
1800 | .fill_geometry = dasd_eckd_fill_geometry, | 2162 | .fill_geometry = dasd_eckd_fill_geometry, |
1801 | .start_IO = dasd_start_IO, | 2163 | .start_IO = dasd_start_IO, |
1802 | .term_IO = dasd_term_IO, | 2164 | .term_IO = dasd_term_IO, |
2165 | .handle_terminated_request = dasd_eckd_handle_terminated_request, | ||
1803 | .format_device = dasd_eckd_format_device, | 2166 | .format_device = dasd_eckd_format_device, |
1804 | .examine_error = dasd_eckd_examine_error, | ||
1805 | .erp_action = dasd_eckd_erp_action, | 2167 | .erp_action = dasd_eckd_erp_action, |
1806 | .erp_postaction = dasd_eckd_erp_postaction, | 2168 | .erp_postaction = dasd_eckd_erp_postaction, |
1807 | .build_cp = dasd_eckd_build_cp, | 2169 | .handle_unsolicited_interrupt = dasd_eckd_handle_unsolicited_interrupt, |
1808 | .free_cp = dasd_eckd_free_cp, | 2170 | .build_cp = dasd_eckd_build_alias_cp, |
2171 | .free_cp = dasd_eckd_free_alias_cp, | ||
1809 | .dump_sense = dasd_eckd_dump_sense, | 2172 | .dump_sense = dasd_eckd_dump_sense, |
1810 | .fill_info = dasd_eckd_fill_info, | 2173 | .fill_info = dasd_eckd_fill_info, |
1811 | .ioctl = dasd_eckd_ioctl, | 2174 | .ioctl = dasd_eckd_ioctl, |
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h index 712ff1650134..fc2509c939bc 100644 --- a/drivers/s390/block/dasd_eckd.h +++ b/drivers/s390/block/dasd_eckd.h | |||
@@ -39,6 +39,8 @@ | |||
39 | #define DASD_ECKD_CCW_READ_CKD_MT 0x9e | 39 | #define DASD_ECKD_CCW_READ_CKD_MT 0x9e |
40 | #define DASD_ECKD_CCW_WRITE_CKD_MT 0x9d | 40 | #define DASD_ECKD_CCW_WRITE_CKD_MT 0x9d |
41 | #define DASD_ECKD_CCW_RESERVE 0xB4 | 41 | #define DASD_ECKD_CCW_RESERVE 0xB4 |
42 | #define DASD_ECKD_CCW_PFX 0xE7 | ||
43 | #define DASD_ECKD_CCW_RSCK 0xF9 | ||
42 | 44 | ||
43 | /* | 45 | /* |
44 | * Perform Subsystem Function / Sub-Orders | 46 | * Perform Subsystem Function / Sub-Orders |
@@ -137,6 +139,25 @@ struct LO_eckd_data { | |||
137 | __u16 length; | 139 | __u16 length; |
138 | } __attribute__ ((packed)); | 140 | } __attribute__ ((packed)); |
139 | 141 | ||
142 | /* Prefix data for format 0x00 and 0x01 */ | ||
143 | struct PFX_eckd_data { | ||
144 | unsigned char format; | ||
145 | struct { | ||
146 | unsigned char define_extend:1; | ||
147 | unsigned char time_stamp:1; | ||
148 | unsigned char verify_base:1; | ||
149 | unsigned char hyper_pav:1; | ||
150 | unsigned char reserved:4; | ||
151 | } __attribute__ ((packed)) validity; | ||
152 | __u8 base_address; | ||
153 | __u8 aux; | ||
154 | __u8 base_lss; | ||
155 | __u8 reserved[7]; | ||
156 | struct DE_eckd_data define_extend; | ||
157 | struct LO_eckd_data locate_record; | ||
158 | __u8 LO_extended_data[4]; | ||
159 | } __attribute__ ((packed)); | ||
160 | |||
140 | struct dasd_eckd_characteristics { | 161 | struct dasd_eckd_characteristics { |
141 | __u16 cu_type; | 162 | __u16 cu_type; |
142 | struct { | 163 | struct { |
@@ -254,7 +275,9 @@ struct dasd_eckd_confdata { | |||
254 | } __attribute__ ((packed)) ned; | 275 | } __attribute__ ((packed)) ned; |
255 | struct { | 276 | struct { |
256 | unsigned char flags; /* byte 0 */ | 277 | unsigned char flags; /* byte 0 */ |
257 | unsigned char res2[7]; /* byte 1- 7 */ | 278 | unsigned char res1; /* byte 1 */ |
279 | __u16 format; /* byte 2-3 */ | ||
280 | unsigned char res2[4]; /* byte 4-7 */ | ||
258 | unsigned char sua_flags; /* byte 8 */ | 281 | unsigned char sua_flags; /* byte 8 */ |
259 | __u8 base_unit_addr; /* byte 9 */ | 282 | __u8 base_unit_addr; /* byte 9 */ |
260 | unsigned char res3[22]; /* byte 10-31 */ | 283 | unsigned char res3[22]; /* byte 10-31 */ |
@@ -343,6 +366,11 @@ struct dasd_eckd_path { | |||
343 | __u8 npm; | 366 | __u8 npm; |
344 | }; | 367 | }; |
345 | 368 | ||
369 | struct dasd_rssd_features { | ||
370 | char feature[256]; | ||
371 | } __attribute__((packed)); | ||
372 | |||
373 | |||
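This 256-byte feature map is what the ECKD discipline reads via Read Subsystem Data and later consults in dasd_eckd_build_cp, where feature[8] & 0x01 decides whether the prefix command may be used. A trivial hedged sketch of such a bit test; has_feature is a hypothetical helper, not part of the driver.

static int has_feature(const struct dasd_rssd_features *f,
		       int byte, unsigned char bit_mask)
{
	return (f->feature[byte] & bit_mask) != 0;
}

/* e.g. has_feature(&private->features, 8, 0x01) for prefix support */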
346 | /* | 374 | /* |
347 | * Perform Subsystem Function - Prepare for Read Subsystem Data | 375 | * Perform Subsystem Function - Prepare for Read Subsystem Data |
348 | */ | 376 | */ |
@@ -365,4 +393,99 @@ struct dasd_psf_ssc_data { | |||
365 | unsigned char reserved[59]; | 393 | unsigned char reserved[59]; |
366 | } __attribute__((packed)); | 394 | } __attribute__((packed)); |
367 | 395 | ||
396 | |||
397 | /* | ||
398 | * some structures and definitions for alias handling | ||
399 | */ | ||
400 | struct dasd_unit_address_configuration { | ||
401 | struct { | ||
402 | char ua_type; | ||
403 | char base_ua; | ||
404 | } unit[256]; | ||
405 | } __attribute__((packed)); | ||
406 | |||
407 | |||
408 | #define MAX_DEVICES_PER_LCU 256 | ||
409 | |||
410 | /* flags on the LCU */ | ||
411 | #define NEED_UAC_UPDATE 0x01 | ||
412 | #define UPDATE_PENDING 0x02 | ||
413 | |||
414 | enum pavtype {NO_PAV, BASE_PAV, HYPER_PAV}; | ||
415 | |||
416 | |||
417 | struct alias_root { | ||
418 | struct list_head serverlist; | ||
419 | spinlock_t lock; | ||
420 | }; | ||
421 | |||
422 | struct alias_server { | ||
423 | struct list_head server; | ||
424 | struct dasd_uid uid; | ||
425 | struct list_head lculist; | ||
426 | }; | ||
427 | |||
428 | struct summary_unit_check_work_data { | ||
429 | char reason; | ||
430 | struct dasd_device *device; | ||
431 | struct work_struct worker; | ||
432 | }; | ||
433 | |||
434 | struct read_uac_work_data { | ||
435 | struct dasd_device *device; | ||
436 | struct delayed_work dwork; | ||
437 | }; | ||
438 | |||
439 | struct alias_lcu { | ||
440 | struct list_head lcu; | ||
441 | struct dasd_uid uid; | ||
442 | enum pavtype pav; | ||
443 | char flags; | ||
444 | spinlock_t lock; | ||
445 | struct list_head grouplist; | ||
446 | struct list_head active_devices; | ||
447 | struct list_head inactive_devices; | ||
448 | struct dasd_unit_address_configuration *uac; | ||
449 | struct summary_unit_check_work_data suc_data; | ||
450 | struct read_uac_work_data ruac_data; | ||
451 | struct dasd_ccw_req *rsu_cqr; | ||
452 | }; | ||
453 | |||
454 | struct alias_pav_group { | ||
455 | struct list_head group; | ||
456 | struct dasd_uid uid; | ||
457 | struct alias_lcu *lcu; | ||
458 | struct list_head baselist; | ||
459 | struct list_head aliaslist; | ||
460 | struct dasd_device *next; | ||
461 | }; | ||
462 | |||
463 | |||
464 | struct dasd_eckd_private { | ||
465 | struct dasd_eckd_characteristics rdc_data; | ||
466 | struct dasd_eckd_confdata conf_data; | ||
467 | struct dasd_eckd_path path_data; | ||
468 | struct eckd_count count_area[5]; | ||
469 | int init_cqr_status; | ||
470 | int uses_cdl; | ||
471 | struct attrib_data_t attrib; /* e.g. cache operations */ | ||
472 | struct dasd_rssd_features features; | ||
473 | |||
474 | /* alias management */ | ||
475 | struct dasd_uid uid; | ||
476 | struct alias_pav_group *pavgroup; | ||
477 | struct alias_lcu *lcu; | ||
478 | int count; | ||
479 | }; | ||
480 | |||
481 | |||
482 | |||
483 | int dasd_alias_make_device_known_to_lcu(struct dasd_device *); | ||
484 | void dasd_alias_disconnect_device_from_lcu(struct dasd_device *); | ||
485 | int dasd_alias_add_device(struct dasd_device *); | ||
486 | int dasd_alias_remove_device(struct dasd_device *); | ||
487 | struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *); | ||
488 | void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *); | ||
489 | void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *); | ||
490 | |||
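Taken together, the declarations above describe one containment chain: a single alias_root holds alias_server entries, each server holds its alias_lcu list, and each LCU groups base and alias devices into alias_pav_group entries via baselist and aliaslist. The kernel-style sketch below only illustrates that nesting with the standard list helpers; walk_alias_tree is not an existing driver function, it assumes <linux/list.h> plus the structures above, and it deliberately ignores the finer-grained lcu->lock that real code would take.

static void walk_alias_tree(struct alias_root *root)
{
	struct alias_server *server;
	struct alias_lcu *lcu;
	struct alias_pav_group *group;

	spin_lock(&root->lock);
	list_for_each_entry(server, &root->serverlist, server)
		list_for_each_entry(lcu, &server->lculist, lcu)
			list_for_each_entry(group, &lcu->grouplist, group)
				/* group->baselist and group->aliaslist hold
				 * the dasd_device structures of the group */
				;
	spin_unlock(&root->lock);
}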
368 | #endif /* DASD_ECKD_H */ | 491 | #endif /* DASD_ECKD_H */ |
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c index 0c081a664ee8..6e53ab606e97 100644 --- a/drivers/s390/block/dasd_eer.c +++ b/drivers/s390/block/dasd_eer.c | |||
@@ -336,7 +336,7 @@ static void dasd_eer_write_snss_trigger(struct dasd_device *device, | |||
336 | unsigned long flags; | 336 | unsigned long flags; |
337 | struct eerbuffer *eerb; | 337 | struct eerbuffer *eerb; |
338 | 338 | ||
339 | snss_rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0; | 339 | snss_rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; |
340 | if (snss_rc) | 340 | if (snss_rc) |
341 | data_size = 0; | 341 | data_size = 0; |
342 | else | 342 | else |
@@ -404,10 +404,11 @@ void dasd_eer_snss(struct dasd_device *device) | |||
404 | set_bit(DASD_FLAG_EER_SNSS, &device->flags); | 404 | set_bit(DASD_FLAG_EER_SNSS, &device->flags); |
405 | return; | 405 | return; |
406 | } | 406 | } |
407 | /* cdev is already locked, can't use dasd_add_request_head */ | ||
407 | clear_bit(DASD_FLAG_EER_SNSS, &device->flags); | 408 | clear_bit(DASD_FLAG_EER_SNSS, &device->flags); |
408 | cqr->status = DASD_CQR_QUEUED; | 409 | cqr->status = DASD_CQR_QUEUED; |
409 | list_add(&cqr->list, &device->ccw_queue); | 410 | list_add(&cqr->devlist, &device->ccw_queue); |
410 | dasd_schedule_bh(device); | 411 | dasd_schedule_device_bh(device); |
411 | } | 412 | } |
412 | 413 | ||
413 | /* | 414 | /* |
@@ -415,7 +416,7 @@ void dasd_eer_snss(struct dasd_device *device) | |||
415 | */ | 416 | */ |
416 | static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data) | 417 | static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data) |
417 | { | 418 | { |
418 | struct dasd_device *device = cqr->device; | 419 | struct dasd_device *device = cqr->startdev; |
419 | unsigned long flags; | 420 | unsigned long flags; |
420 | 421 | ||
421 | dasd_eer_write(device, cqr, DASD_EER_STATECHANGE); | 422 | dasd_eer_write(device, cqr, DASD_EER_STATECHANGE); |
@@ -458,7 +459,7 @@ int dasd_eer_enable(struct dasd_device *device) | |||
458 | if (!cqr) | 459 | if (!cqr) |
459 | return -ENOMEM; | 460 | return -ENOMEM; |
460 | 461 | ||
461 | cqr->device = device; | 462 | cqr->startdev = device; |
462 | cqr->retries = 255; | 463 | cqr->retries = 255; |
463 | cqr->expires = 10 * HZ; | 464 | cqr->expires = 10 * HZ; |
464 | clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); | 465 | clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); |
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c index caa5d91420f8..8f10000851a3 100644 --- a/drivers/s390/block/dasd_erp.c +++ b/drivers/s390/block/dasd_erp.c | |||
@@ -46,6 +46,8 @@ dasd_alloc_erp_request(char *magic, int cplength, int datasize, | |||
46 | if (cqr == NULL) | 46 | if (cqr == NULL) |
47 | return ERR_PTR(-ENOMEM); | 47 | return ERR_PTR(-ENOMEM); |
48 | memset(cqr, 0, sizeof(struct dasd_ccw_req)); | 48 | memset(cqr, 0, sizeof(struct dasd_ccw_req)); |
49 | INIT_LIST_HEAD(&cqr->devlist); | ||
50 | INIT_LIST_HEAD(&cqr->blocklist); | ||
49 | data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L); | 51 | data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L); |
50 | cqr->cpaddr = NULL; | 52 | cqr->cpaddr = NULL; |
51 | if (cplength > 0) { | 53 | if (cplength > 0) { |
@@ -66,7 +68,7 @@ dasd_alloc_erp_request(char *magic, int cplength, int datasize, | |||
66 | } | 68 | } |
67 | 69 | ||
68 | void | 70 | void |
69 | dasd_free_erp_request(struct dasd_ccw_req * cqr, struct dasd_device * device) | 71 | dasd_free_erp_request(struct dasd_ccw_req *cqr, struct dasd_device * device) |
70 | { | 72 | { |
71 | unsigned long flags; | 73 | unsigned long flags; |
72 | 74 | ||
@@ -81,11 +83,11 @@ dasd_free_erp_request(struct dasd_ccw_req * cqr, struct dasd_device * device) | |||
81 | * dasd_default_erp_action just retries the current cqr | 83 | * dasd_default_erp_action just retries the current cqr |
82 | */ | 84 | */ |
83 | struct dasd_ccw_req * | 85 | struct dasd_ccw_req * |
84 | dasd_default_erp_action(struct dasd_ccw_req * cqr) | 86 | dasd_default_erp_action(struct dasd_ccw_req *cqr) |
85 | { | 87 | { |
86 | struct dasd_device *device; | 88 | struct dasd_device *device; |
87 | 89 | ||
88 | device = cqr->device; | 90 | device = cqr->startdev; |
89 | 91 | ||
90 | /* just retry - there is nothing to save ... I got no sense data.... */ | 92 | /* just retry - there is nothing to save ... I got no sense data.... */ |
91 | if (cqr->retries > 0) { | 93 | if (cqr->retries > 0) { |
@@ -93,12 +95,12 @@ dasd_default_erp_action(struct dasd_ccw_req * cqr) | |||
93 | "default ERP called (%i retries left)", | 95 | "default ERP called (%i retries left)", |
94 | cqr->retries); | 96 | cqr->retries); |
95 | cqr->lpm = LPM_ANYPATH; | 97 | cqr->lpm = LPM_ANYPATH; |
96 | cqr->status = DASD_CQR_QUEUED; | 98 | cqr->status = DASD_CQR_FILLED; |
97 | } else { | 99 | } else { |
98 | DEV_MESSAGE (KERN_WARNING, device, "%s", | 100 | DEV_MESSAGE (KERN_WARNING, device, "%s", |
99 | "default ERP called (NO retry left)"); | 101 | "default ERP called (NO retry left)"); |
100 | cqr->status = DASD_CQR_FAILED; | 102 | cqr->status = DASD_CQR_FAILED; |
101 | cqr->stopclk = get_clock (); | 103 | cqr->stopclk = get_clock(); |
102 | } | 104 | } |
103 | return cqr; | 105 | return cqr; |
104 | } /* end dasd_default_erp_action */ | 106 | } /* end dasd_default_erp_action */ |
@@ -117,15 +119,12 @@ dasd_default_erp_action(struct dasd_ccw_req * cqr) | |||
117 | * RETURN VALUES | 119 | * RETURN VALUES |
118 | * cqr pointer to the original CQR | 120 | * cqr pointer to the original CQR |
119 | */ | 121 | */ |
120 | struct dasd_ccw_req * | 122 | struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *cqr) |
121 | dasd_default_erp_postaction(struct dasd_ccw_req * cqr) | ||
122 | { | 123 | { |
123 | struct dasd_device *device; | ||
124 | int success; | 124 | int success; |
125 | 125 | ||
126 | BUG_ON(cqr->refers == NULL || cqr->function == NULL); | 126 | BUG_ON(cqr->refers == NULL || cqr->function == NULL); |
127 | 127 | ||
128 | device = cqr->device; | ||
129 | success = cqr->status == DASD_CQR_DONE; | 128 | success = cqr->status == DASD_CQR_DONE; |
130 | 129 | ||
131 | /* free all ERPs - but NOT the original cqr */ | 130 | /* free all ERPs - but NOT the original cqr */ |
@@ -133,10 +132,10 @@ dasd_default_erp_postaction(struct dasd_ccw_req * cqr) | |||
133 | struct dasd_ccw_req *refers; | 132 | struct dasd_ccw_req *refers; |
134 | 133 | ||
135 | refers = cqr->refers; | 134 | refers = cqr->refers; |
136 | /* remove the request from the device queue */ | 135 | /* remove the request from the block queue */ |
137 | list_del(&cqr->list); | 136 | list_del(&cqr->blocklist); |
138 | /* free the finished erp request */ | 137 | /* free the finished erp request */ |
139 | dasd_free_erp_request(cqr, device); | 138 | dasd_free_erp_request(cqr, cqr->memdev); |
140 | cqr = refers; | 139 | cqr = refers; |
141 | } | 140 | } |
142 | 141 | ||
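The loop above unwinds the ERP chain: every ERP request points back through refers at the request it was built for, and postaction removes and frees ERPs until it reaches the original request, whose refers pointer is NULL. A plain user-space sketch of that unwinding; struct req and unwind are illustrative stand-ins, not driver types.

#include <stdio.h>
#include <stdlib.h>

struct req {
	struct req *refers;	/* request this ERP was created for */
	const char *name;
};

static struct req *unwind(struct req *cqr)
{
	while (cqr->refers) {
		struct req *prev = cqr->refers;

		printf("freeing ERP %s\n", cqr->name);
		free(cqr);
		cqr = prev;
	}
	return cqr;		/* the original request survives */
}

int main(void)
{
	struct req *orig = calloc(1, sizeof(*orig));
	struct req *erp1 = calloc(1, sizeof(*erp1));
	struct req *erp2 = calloc(1, sizeof(*erp2));

	orig->name = "orig";
	erp1->name = "erp1";
	erp1->refers = orig;
	erp2->name = "erp2";
	erp2->refers = erp1;	/* chain: erp2 -> erp1 -> orig */
	printf("back at %s\n", unwind(erp2)->name);
	free(orig);
	return 0;
}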
@@ -157,7 +156,7 @@ dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb) | |||
157 | { | 156 | { |
158 | struct dasd_device *device; | 157 | struct dasd_device *device; |
159 | 158 | ||
160 | device = cqr->device; | 159 | device = cqr->startdev; |
161 | /* dump sense data */ | 160 | /* dump sense data */ |
162 | if (device->discipline && device->discipline->dump_sense) | 161 | if (device->discipline && device->discipline->dump_sense) |
163 | device->discipline->dump_sense(device, cqr, irb); | 162 | device->discipline->dump_sense(device, cqr, irb); |
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index 1d95822e0b8e..d13ea05089a7 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c | |||
@@ -117,6 +117,7 @@ locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw, | |||
117 | static int | 117 | static int |
118 | dasd_fba_check_characteristics(struct dasd_device *device) | 118 | dasd_fba_check_characteristics(struct dasd_device *device) |
119 | { | 119 | { |
120 | struct dasd_block *block; | ||
120 | struct dasd_fba_private *private; | 121 | struct dasd_fba_private *private; |
121 | struct ccw_device *cdev = device->cdev; | 122 | struct ccw_device *cdev = device->cdev; |
122 | void *rdc_data; | 123 | void *rdc_data; |
@@ -133,6 +134,16 @@ dasd_fba_check_characteristics(struct dasd_device *device) | |||
133 | } | 134 | } |
134 | device->private = (void *) private; | 135 | device->private = (void *) private; |
135 | } | 136 | } |
137 | block = dasd_alloc_block(); | ||
138 | if (IS_ERR(block)) { | ||
139 | DEV_MESSAGE(KERN_WARNING, device, "%s", | ||
140 | "could not allocate dasd block structure"); | ||
141 | kfree(device->private); | ||
142 | return PTR_ERR(block); | ||
143 | } | ||
144 | device->block = block; | ||
145 | block->base = device; | ||
146 | |||
136 | /* Read Device Characteristics */ | 147 | /* Read Device Characteristics */ |
137 | rdc_data = (void *) &(private->rdc_data); | 148 | rdc_data = (void *) &(private->rdc_data); |
138 | rc = dasd_generic_read_dev_chars(device, "FBA ", &rdc_data, 32); | 149 | rc = dasd_generic_read_dev_chars(device, "FBA ", &rdc_data, 32); |
@@ -155,60 +166,37 @@ dasd_fba_check_characteristics(struct dasd_device *device) | |||
155 | return 0; | 166 | return 0; |
156 | } | 167 | } |
157 | 168 | ||
158 | static int | 169 | static int dasd_fba_do_analysis(struct dasd_block *block) |
159 | dasd_fba_do_analysis(struct dasd_device *device) | ||
160 | { | 170 | { |
161 | struct dasd_fba_private *private; | 171 | struct dasd_fba_private *private; |
162 | int sb, rc; | 172 | int sb, rc; |
163 | 173 | ||
164 | private = (struct dasd_fba_private *) device->private; | 174 | private = (struct dasd_fba_private *) block->base->private; |
165 | rc = dasd_check_blocksize(private->rdc_data.blk_size); | 175 | rc = dasd_check_blocksize(private->rdc_data.blk_size); |
166 | if (rc) { | 176 | if (rc) { |
167 | DEV_MESSAGE(KERN_INFO, device, "unknown blocksize %d", | 177 | DEV_MESSAGE(KERN_INFO, block->base, "unknown blocksize %d", |
168 | private->rdc_data.blk_size); | 178 | private->rdc_data.blk_size); |
169 | return rc; | 179 | return rc; |
170 | } | 180 | } |
171 | device->blocks = private->rdc_data.blk_bdsa; | 181 | block->blocks = private->rdc_data.blk_bdsa; |
172 | device->bp_block = private->rdc_data.blk_size; | 182 | block->bp_block = private->rdc_data.blk_size; |
173 | device->s2b_shift = 0; /* bits to shift 512 to get a block */ | 183 | block->s2b_shift = 0; /* bits to shift 512 to get a block */ |
174 | for (sb = 512; sb < private->rdc_data.blk_size; sb = sb << 1) | 184 | for (sb = 512; sb < private->rdc_data.blk_size; sb = sb << 1) |
175 | device->s2b_shift++; | 185 | block->s2b_shift++; |
176 | return 0; | 186 | return 0; |
177 | } | 187 | } |
178 | 188 | ||
179 | static int | 189 | static int dasd_fba_fill_geometry(struct dasd_block *block, |
180 | dasd_fba_fill_geometry(struct dasd_device *device, struct hd_geometry *geo) | 190 | struct hd_geometry *geo) |
181 | { | 191 | { |
182 | if (dasd_check_blocksize(device->bp_block) != 0) | 192 | if (dasd_check_blocksize(block->bp_block) != 0) |
183 | return -EINVAL; | 193 | return -EINVAL; |
184 | geo->cylinders = (device->blocks << device->s2b_shift) >> 10; | 194 | geo->cylinders = (block->blocks << block->s2b_shift) >> 10; |
185 | geo->heads = 16; | 195 | geo->heads = 16; |
186 | geo->sectors = 128 >> device->s2b_shift; | 196 | geo->sectors = 128 >> block->s2b_shift; |
187 | return 0; | 197 | return 0; |
188 | } | 198 | } |
189 | 199 | ||
190 | static dasd_era_t | ||
191 | dasd_fba_examine_error(struct dasd_ccw_req * cqr, struct irb * irb) | ||
192 | { | ||
193 | struct dasd_device *device; | ||
194 | struct ccw_device *cdev; | ||
195 | |||
196 | device = (struct dasd_device *) cqr->device; | ||
197 | if (irb->scsw.cstat == 0x00 && | ||
198 | irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END)) | ||
199 | return dasd_era_none; | ||
200 | |||
201 | cdev = device->cdev; | ||
202 | switch (cdev->id.dev_type) { | ||
203 | case 0x3370: | ||
204 | return dasd_3370_erp_examine(cqr, irb); | ||
205 | case 0x9336: | ||
206 | return dasd_9336_erp_examine(cqr, irb); | ||
207 | default: | ||
208 | return dasd_era_recover; | ||
209 | } | ||
210 | } | ||
211 | |||
212 | static dasd_erp_fn_t | 200 | static dasd_erp_fn_t |
213 | dasd_fba_erp_action(struct dasd_ccw_req * cqr) | 201 | dasd_fba_erp_action(struct dasd_ccw_req * cqr) |
214 | { | 202 | { |
@@ -221,13 +209,34 @@ dasd_fba_erp_postaction(struct dasd_ccw_req * cqr) | |||
221 | if (cqr->function == dasd_default_erp_action) | 209 | if (cqr->function == dasd_default_erp_action) |
222 | return dasd_default_erp_postaction; | 210 | return dasd_default_erp_postaction; |
223 | 211 | ||
224 | DEV_MESSAGE(KERN_WARNING, cqr->device, "unknown ERP action %p", | 212 | DEV_MESSAGE(KERN_WARNING, cqr->startdev, "unknown ERP action %p", |
225 | cqr->function); | 213 | cqr->function); |
226 | return NULL; | 214 | return NULL; |
227 | } | 215 | } |
228 | 216 | ||
229 | static struct dasd_ccw_req * | 217 | static void dasd_fba_handle_unsolicited_interrupt(struct dasd_device *device, |
230 | dasd_fba_build_cp(struct dasd_device * device, struct request *req) | 218 | struct irb *irb) |
219 | { | ||
220 | char mask; | ||
221 | |||
222 | /* first of all check for state change pending interrupt */ | ||
223 | mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; | ||
224 | if ((irb->scsw.dstat & mask) == mask) { | ||
225 | dasd_generic_handle_state_change(device); | ||
226 | return; | ||
227 | } | ||
228 | |||
229 | /* check for unsolicited interrupts */ | ||
230 | DEV_MESSAGE(KERN_DEBUG, device, "%s", | ||
231 | "unsolicited interrupt received"); | ||
232 | device->discipline->dump_sense(device, NULL, irb); | ||
233 | dasd_schedule_device_bh(device); | ||
234 | return; | ||
235 | }; | ||
236 | |||
237 | static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev, | ||
238 | struct dasd_block *block, | ||
239 | struct request *req) | ||
231 | { | 240 | { |
232 | struct dasd_fba_private *private; | 241 | struct dasd_fba_private *private; |
233 | unsigned long *idaws; | 242 | unsigned long *idaws; |
@@ -242,17 +251,17 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req) | |||
242 | unsigned int blksize, off; | 251 | unsigned int blksize, off; |
243 | unsigned char cmd; | 252 | unsigned char cmd; |
244 | 253 | ||
245 | private = (struct dasd_fba_private *) device->private; | 254 | private = (struct dasd_fba_private *) block->base->private; |
246 | if (rq_data_dir(req) == READ) { | 255 | if (rq_data_dir(req) == READ) { |
247 | cmd = DASD_FBA_CCW_READ; | 256 | cmd = DASD_FBA_CCW_READ; |
248 | } else if (rq_data_dir(req) == WRITE) { | 257 | } else if (rq_data_dir(req) == WRITE) { |
249 | cmd = DASD_FBA_CCW_WRITE; | 258 | cmd = DASD_FBA_CCW_WRITE; |
250 | } else | 259 | } else |
251 | return ERR_PTR(-EINVAL); | 260 | return ERR_PTR(-EINVAL); |
252 | blksize = device->bp_block; | 261 | blksize = block->bp_block; |
253 | /* Calculate record id of first and last block. */ | 262 | /* Calculate record id of first and last block. */ |
254 | first_rec = req->sector >> device->s2b_shift; | 263 | first_rec = req->sector >> block->s2b_shift; |
255 | last_rec = (req->sector + req->nr_sectors - 1) >> device->s2b_shift; | 264 | last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift; |
256 | /* Check struct bio and count the number of blocks for the request. */ | 265 | /* Check struct bio and count the number of blocks for the request. */ |
257 | count = 0; | 266 | count = 0; |
258 | cidaw = 0; | 267 | cidaw = 0; |
@@ -260,7 +269,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req) | |||
260 | if (bv->bv_len & (blksize - 1)) | 269 | if (bv->bv_len & (blksize - 1)) |
261 | /* Fba can only do full blocks. */ | 270 | /* Fba can only do full blocks. */ |
262 | return ERR_PTR(-EINVAL); | 271 | return ERR_PTR(-EINVAL); |
263 | count += bv->bv_len >> (device->s2b_shift + 9); | 272 | count += bv->bv_len >> (block->s2b_shift + 9); |
264 | #if defined(CONFIG_64BIT) | 273 | #if defined(CONFIG_64BIT) |
265 | if (idal_is_needed (page_address(bv->bv_page), bv->bv_len)) | 274 | if (idal_is_needed (page_address(bv->bv_page), bv->bv_len)) |
266 | cidaw += bv->bv_len / blksize; | 275 | cidaw += bv->bv_len / blksize; |
@@ -284,13 +293,13 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req) | |||
284 | } | 293 | } |
285 | /* Allocate the ccw request. */ | 294 | /* Allocate the ccw request. */ |
286 | cqr = dasd_smalloc_request(dasd_fba_discipline.name, | 295 | cqr = dasd_smalloc_request(dasd_fba_discipline.name, |
287 | cplength, datasize, device); | 296 | cplength, datasize, memdev); |
288 | if (IS_ERR(cqr)) | 297 | if (IS_ERR(cqr)) |
289 | return cqr; | 298 | return cqr; |
290 | ccw = cqr->cpaddr; | 299 | ccw = cqr->cpaddr; |
291 | /* First ccw is define extent. */ | 300 | /* First ccw is define extent. */ |
292 | define_extent(ccw++, cqr->data, rq_data_dir(req), | 301 | define_extent(ccw++, cqr->data, rq_data_dir(req), |
293 | device->bp_block, req->sector, req->nr_sectors); | 302 | block->bp_block, req->sector, req->nr_sectors); |
294 | /* Build locate_record + read/write ccws. */ | 303 | /* Build locate_record + read/write ccws. */ |
295 | idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data)); | 304 | idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data)); |
296 | LO_data = (struct LO_fba_data *) (idaws + cidaw); | 305 | LO_data = (struct LO_fba_data *) (idaws + cidaw); |
@@ -326,7 +335,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req) | |||
326 | ccw[-1].flags |= CCW_FLAG_CC; | 335 | ccw[-1].flags |= CCW_FLAG_CC; |
327 | } | 336 | } |
328 | ccw->cmd_code = cmd; | 337 | ccw->cmd_code = cmd; |
329 | ccw->count = device->bp_block; | 338 | ccw->count = block->bp_block; |
330 | if (idal_is_needed(dst, blksize)) { | 339 | if (idal_is_needed(dst, blksize)) { |
331 | ccw->cda = (__u32)(addr_t) idaws; | 340 | ccw->cda = (__u32)(addr_t) idaws; |
332 | ccw->flags = CCW_FLAG_IDA; | 341 | ccw->flags = CCW_FLAG_IDA; |
@@ -342,7 +351,9 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req) | |||
342 | } | 351 | } |
343 | if (req->cmd_flags & REQ_FAILFAST) | 352 | if (req->cmd_flags & REQ_FAILFAST) |
344 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); | 353 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); |
345 | cqr->device = device; | 354 | cqr->startdev = memdev; |
355 | cqr->memdev = memdev; | ||
356 | cqr->block = block; | ||
346 | cqr->expires = 5 * 60 * HZ; /* 5 minutes */ | 357 | cqr->expires = 5 * 60 * HZ; /* 5 minutes */ |
347 | cqr->retries = 32; | 358 | cqr->retries = 32; |
348 | cqr->buildclk = get_clock(); | 359 | cqr->buildclk = get_clock(); |
@@ -363,8 +374,8 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req) | |||
363 | 374 | ||
364 | if (!dasd_page_cache) | 375 | if (!dasd_page_cache) |
365 | goto out; | 376 | goto out; |
366 | private = (struct dasd_fba_private *) cqr->device->private; | 377 | private = (struct dasd_fba_private *) cqr->block->base->private; |
367 | blksize = cqr->device->bp_block; | 378 | blksize = cqr->block->bp_block; |
368 | ccw = cqr->cpaddr; | 379 | ccw = cqr->cpaddr; |
369 | /* Skip over define extent & locate record. */ | 380 | /* Skip over define extent & locate record. */ |
370 | ccw++; | 381 | ccw++; |
@@ -394,10 +405,15 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req) | |||
394 | } | 405 | } |
395 | out: | 406 | out: |
396 | status = cqr->status == DASD_CQR_DONE; | 407 | status = cqr->status == DASD_CQR_DONE; |
397 | dasd_sfree_request(cqr, cqr->device); | 408 | dasd_sfree_request(cqr, cqr->memdev); |
398 | return status; | 409 | return status; |
399 | } | 410 | } |
400 | 411 | ||
412 | static void dasd_fba_handle_terminated_request(struct dasd_ccw_req *cqr) | ||
413 | { | ||
414 | cqr->status = DASD_CQR_FILLED; | ||
415 | }; | ||
416 | |||
401 | static int | 417 | static int |
402 | dasd_fba_fill_info(struct dasd_device * device, | 418 | dasd_fba_fill_info(struct dasd_device * device, |
403 | struct dasd_information2_t * info) | 419 | struct dasd_information2_t * info) |
@@ -546,9 +562,10 @@ static struct dasd_discipline dasd_fba_discipline = { | |||
546 | .fill_geometry = dasd_fba_fill_geometry, | 562 | .fill_geometry = dasd_fba_fill_geometry, |
547 | .start_IO = dasd_start_IO, | 563 | .start_IO = dasd_start_IO, |
548 | .term_IO = dasd_term_IO, | 564 | .term_IO = dasd_term_IO, |
549 | .examine_error = dasd_fba_examine_error, | 565 | .handle_terminated_request = dasd_fba_handle_terminated_request, |
550 | .erp_action = dasd_fba_erp_action, | 566 | .erp_action = dasd_fba_erp_action, |
551 | .erp_postaction = dasd_fba_erp_postaction, | 567 | .erp_postaction = dasd_fba_erp_postaction, |
568 | .handle_unsolicited_interrupt = dasd_fba_handle_unsolicited_interrupt, | ||
552 | .build_cp = dasd_fba_build_cp, | 569 | .build_cp = dasd_fba_build_cp, |
553 | .free_cp = dasd_fba_free_cp, | 570 | .free_cp = dasd_fba_free_cp, |
554 | .dump_sense = dasd_fba_dump_sense, | 571 | .dump_sense = dasd_fba_dump_sense, |
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c index 47ba4462708d..aee6565aaf98 100644 --- a/drivers/s390/block/dasd_genhd.c +++ b/drivers/s390/block/dasd_genhd.c | |||
@@ -25,14 +25,15 @@ | |||
25 | /* | 25 | /* |
26 | * Allocate and register gendisk structure for device. | 26 | * Allocate and register gendisk structure for device. |
27 | */ | 27 | */ |
28 | int | 28 | int dasd_gendisk_alloc(struct dasd_block *block) |
29 | dasd_gendisk_alloc(struct dasd_device *device) | ||
30 | { | 29 | { |
31 | struct gendisk *gdp; | 30 | struct gendisk *gdp; |
31 | struct dasd_device *base; | ||
32 | int len; | 32 | int len; |
33 | 33 | ||
34 | /* Make sure the minor for this device exists. */ | 34 | /* Make sure the minor for this device exists. */ |
35 | if (device->devindex >= DASD_PER_MAJOR) | 35 | base = block->base; |
36 | if (base->devindex >= DASD_PER_MAJOR) | ||
36 | return -EBUSY; | 37 | return -EBUSY; |
37 | 38 | ||
38 | gdp = alloc_disk(1 << DASD_PARTN_BITS); | 39 | gdp = alloc_disk(1 << DASD_PARTN_BITS); |
@@ -41,9 +42,9 @@ dasd_gendisk_alloc(struct dasd_device *device) | |||
41 | 42 | ||
42 | /* Initialize gendisk structure. */ | 43 | /* Initialize gendisk structure. */ |
43 | gdp->major = DASD_MAJOR; | 44 | gdp->major = DASD_MAJOR; |
44 | gdp->first_minor = device->devindex << DASD_PARTN_BITS; | 45 | gdp->first_minor = base->devindex << DASD_PARTN_BITS; |
45 | gdp->fops = &dasd_device_operations; | 46 | gdp->fops = &dasd_device_operations; |
46 | gdp->driverfs_dev = &device->cdev->dev; | 47 | gdp->driverfs_dev = &base->cdev->dev; |
47 | 48 | ||
48 | /* | 49 | /* |
49 | * Set device name. | 50 | * Set device name. |
@@ -53,53 +54,51 @@ dasd_gendisk_alloc(struct dasd_device *device) | |||
53 | * dasdaaaa - dasdzzzz : 456976 devices, added up = 475252 | 54 | * dasdaaaa - dasdzzzz : 456976 devices, added up = 475252 |
54 | */ | 55 | */ |
55 | len = sprintf(gdp->disk_name, "dasd"); | 56 | len = sprintf(gdp->disk_name, "dasd"); |
56 | if (device->devindex > 25) { | 57 | if (base->devindex > 25) { |
57 | if (device->devindex > 701) { | 58 | if (base->devindex > 701) { |
58 | if (device->devindex > 18277) | 59 | if (base->devindex > 18277) |
59 | len += sprintf(gdp->disk_name + len, "%c", | 60 | len += sprintf(gdp->disk_name + len, "%c", |
60 | 'a'+(((device->devindex-18278) | 61 | 'a'+(((base->devindex-18278) |
61 | /17576)%26)); | 62 | /17576)%26)); |
62 | len += sprintf(gdp->disk_name + len, "%c", | 63 | len += sprintf(gdp->disk_name + len, "%c", |
63 | 'a'+(((device->devindex-702)/676)%26)); | 64 | 'a'+(((base->devindex-702)/676)%26)); |
64 | } | 65 | } |
65 | len += sprintf(gdp->disk_name + len, "%c", | 66 | len += sprintf(gdp->disk_name + len, "%c", |
66 | 'a'+(((device->devindex-26)/26)%26)); | 67 | 'a'+(((base->devindex-26)/26)%26)); |
67 | } | 68 | } |
68 | len += sprintf(gdp->disk_name + len, "%c", 'a'+(device->devindex%26)); | 69 | len += sprintf(gdp->disk_name + len, "%c", 'a'+(base->devindex%26)); |
69 | 70 | ||
70 | if (device->features & DASD_FEATURE_READONLY) | 71 | if (block->base->features & DASD_FEATURE_READONLY) |
71 | set_disk_ro(gdp, 1); | 72 | set_disk_ro(gdp, 1); |
72 | gdp->private_data = device; | 73 | gdp->private_data = block; |
73 | gdp->queue = device->request_queue; | 74 | gdp->queue = block->request_queue; |
74 | device->gdp = gdp; | 75 | block->gdp = gdp; |
75 | set_capacity(device->gdp, 0); | 76 | set_capacity(block->gdp, 0); |
76 | add_disk(device->gdp); | 77 | add_disk(block->gdp); |
77 | return 0; | 78 | return 0; |
78 | } | 79 | } |
79 | 80 | ||
80 | /* | 81 | /* |
81 | * Unregister and free gendisk structure for device. | 82 | * Unregister and free gendisk structure for device. |
82 | */ | 83 | */ |
83 | void | 84 | void dasd_gendisk_free(struct dasd_block *block) |
84 | dasd_gendisk_free(struct dasd_device *device) | ||
85 | { | 85 | { |
86 | if (device->gdp) { | 86 | if (block->gdp) { |
87 | del_gendisk(device->gdp); | 87 | del_gendisk(block->gdp); |
88 | device->gdp->queue = NULL; | 88 | block->gdp->queue = NULL; |
89 | put_disk(device->gdp); | 89 | put_disk(block->gdp); |
90 | device->gdp = NULL; | 90 | block->gdp = NULL; |
91 | } | 91 | } |
92 | } | 92 | } |
93 | 93 | ||
94 | /* | 94 | /* |
95 | * Trigger a partition detection. | 95 | * Trigger a partition detection. |
96 | */ | 96 | */ |
97 | int | 97 | int dasd_scan_partitions(struct dasd_block *block) |
98 | dasd_scan_partitions(struct dasd_device * device) | ||
99 | { | 98 | { |
100 | struct block_device *bdev; | 99 | struct block_device *bdev; |
101 | 100 | ||
102 | bdev = bdget_disk(device->gdp, 0); | 101 | bdev = bdget_disk(block->gdp, 0); |
103 | if (!bdev || blkdev_get(bdev, FMODE_READ, 1) < 0) | 102 | if (!bdev || blkdev_get(bdev, FMODE_READ, 1) < 0) |
104 | return -ENODEV; | 103 | return -ENODEV; |
105 | /* | 104 | /* |
@@ -117,7 +116,7 @@ dasd_scan_partitions(struct dasd_device * device) | |||
117 | * is why the assignment to device->bdev is done AFTER | 116 | * is why the assignment to device->bdev is done AFTER |
118 | * the BLKRRPART ioctl. | 117 | * the BLKRRPART ioctl. |
119 | */ | 118 | */ |
120 | device->bdev = bdev; | 119 | block->bdev = bdev; |
121 | return 0; | 120 | return 0; |
122 | } | 121 | } |
123 | 122 | ||
@@ -125,8 +124,7 @@ dasd_scan_partitions(struct dasd_device * device) | |||
125 | * Remove all inodes in the system for a device, delete the | 124 | * Remove all inodes in the system for a device, delete the |
126 | * partitions and make device unusable by setting its size to zero. | 125 | * partitions and make device unusable by setting its size to zero. |
127 | */ | 126 | */ |
128 | void | 127 | void dasd_destroy_partitions(struct dasd_block *block) |
129 | dasd_destroy_partitions(struct dasd_device * device) | ||
130 | { | 128 | { |
131 | /* The two structs have 168/176 byte on 31/64 bit. */ | 129 | /* The two structs have 168/176 byte on 31/64 bit. */ |
132 | struct blkpg_partition bpart; | 130 | struct blkpg_partition bpart; |
@@ -137,8 +135,8 @@ dasd_destroy_partitions(struct dasd_device * device) | |||
137 | * Get the bdev pointer from the device structure and clear | 135 | * Get the bdev pointer from the device structure and clear |
138 | * device->bdev to lower the offline open_count limit again. | 136 | * device->bdev to lower the offline open_count limit again. |
139 | */ | 137 | */ |
140 | bdev = device->bdev; | 138 | bdev = block->bdev; |
141 | device->bdev = NULL; | 139 | block->bdev = NULL; |
142 | 140 | ||
143 | /* | 141 | /* |
144 | * See fs/partition/check.c:delete_partition | 142 | * See fs/partition/check.c:delete_partition |
@@ -149,17 +147,16 @@ dasd_destroy_partitions(struct dasd_device * device) | |||
149 | memset(&barg, 0, sizeof(struct blkpg_ioctl_arg)); | 147 | memset(&barg, 0, sizeof(struct blkpg_ioctl_arg)); |
150 | barg.data = (void __force __user *) &bpart; | 148 | barg.data = (void __force __user *) &bpart; |
151 | barg.op = BLKPG_DEL_PARTITION; | 149 | barg.op = BLKPG_DEL_PARTITION; |
152 | for (bpart.pno = device->gdp->minors - 1; bpart.pno > 0; bpart.pno--) | 150 | for (bpart.pno = block->gdp->minors - 1; bpart.pno > 0; bpart.pno--) |
153 | ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg); | 151 | ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg); |
154 | 152 | ||
155 | invalidate_partition(device->gdp, 0); | 153 | invalidate_partition(block->gdp, 0); |
156 | /* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */ | 154 | /* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */ |
157 | blkdev_put(bdev); | 155 | blkdev_put(bdev); |
158 | set_capacity(device->gdp, 0); | 156 | set_capacity(block->gdp, 0); |
159 | } | 157 | } |
160 | 158 | ||
161 | int | 159 | int dasd_gendisk_init(void) |
162 | dasd_gendisk_init(void) | ||
163 | { | 160 | { |
164 | int rc; | 161 | int rc; |
165 | 162 | ||
@@ -174,8 +171,7 @@ dasd_gendisk_init(void) | |||
174 | return 0; | 171 | return 0; |
175 | } | 172 | } |
176 | 173 | ||
177 | void | 174 | void dasd_gendisk_exit(void) |
178 | dasd_gendisk_exit(void) | ||
179 | { | 175 | { |
180 | unregister_blkdev(DASD_MAJOR, "dasd"); | 176 | unregister_blkdev(DASD_MAJOR, "dasd"); |
181 | } | 177 | } |
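The disk-name logic in dasd_gendisk_alloc() above maps the zero-based devindex to a base-26 suffix (dasda … dasdz, dasdaa … dasdzz, and so on). For illustration only — not part of the patch — the same rule as a standalone sketch, assuming a caller-supplied buffer large enough for "dasd" plus up to four letters:

	#include <stdio.h>

	/* Illustrative sketch: mirrors the naming rule in dasd_gendisk_alloc(). */
	static void dasd_disk_name(char *buf, unsigned int devindex)
	{
		int len = sprintf(buf, "dasd");

		if (devindex > 25) {
			if (devindex > 701) {
				if (devindex > 18277)
					len += sprintf(buf + len, "%c",
						'a' + ((devindex - 18278) / 17576) % 26);
				len += sprintf(buf + len, "%c",
					'a' + ((devindex - 702) / 676) % 26);
			}
			len += sprintf(buf + len, "%c",
				'a' + ((devindex - 26) / 26) % 26);
		}
		sprintf(buf + len, "%c", 'a' + devindex % 26);
	}

	/* 0 -> "dasda", 25 -> "dasdz", 26 -> "dasdaa", 701 -> "dasdzz", 702 -> "dasdaaa" */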
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index d427daeef511..44b2984dfbee 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h | |||
@@ -64,13 +64,7 @@ | |||
64 | * SECTION: Type definitions | 64 | * SECTION: Type definitions |
65 | */ | 65 | */ |
66 | struct dasd_device; | 66 | struct dasd_device; |
67 | 67 | struct dasd_block; | |
68 | typedef enum { | ||
69 | dasd_era_fatal = -1, /* no chance to recover */ | ||
70 | dasd_era_none = 0, /* don't recover, everything alright */ | ||
71 | dasd_era_msg = 1, /* don't recover, just report... */ | ||
72 | dasd_era_recover = 2 /* recovery action recommended */ | ||
73 | } dasd_era_t; | ||
74 | 68 | ||
75 | /* BIT DEFINITIONS FOR SENSE DATA */ | 69 | /* BIT DEFINITIONS FOR SENSE DATA */ |
76 | #define DASD_SENSE_BIT_0 0x80 | 70 | #define DASD_SENSE_BIT_0 0x80 |
@@ -151,19 +145,22 @@ do { \ | |||
151 | 145 | ||
152 | struct dasd_ccw_req { | 146 | struct dasd_ccw_req { |
153 | unsigned int magic; /* Eye catcher */ | 147 | unsigned int magic; /* Eye catcher */ |
154 | struct list_head list; /* list_head for request queueing. */ | 148 | struct list_head devlist; /* for dasd_device request queue */ |
149 | struct list_head blocklist; /* for dasd_block request queue */ | ||
155 | 150 | ||
156 | /* Where to execute what... */ | 151 | /* Where to execute what... */ |
157 | struct dasd_device *device; /* device the request is for */ | 152 | struct dasd_block *block; /* the originating block device */ |
153 | struct dasd_device *memdev; /* the device used to allocate this */ | ||
154 | struct dasd_device *startdev; /* device the request is started on */ | ||
158 | struct ccw1 *cpaddr; /* address of channel program */ | 155 | struct ccw1 *cpaddr; /* address of channel program */ |
159 | char status; /* status of this request */ | 156 | char status; /* status of this request */ |
160 | short retries; /* A retry counter */ | 157 | short retries; /* A retry counter */ |
161 | unsigned long flags; /* flags of this request */ | 158 | unsigned long flags; /* flags of this request */ |
162 | 159 | ||
163 | /* ... and how */ | 160 | /* ... and how */ |
164 | unsigned long starttime; /* jiffies time of request start */ | 161 | unsigned long starttime; /* jiffies time of request start */ |
165 | int expires; /* expiration period in jiffies */ | 162 | int expires; /* expiration period in jiffies */ |
166 | char lpm; /* logical path mask */ | 163 | char lpm; /* logical path mask */ |
167 | void *data; /* pointer to data area */ | 164 | void *data; /* pointer to data area */ |
168 | 165 | ||
169 | /* these are important for recovering erroneous requests */ | 166 | /* these are important for recovering erroneous requests */ |
@@ -178,20 +175,27 @@ struct dasd_ccw_req { | |||
178 | unsigned long long endclk; /* TOD-clock of request termination */ | 175 | unsigned long long endclk; /* TOD-clock of request termination */ |
179 | 176 | ||
180 | /* Callback that is called after reaching final status. */ | 177 | /* Callback that is called after reaching final status. */ |
181 | void (*callback)(struct dasd_ccw_req *, void *data); | 178 | void (*callback)(struct dasd_ccw_req *, void *data); |
182 | void *callback_data; | 179 | void *callback_data; |
183 | }; | 180 | }; |
184 | 181 | ||
185 | /* | 182 | /* |
186 | * dasd_ccw_req -> status can be: | 183 | * dasd_ccw_req -> status can be: |
187 | */ | 184 | */ |
188 | #define DASD_CQR_FILLED 0x00 /* request is ready to be processed */ | 185 | #define DASD_CQR_FILLED 0x00 /* request is ready to be processed */ |
189 | #define DASD_CQR_QUEUED 0x01 /* request is queued to be processed */ | 186 | #define DASD_CQR_DONE 0x01 /* request is completed successfully */ |
190 | #define DASD_CQR_IN_IO 0x02 /* request is currently in IO */ | 187 | #define DASD_CQR_NEED_ERP 0x02 /* request needs recovery action */ |
191 | #define DASD_CQR_DONE 0x03 /* request is completed successfully */ | 188 | #define DASD_CQR_IN_ERP 0x03 /* request is in recovery */ |
192 | #define DASD_CQR_ERROR 0x04 /* request is completed with error */ | 189 | #define DASD_CQR_FAILED 0x04 /* request is finally failed */ |
193 | #define DASD_CQR_FAILED 0x05 /* request is finally failed */ | 190 | #define DASD_CQR_TERMINATED 0x05 /* request was stopped by driver */ |
194 | #define DASD_CQR_CLEAR 0x06 /* request is clear pending */ | 191 | |
192 | #define DASD_CQR_QUEUED 0x80 /* request is queued to be processed */ | ||
193 | #define DASD_CQR_IN_IO 0x81 /* request is currently in IO */ | ||
194 | #define DASD_CQR_ERROR 0x82 /* request is completed with error */ | ||
195 | #define DASD_CQR_CLEAR_PENDING 0x83 /* request is clear pending */ | ||
196 | #define DASD_CQR_CLEARED 0x84 /* request was cleared */ | ||
197 | #define DASD_CQR_SUCCESS 0x85 /* request was successful */ ||
198 | |||
195 | 199 | ||
196 | /* per dasd_ccw_req flags */ | 200 | /* per dasd_ccw_req flags */ |
197 | #define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */ | 201 | #define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */ |
@@ -214,52 +218,71 @@ struct dasd_discipline { | |||
214 | 218 | ||
215 | struct list_head list; /* used for list of disciplines */ | 219 | struct list_head list; /* used for list of disciplines */ |
216 | 220 | ||
217 | /* | 221 | /* |
218 | * Device recognition functions. check_device is used to verify | 222 | * Device recognition functions. check_device is used to verify |
219 | * the sense data and the information returned by read device | 223 | * the sense data and the information returned by read device |
220 | * characteristics. It returns 0 if the discipline can be used | 224 | * characteristics. It returns 0 if the discipline can be used |
221 | * for the device in question. | 225 | * for the device in question. uncheck_device is called during |
222 | * do_analysis is used in the step from device state "basic" to | 226 | * device shutdown to deregister a device from its discipline. |
223 | * state "accept". It returns 0 if the device can be made ready, | 227 | */ |
224 | * it returns -EMEDIUMTYPE if the device can't be made ready or | 228 | int (*check_device) (struct dasd_device *); |
225 | * -EAGAIN if do_analysis started a ccw that needs to complete | 229 | void (*uncheck_device) (struct dasd_device *); |
226 | * before the analysis may be repeated. | 230 | |
227 | */ | 231 | /* |
228 | int (*check_device)(struct dasd_device *); | 232 | * do_analysis is used in the step from device state "basic" to |
229 | int (*do_analysis) (struct dasd_device *); | 233 | * state "accept". It returns 0 if the device can be made ready, |
230 | 234 | * it returns -EMEDIUMTYPE if the device can't be made ready or | |
231 | /* | 235 | * -EAGAIN if do_analysis started a ccw that needs to complete |
232 | * Device operation functions. build_cp creates a ccw chain for | 236 | * before the analysis may be repeated. |
233 | * a block device request, start_io starts the request and | 237 | */ |
234 | * term_IO cancels it (e.g. in case of a timeout). format_device | 238 | int (*do_analysis) (struct dasd_block *); |
235 | * returns a ccw chain to be used to format the device. | 239 | |
236 | */ | 240 | /* |
241 | * Last things to do when a device is set online, and first things | ||
242 | * when it is set offline. | ||
243 | */ | ||
244 | int (*ready_to_online) (struct dasd_device *); | ||
245 | int (*online_to_ready) (struct dasd_device *); | ||
246 | |||
247 | /* | ||
248 | * Device operation functions. build_cp creates a ccw chain for | ||
249 | * a block device request, start_io starts the request and | ||
250 | * term_IO cancels it (e.g. in case of a timeout). format_device | ||
251 | * returns a ccw chain to be used to format the device. | ||
252 | * handle_terminated_request can be used to examine a cqr and | ||
253 | * prepare it for retry. | ||
254 | */ | ||
237 | struct dasd_ccw_req *(*build_cp) (struct dasd_device *, | 255 | struct dasd_ccw_req *(*build_cp) (struct dasd_device *, |
256 | struct dasd_block *, | ||
238 | struct request *); | 257 | struct request *); |
239 | int (*start_IO) (struct dasd_ccw_req *); | 258 | int (*start_IO) (struct dasd_ccw_req *); |
240 | int (*term_IO) (struct dasd_ccw_req *); | 259 | int (*term_IO) (struct dasd_ccw_req *); |
260 | void (*handle_terminated_request) (struct dasd_ccw_req *); | ||
241 | struct dasd_ccw_req *(*format_device) (struct dasd_device *, | 261 | struct dasd_ccw_req *(*format_device) (struct dasd_device *, |
242 | struct format_data_t *); | 262 | struct format_data_t *); |
243 | int (*free_cp) (struct dasd_ccw_req *, struct request *); | 263 | int (*free_cp) (struct dasd_ccw_req *, struct request *); |
244 | /* | 264 | |
245 | * Error recovery functions. examine_error() returns a value that | 265 | /* |
246 | * indicates what to do for an error condition. If examine_error() | 266 | * Error recovery functions. examine_error() returns a value that |
267 | * indicates what to do for an error condition. If examine_error() | ||
247 | * returns 'dasd_era_recover' erp_action() is called to create a | 268 | * returns 'dasd_era_recover' erp_action() is called to create a |
248 | * special error recovery ccw. erp_postaction() is called after | 269 | * special error recovery ccw. erp_postaction() is called after |
249 | * an error recovery ccw has finished its execution. dump_sense | 270 | * an error recovery ccw has finished its execution. dump_sense |
250 | * is called for every error condition to print the sense data | 271 | * is called for every error condition to print the sense data |
251 | * to the console. | 272 | * to the console. |
252 | */ | 273 | */ |
253 | dasd_era_t(*examine_error) (struct dasd_ccw_req *, struct irb *); | ||
254 | dasd_erp_fn_t(*erp_action) (struct dasd_ccw_req *); | 274 | dasd_erp_fn_t(*erp_action) (struct dasd_ccw_req *); |
255 | dasd_erp_fn_t(*erp_postaction) (struct dasd_ccw_req *); | 275 | dasd_erp_fn_t(*erp_postaction) (struct dasd_ccw_req *); |
256 | void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *, | 276 | void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *, |
257 | struct irb *); | 277 | struct irb *); |
258 | 278 | ||
279 | void (*handle_unsolicited_interrupt) (struct dasd_device *, | ||
280 | struct irb *); | ||
281 | |||
259 | /* i/o control functions. */ | 282 | /* i/o control functions. */ |
260 | int (*fill_geometry) (struct dasd_device *, struct hd_geometry *); | 283 | int (*fill_geometry) (struct dasd_block *, struct hd_geometry *); |
261 | int (*fill_info) (struct dasd_device *, struct dasd_information2_t *); | 284 | int (*fill_info) (struct dasd_device *, struct dasd_information2_t *); |
262 | int (*ioctl) (struct dasd_device *, unsigned int, void __user *); | 285 | int (*ioctl) (struct dasd_block *, unsigned int, void __user *); |
263 | }; | 286 | }; |
264 | 287 | ||
265 | extern struct dasd_discipline *dasd_diag_discipline_pointer; | 288 | extern struct dasd_discipline *dasd_diag_discipline_pointer; |
@@ -267,12 +290,18 @@ extern struct dasd_discipline *dasd_diag_discipline_pointer; | |||
267 | /* | 290 | /* |
268 | * Unique identifier for dasd device. | 291 | * Unique identifier for dasd device. |
269 | */ | 292 | */ |
293 | #define UA_NOT_CONFIGURED 0x00 | ||
294 | #define UA_BASE_DEVICE 0x01 | ||
295 | #define UA_BASE_PAV_ALIAS 0x02 | ||
296 | #define UA_HYPER_PAV_ALIAS 0x03 | ||
297 | |||
270 | struct dasd_uid { | 298 | struct dasd_uid { |
271 | __u8 alias; | 299 | __u8 type; |
272 | char vendor[4]; | 300 | char vendor[4]; |
273 | char serial[15]; | 301 | char serial[15]; |
274 | __u16 ssid; | 302 | __u16 ssid; |
275 | __u8 unit_addr; | 303 | __u8 real_unit_addr; |
304 | __u8 base_unit_addr; | ||
276 | }; | 305 | }; |
277 | 306 | ||
278 | /* | 307 | /* |
@@ -293,14 +322,9 @@ struct dasd_uid { | |||
293 | 322 | ||
294 | struct dasd_device { | 323 | struct dasd_device { |
295 | /* Block device stuff. */ | 324 | /* Block device stuff. */ |
296 | struct gendisk *gdp; | 325 | struct dasd_block *block; |
297 | struct request_queue *request_queue; | 326 | |
298 | spinlock_t request_queue_lock; | ||
299 | struct block_device *bdev; | ||
300 | unsigned int devindex; | 327 | unsigned int devindex; |
301 | unsigned long blocks; /* size of volume in blocks */ | ||
302 | unsigned int bp_block; /* bytes per block */ | ||
303 | unsigned int s2b_shift; /* log2 (bp_block/512) */ | ||
304 | unsigned long flags; /* per device flags */ | 328 | unsigned long flags; /* per device flags */ |
305 | unsigned short features; /* copy of devmap-features (read-only!) */ | 329 | unsigned short features; /* copy of devmap-features (read-only!) */ |
306 | 330 | ||
@@ -316,9 +340,8 @@ struct dasd_device { | |||
316 | int state, target; | 340 | int state, target; |
317 | int stopped; /* device (ccw_device_start) was stopped */ | 341 | int stopped; /* device (ccw_device_start) was stopped */ |
318 | 342 | ||
319 | /* Open and reference count. */ | 343 | /* reference count. */ |
320 | atomic_t ref_count; | 344 | atomic_t ref_count; |
321 | atomic_t open_count; | ||
322 | 345 | ||
323 | /* ccw queue and memory for static ccw/erp buffers. */ | 346 | /* ccw queue and memory for static ccw/erp buffers. */ |
324 | struct list_head ccw_queue; | 347 | struct list_head ccw_queue; |
@@ -337,20 +360,45 @@ struct dasd_device { | |||
337 | 360 | ||
338 | struct ccw_device *cdev; | 361 | struct ccw_device *cdev; |
339 | 362 | ||
363 | /* hook for alias management */ | ||
364 | struct list_head alias_list; | ||
365 | }; | ||
366 | |||
367 | struct dasd_block { | ||
368 | /* Block device stuff. */ | ||
369 | struct gendisk *gdp; | ||
370 | struct request_queue *request_queue; | ||
371 | spinlock_t request_queue_lock; | ||
372 | struct block_device *bdev; | ||
373 | atomic_t open_count; | ||
374 | |||
375 | unsigned long blocks; /* size of volume in blocks */ | ||
376 | unsigned int bp_block; /* bytes per block */ | ||
377 | unsigned int s2b_shift; /* log2 (bp_block/512) */ | ||
378 | |||
379 | struct dasd_device *base; | ||
380 | struct list_head ccw_queue; | ||
381 | spinlock_t queue_lock; | ||
382 | |||
383 | atomic_t tasklet_scheduled; | ||
384 | struct tasklet_struct tasklet; | ||
385 | struct timer_list timer; | ||
386 | |||
340 | #ifdef CONFIG_DASD_PROFILE | 387 | #ifdef CONFIG_DASD_PROFILE |
341 | struct dasd_profile_info_t profile; | 388 | struct dasd_profile_info_t profile; |
342 | #endif | 389 | #endif |
343 | }; | 390 | }; |
344 | 391 | ||
392 | |||
393 | |||
345 | /* reasons why device (ccw_device_start) was stopped */ | 394 | /* reasons why device (ccw_device_start) was stopped */ |
346 | #define DASD_STOPPED_NOT_ACC 1 /* not accessible */ | 395 | #define DASD_STOPPED_NOT_ACC 1 /* not accessible */ |
347 | #define DASD_STOPPED_QUIESCE 2 /* Quiesced */ | 396 | #define DASD_STOPPED_QUIESCE 2 /* Quiesced */ |
348 | #define DASD_STOPPED_PENDING 4 /* long busy */ | 397 | #define DASD_STOPPED_PENDING 4 /* long busy */ |
349 | #define DASD_STOPPED_DC_WAIT 8 /* disconnected, wait */ | 398 | #define DASD_STOPPED_DC_WAIT 8 /* disconnected, wait */ |
350 | #define DASD_STOPPED_DC_EIO 16 /* disconnected, return -EIO */ | 399 | #define DASD_STOPPED_SU 16 /* summary unit check handling */ |
351 | 400 | ||
352 | /* per device flags */ | 401 | /* per device flags */ |
353 | #define DASD_FLAG_DSC_ERROR 2 /* return -EIO when disconnected */ | ||
354 | #define DASD_FLAG_OFFLINE 3 /* device is in offline processing */ | 402 | #define DASD_FLAG_OFFLINE 3 /* device is in offline processing */ |
355 | #define DASD_FLAG_EER_SNSS 4 /* A SNSS is required */ | 403 | #define DASD_FLAG_EER_SNSS 4 /* A SNSS is required */ |
356 | #define DASD_FLAG_EER_IN_USE 5 /* A SNSS request is running */ | 404 | #define DASD_FLAG_EER_IN_USE 5 /* A SNSS request is running */ |
@@ -489,6 +537,9 @@ dasd_kmalloc_set_cda(struct ccw1 *ccw, void *cda, struct dasd_device *device) | |||
489 | struct dasd_device *dasd_alloc_device(void); | 537 | struct dasd_device *dasd_alloc_device(void); |
490 | void dasd_free_device(struct dasd_device *); | 538 | void dasd_free_device(struct dasd_device *); |
491 | 539 | ||
540 | struct dasd_block *dasd_alloc_block(void); | ||
541 | void dasd_free_block(struct dasd_block *); | ||
542 | |||
492 | void dasd_enable_device(struct dasd_device *); | 543 | void dasd_enable_device(struct dasd_device *); |
493 | void dasd_set_target_state(struct dasd_device *, int); | 544 | void dasd_set_target_state(struct dasd_device *, int); |
494 | void dasd_kick_device(struct dasd_device *); | 545 | void dasd_kick_device(struct dasd_device *); |
@@ -497,18 +548,23 @@ void dasd_add_request_head(struct dasd_ccw_req *); | |||
497 | void dasd_add_request_tail(struct dasd_ccw_req *); | 548 | void dasd_add_request_tail(struct dasd_ccw_req *); |
498 | int dasd_start_IO(struct dasd_ccw_req *); | 549 | int dasd_start_IO(struct dasd_ccw_req *); |
499 | int dasd_term_IO(struct dasd_ccw_req *); | 550 | int dasd_term_IO(struct dasd_ccw_req *); |
500 | void dasd_schedule_bh(struct dasd_device *); | 551 | void dasd_schedule_device_bh(struct dasd_device *); |
552 | void dasd_schedule_block_bh(struct dasd_block *); | ||
501 | int dasd_sleep_on(struct dasd_ccw_req *); | 553 | int dasd_sleep_on(struct dasd_ccw_req *); |
502 | int dasd_sleep_on_immediatly(struct dasd_ccw_req *); | 554 | int dasd_sleep_on_immediatly(struct dasd_ccw_req *); |
503 | int dasd_sleep_on_interruptible(struct dasd_ccw_req *); | 555 | int dasd_sleep_on_interruptible(struct dasd_ccw_req *); |
504 | void dasd_set_timer(struct dasd_device *, int); | 556 | void dasd_device_set_timer(struct dasd_device *, int); |
505 | void dasd_clear_timer(struct dasd_device *); | 557 | void dasd_device_clear_timer(struct dasd_device *); |
558 | void dasd_block_set_timer(struct dasd_block *, int); | ||
559 | void dasd_block_clear_timer(struct dasd_block *); | ||
506 | int dasd_cancel_req(struct dasd_ccw_req *); | 560 | int dasd_cancel_req(struct dasd_ccw_req *); |
561 | int dasd_flush_device_queue(struct dasd_device *); | ||
507 | int dasd_generic_probe (struct ccw_device *, struct dasd_discipline *); | 562 | int dasd_generic_probe (struct ccw_device *, struct dasd_discipline *); |
508 | void dasd_generic_remove (struct ccw_device *cdev); | 563 | void dasd_generic_remove (struct ccw_device *cdev); |
509 | int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *); | 564 | int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *); |
510 | int dasd_generic_set_offline (struct ccw_device *cdev); | 565 | int dasd_generic_set_offline (struct ccw_device *cdev); |
511 | int dasd_generic_notify(struct ccw_device *, int); | 566 | int dasd_generic_notify(struct ccw_device *, int); |
567 | void dasd_generic_handle_state_change(struct dasd_device *); | ||
512 | 568 | ||
513 | int dasd_generic_read_dev_chars(struct dasd_device *, char *, void **, int); | 569 | int dasd_generic_read_dev_chars(struct dasd_device *, char *, void **, int); |
514 | 570 | ||
@@ -542,10 +598,10 @@ int dasd_busid_known(char *); | |||
542 | /* externals in dasd_gendisk.c */ | 598 | /* externals in dasd_gendisk.c */ |
543 | int dasd_gendisk_init(void); | 599 | int dasd_gendisk_init(void); |
544 | void dasd_gendisk_exit(void); | 600 | void dasd_gendisk_exit(void); |
545 | int dasd_gendisk_alloc(struct dasd_device *); | 601 | int dasd_gendisk_alloc(struct dasd_block *); |
546 | void dasd_gendisk_free(struct dasd_device *); | 602 | void dasd_gendisk_free(struct dasd_block *); |
547 | int dasd_scan_partitions(struct dasd_device *); | 603 | int dasd_scan_partitions(struct dasd_block *); |
548 | void dasd_destroy_partitions(struct dasd_device *); | 604 | void dasd_destroy_partitions(struct dasd_block *); |
549 | 605 | ||
550 | /* externals in dasd_ioctl.c */ | 606 | /* externals in dasd_ioctl.c */ |
551 | int dasd_ioctl(struct inode *, struct file *, unsigned int, unsigned long); | 607 | int dasd_ioctl(struct inode *, struct file *, unsigned int, unsigned long); |
@@ -563,20 +619,9 @@ struct dasd_ccw_req *dasd_alloc_erp_request(char *, int, int, | |||
563 | void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *); | 619 | void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *); |
564 | void dasd_log_sense(struct dasd_ccw_req *, struct irb *); | 620 | void dasd_log_sense(struct dasd_ccw_req *, struct irb *); |
565 | 621 | ||
566 | /* externals in dasd_3370_erp.c */ | ||
567 | dasd_era_t dasd_3370_erp_examine(struct dasd_ccw_req *, struct irb *); | ||
568 | |||
569 | /* externals in dasd_3990_erp.c */ | 622 | /* externals in dasd_3990_erp.c */ |
570 | dasd_era_t dasd_3990_erp_examine(struct dasd_ccw_req *, struct irb *); | ||
571 | struct dasd_ccw_req *dasd_3990_erp_action(struct dasd_ccw_req *); | 623 | struct dasd_ccw_req *dasd_3990_erp_action(struct dasd_ccw_req *); |
572 | 624 | ||
573 | /* externals in dasd_9336_erp.c */ | ||
574 | dasd_era_t dasd_9336_erp_examine(struct dasd_ccw_req *, struct irb *); | ||
575 | |||
576 | /* externals in dasd_9336_erp.c */ | ||
577 | dasd_era_t dasd_9343_erp_examine(struct dasd_ccw_req *, struct irb *); | ||
578 | struct dasd_ccw_req *dasd_9343_erp_action(struct dasd_ccw_req *); | ||
579 | |||
580 | /* externals in dasd_eer.c */ | 625 | /* externals in dasd_eer.c */ |
581 | #ifdef CONFIG_DASD_EER | 626 | #ifdef CONFIG_DASD_EER |
582 | int dasd_eer_init(void); | 627 | int dasd_eer_init(void); |
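The dasd_int.h changes above split the old dasd_device into a hardware-facing dasd_device and a block-layer dasd_block, and a dasd_ccw_req now carries three device references (memdev, startdev, block). A minimal sketch of how the new structures relate, assuming only the definitions shown above; the helper name is made up for illustration:

	/* Sketch only: navigating the new split, given the structs above. */
	static struct ccw_device *cqr_to_ccwdev(struct dasd_ccw_req *cqr)
	{
		/*
		 * Requests that came in through the block layer have a
		 * dasd_block; its ->base is the hardware device that owns
		 * the ccw_device used for I/O. memdev and startdev may
		 * differ once alias (PAV) devices come into play.
		 */
		if (cqr->block)
			return cqr->block->base->cdev;
		/* Device-internal requests are tied directly to a device. */
		return cqr->startdev->cdev;
	}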
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index 672eb0a3dd0b..91a64630cb0f 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c | |||
@@ -38,15 +38,15 @@ dasd_ioctl_api_version(void __user *argp) | |||
38 | static int | 38 | static int |
39 | dasd_ioctl_enable(struct block_device *bdev) | 39 | dasd_ioctl_enable(struct block_device *bdev) |
40 | { | 40 | { |
41 | struct dasd_device *device = bdev->bd_disk->private_data; | 41 | struct dasd_block *block = bdev->bd_disk->private_data; |
42 | 42 | ||
43 | if (!capable(CAP_SYS_ADMIN)) | 43 | if (!capable(CAP_SYS_ADMIN)) |
44 | return -EACCES; | 44 | return -EACCES; |
45 | 45 | ||
46 | dasd_enable_device(device); | 46 | dasd_enable_device(block->base); |
47 | /* Formatting the dasd device can change the capacity. */ | 47 | /* Formatting the dasd device can change the capacity. */ |
48 | mutex_lock(&bdev->bd_mutex); | 48 | mutex_lock(&bdev->bd_mutex); |
49 | i_size_write(bdev->bd_inode, (loff_t)get_capacity(device->gdp) << 9); | 49 | i_size_write(bdev->bd_inode, (loff_t)get_capacity(block->gdp) << 9); |
50 | mutex_unlock(&bdev->bd_mutex); | 50 | mutex_unlock(&bdev->bd_mutex); |
51 | return 0; | 51 | return 0; |
52 | } | 52 | } |
@@ -58,7 +58,7 @@ dasd_ioctl_enable(struct block_device *bdev) | |||
58 | static int | 58 | static int |
59 | dasd_ioctl_disable(struct block_device *bdev) | 59 | dasd_ioctl_disable(struct block_device *bdev) |
60 | { | 60 | { |
61 | struct dasd_device *device = bdev->bd_disk->private_data; | 61 | struct dasd_block *block = bdev->bd_disk->private_data; |
62 | 62 | ||
63 | if (!capable(CAP_SYS_ADMIN)) | 63 | if (!capable(CAP_SYS_ADMIN)) |
64 | return -EACCES; | 64 | return -EACCES; |
@@ -71,7 +71,7 @@ dasd_ioctl_disable(struct block_device *bdev) | |||
71 | * using the BIODASDFMT ioctl. Therefore the correct state for the | 71 | * using the BIODASDFMT ioctl. Therefore the correct state for the |
72 | * device is DASD_STATE_BASIC that allows to do basic i/o. | 72 | * device is DASD_STATE_BASIC that allows to do basic i/o. |
73 | */ | 73 | */ |
74 | dasd_set_target_state(device, DASD_STATE_BASIC); | 74 | dasd_set_target_state(block->base, DASD_STATE_BASIC); |
75 | /* | 75 | /* |
76 | * Set i_size to zero, since read, write, etc. check against this | 76 | * Set i_size to zero, since read, write, etc. check against this |
77 | * value. | 77 | * value. |
@@ -85,19 +85,19 @@ dasd_ioctl_disable(struct block_device *bdev) | |||
85 | /* | 85 | /* |
86 | * Quiesce device. | 86 | * Quiesce device. |
87 | */ | 87 | */ |
88 | static int | 88 | static int dasd_ioctl_quiesce(struct dasd_block *block) |
89 | dasd_ioctl_quiesce(struct dasd_device *device) | ||
90 | { | 89 | { |
91 | unsigned long flags; | 90 | unsigned long flags; |
91 | struct dasd_device *base; | ||
92 | 92 | ||
93 | base = block->base; | ||
93 | if (!capable (CAP_SYS_ADMIN)) | 94 | if (!capable (CAP_SYS_ADMIN)) |
94 | return -EACCES; | 95 | return -EACCES; |
95 | 96 | ||
96 | DEV_MESSAGE (KERN_DEBUG, device, "%s", | 97 | DEV_MESSAGE(KERN_DEBUG, base, "%s", "Quiesce IO on device"); |
97 | "Quiesce IO on device"); | 98 | spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); |
98 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | 99 | base->stopped |= DASD_STOPPED_QUIESCE; |
99 | device->stopped |= DASD_STOPPED_QUIESCE; | 100 | spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); |
100 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | ||
101 | return 0; | 101 | return 0; |
102 | } | 102 | } |
103 | 103 | ||
@@ -105,22 +105,21 @@ dasd_ioctl_quiesce(struct dasd_device *device) | |||
105 | /* | 105 | /* |
106 | * Quiesce device. | 106 | * Quiesce device. |
107 | */ | 107 | */ |
108 | static int | 108 | static int dasd_ioctl_resume(struct dasd_block *block) |
109 | dasd_ioctl_resume(struct dasd_device *device) | ||
110 | { | 109 | { |
111 | unsigned long flags; | 110 | unsigned long flags; |
111 | struct dasd_device *base; | ||
112 | 112 | ||
113 | base = block->base; | ||
113 | if (!capable (CAP_SYS_ADMIN)) | 114 | if (!capable (CAP_SYS_ADMIN)) |
114 | return -EACCES; | 115 | return -EACCES; |
115 | 116 | ||
116 | DEV_MESSAGE (KERN_DEBUG, device, "%s", | 117 | DEV_MESSAGE(KERN_DEBUG, base, "%s", "resume IO on device"); |
117 | "resume IO on device"); | 118 | spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); |
118 | 119 | base->stopped &= ~DASD_STOPPED_QUIESCE; | |
119 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | 120 | spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); |
120 | device->stopped &= ~DASD_STOPPED_QUIESCE; | ||
121 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | ||
122 | 121 | ||
123 | dasd_schedule_bh (device); | 122 | dasd_schedule_block_bh(block); |
124 | return 0; | 123 | return 0; |
125 | } | 124 | } |
126 | 125 | ||
@@ -130,22 +129,23 @@ dasd_ioctl_resume(struct dasd_device *device) | |||
130 | * commands to format a single unit of the device. In terms of the ECKD | 129 | * commands to format a single unit of the device. In terms of the ECKD |
131 | * devices this means CCWs are generated to format a single track. | 130 | * devices this means CCWs are generated to format a single track. |
132 | */ | 131 | */ |
133 | static int | 132 | static int dasd_format(struct dasd_block *block, struct format_data_t *fdata) |
134 | dasd_format(struct dasd_device * device, struct format_data_t * fdata) | ||
135 | { | 133 | { |
136 | struct dasd_ccw_req *cqr; | 134 | struct dasd_ccw_req *cqr; |
135 | struct dasd_device *base; | ||
137 | int rc; | 136 | int rc; |
138 | 137 | ||
139 | if (device->discipline->format_device == NULL) | 138 | base = block->base; |
139 | if (base->discipline->format_device == NULL) | ||
140 | return -EPERM; | 140 | return -EPERM; |
141 | 141 | ||
142 | if (device->state != DASD_STATE_BASIC) { | 142 | if (base->state != DASD_STATE_BASIC) { |
143 | DEV_MESSAGE(KERN_WARNING, device, "%s", | 143 | DEV_MESSAGE(KERN_WARNING, base, "%s", |
144 | "dasd_format: device is not disabled! "); | 144 | "dasd_format: device is not disabled! "); |
145 | return -EBUSY; | 145 | return -EBUSY; |
146 | } | 146 | } |
147 | 147 | ||
148 | DBF_DEV_EVENT(DBF_NOTICE, device, | 148 | DBF_DEV_EVENT(DBF_NOTICE, base, |
149 | "formatting units %d to %d (%d B blocks) flags %d", | 149 | "formatting units %d to %d (%d B blocks) flags %d", |
150 | fdata->start_unit, | 150 | fdata->start_unit, |
151 | fdata->stop_unit, fdata->blksize, fdata->intensity); | 151 | fdata->stop_unit, fdata->blksize, fdata->intensity); |
@@ -156,20 +156,20 @@ dasd_format(struct dasd_device * device, struct format_data_t * fdata) | |||
156 | * enabling the device later. | 156 | * enabling the device later. |
157 | */ | 157 | */ |
158 | if (fdata->start_unit == 0) { | 158 | if (fdata->start_unit == 0) { |
159 | struct block_device *bdev = bdget_disk(device->gdp, 0); | 159 | struct block_device *bdev = bdget_disk(block->gdp, 0); |
160 | bdev->bd_inode->i_blkbits = blksize_bits(fdata->blksize); | 160 | bdev->bd_inode->i_blkbits = blksize_bits(fdata->blksize); |
161 | bdput(bdev); | 161 | bdput(bdev); |
162 | } | 162 | } |
163 | 163 | ||
164 | while (fdata->start_unit <= fdata->stop_unit) { | 164 | while (fdata->start_unit <= fdata->stop_unit) { |
165 | cqr = device->discipline->format_device(device, fdata); | 165 | cqr = base->discipline->format_device(base, fdata); |
166 | if (IS_ERR(cqr)) | 166 | if (IS_ERR(cqr)) |
167 | return PTR_ERR(cqr); | 167 | return PTR_ERR(cqr); |
168 | rc = dasd_sleep_on_interruptible(cqr); | 168 | rc = dasd_sleep_on_interruptible(cqr); |
169 | dasd_sfree_request(cqr, cqr->device); | 169 | dasd_sfree_request(cqr, cqr->memdev); |
170 | if (rc) { | 170 | if (rc) { |
171 | if (rc != -ERESTARTSYS) | 171 | if (rc != -ERESTARTSYS) |
172 | DEV_MESSAGE(KERN_ERR, device, | 172 | DEV_MESSAGE(KERN_ERR, base, |
173 | " Formatting of unit %d failed " | 173 | " Formatting of unit %d failed " |
174 | "with rc = %d", | 174 | "with rc = %d", |
175 | fdata->start_unit, rc); | 175 | fdata->start_unit, rc); |
@@ -186,7 +186,7 @@ dasd_format(struct dasd_device * device, struct format_data_t * fdata) | |||
186 | static int | 186 | static int |
187 | dasd_ioctl_format(struct block_device *bdev, void __user *argp) | 187 | dasd_ioctl_format(struct block_device *bdev, void __user *argp) |
188 | { | 188 | { |
189 | struct dasd_device *device = bdev->bd_disk->private_data; | 189 | struct dasd_block *block = bdev->bd_disk->private_data; |
190 | struct format_data_t fdata; | 190 | struct format_data_t fdata; |
191 | 191 | ||
192 | if (!capable(CAP_SYS_ADMIN)) | 192 | if (!capable(CAP_SYS_ADMIN)) |
@@ -194,51 +194,47 @@ dasd_ioctl_format(struct block_device *bdev, void __user *argp) | |||
194 | if (!argp) | 194 | if (!argp) |
195 | return -EINVAL; | 195 | return -EINVAL; |
196 | 196 | ||
197 | if (device->features & DASD_FEATURE_READONLY) | 197 | if (block->base->features & DASD_FEATURE_READONLY) |
198 | return -EROFS; | 198 | return -EROFS; |
199 | if (copy_from_user(&fdata, argp, sizeof(struct format_data_t))) | 199 | if (copy_from_user(&fdata, argp, sizeof(struct format_data_t))) |
200 | return -EFAULT; | 200 | return -EFAULT; |
201 | if (bdev != bdev->bd_contains) { | 201 | if (bdev != bdev->bd_contains) { |
202 | DEV_MESSAGE(KERN_WARNING, device, "%s", | 202 | DEV_MESSAGE(KERN_WARNING, block->base, "%s", |
203 | "Cannot low-level format a partition"); | 203 | "Cannot low-level format a partition"); |
204 | return -EINVAL; | 204 | return -EINVAL; |
205 | } | 205 | } |
206 | return dasd_format(device, &fdata); | 206 | return dasd_format(block, &fdata); |
207 | } | 207 | } |
208 | 208 | ||
209 | #ifdef CONFIG_DASD_PROFILE | 209 | #ifdef CONFIG_DASD_PROFILE |
210 | /* | 210 | /* |
211 | * Reset device profile information | 211 | * Reset device profile information |
212 | */ | 212 | */ |
213 | static int | 213 | static int dasd_ioctl_reset_profile(struct dasd_block *block) |
214 | dasd_ioctl_reset_profile(struct dasd_device *device) | ||
215 | { | 214 | { |
216 | memset(&device->profile, 0, sizeof (struct dasd_profile_info_t)); | 215 | memset(&block->profile, 0, sizeof(struct dasd_profile_info_t)); |
217 | return 0; | 216 | return 0; |
218 | } | 217 | } |
219 | 218 | ||
220 | /* | 219 | /* |
221 | * Return device profile information | 220 | * Return device profile information |
222 | */ | 221 | */ |
223 | static int | 222 | static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp) |
224 | dasd_ioctl_read_profile(struct dasd_device *device, void __user *argp) | ||
225 | { | 223 | { |
226 | if (dasd_profile_level == DASD_PROFILE_OFF) | 224 | if (dasd_profile_level == DASD_PROFILE_OFF) |
227 | return -EIO; | 225 | return -EIO; |
228 | if (copy_to_user(argp, &device->profile, | 226 | if (copy_to_user(argp, &block->profile, |
229 | sizeof (struct dasd_profile_info_t))) | 227 | sizeof(struct dasd_profile_info_t))) |
230 | return -EFAULT; | 228 | return -EFAULT; |
231 | return 0; | 229 | return 0; |
232 | } | 230 | } |
233 | #else | 231 | #else |
234 | static int | 232 | static int dasd_ioctl_reset_profile(struct dasd_block *block) |
235 | dasd_ioctl_reset_profile(struct dasd_device *device) | ||
236 | { | 233 | { |
237 | return -ENOSYS; | 234 | return -ENOSYS; |
238 | } | 235 | } |
239 | 236 | ||
240 | static int | 237 | static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp) |
241 | dasd_ioctl_read_profile(struct dasd_device *device, void __user *argp) | ||
242 | { | 238 | { |
243 | return -ENOSYS; | 239 | return -ENOSYS; |
244 | } | 240 | } |
@@ -247,87 +243,88 @@ dasd_ioctl_read_profile(struct dasd_device *device, void __user *argp) | |||
247 | /* | 243 | /* |
248 | * Return dasd information. Used for BIODASDINFO and BIODASDINFO2. | 244 | * Return dasd information. Used for BIODASDINFO and BIODASDINFO2. |
249 | */ | 245 | */ |
250 | static int | 246 | static int dasd_ioctl_information(struct dasd_block *block, |
251 | dasd_ioctl_information(struct dasd_device *device, | 247 | unsigned int cmd, void __user *argp) |
252 | unsigned int cmd, void __user *argp) | ||
253 | { | 248 | { |
254 | struct dasd_information2_t *dasd_info; | 249 | struct dasd_information2_t *dasd_info; |
255 | unsigned long flags; | 250 | unsigned long flags; |
256 | int rc; | 251 | int rc; |
252 | struct dasd_device *base; | ||
257 | struct ccw_device *cdev; | 253 | struct ccw_device *cdev; |
258 | struct ccw_dev_id dev_id; | 254 | struct ccw_dev_id dev_id; |
259 | 255 | ||
260 | if (!device->discipline->fill_info) | 256 | base = block->base; |
257 | if (!base->discipline->fill_info) | ||
261 | return -EINVAL; | 258 | return -EINVAL; |
262 | 259 | ||
263 | dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL); | 260 | dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL); |
264 | if (dasd_info == NULL) | 261 | if (dasd_info == NULL) |
265 | return -ENOMEM; | 262 | return -ENOMEM; |
266 | 263 | ||
267 | rc = device->discipline->fill_info(device, dasd_info); | 264 | rc = base->discipline->fill_info(base, dasd_info); |
268 | if (rc) { | 265 | if (rc) { |
269 | kfree(dasd_info); | 266 | kfree(dasd_info); |
270 | return rc; | 267 | return rc; |
271 | } | 268 | } |
272 | 269 | ||
273 | cdev = device->cdev; | 270 | cdev = base->cdev; |
274 | ccw_device_get_id(cdev, &dev_id); | 271 | ccw_device_get_id(cdev, &dev_id); |
275 | 272 | ||
276 | dasd_info->devno = dev_id.devno; | 273 | dasd_info->devno = dev_id.devno; |
277 | dasd_info->schid = _ccw_device_get_subchannel_number(device->cdev); | 274 | dasd_info->schid = _ccw_device_get_subchannel_number(base->cdev); |
278 | dasd_info->cu_type = cdev->id.cu_type; | 275 | dasd_info->cu_type = cdev->id.cu_type; |
279 | dasd_info->cu_model = cdev->id.cu_model; | 276 | dasd_info->cu_model = cdev->id.cu_model; |
280 | dasd_info->dev_type = cdev->id.dev_type; | 277 | dasd_info->dev_type = cdev->id.dev_type; |
281 | dasd_info->dev_model = cdev->id.dev_model; | 278 | dasd_info->dev_model = cdev->id.dev_model; |
282 | dasd_info->status = device->state; | 279 | dasd_info->status = base->state; |
283 | /* | 280 | /* |
284 | * The open_count is increased for every opener, that includes | 281 | * The open_count is increased for every opener, that includes |
285 | * the blkdev_get in dasd_scan_partitions. | 282 | * the blkdev_get in dasd_scan_partitions. |
286 | * This must be hidden from user-space. | 283 | * This must be hidden from user-space. |
287 | */ | 284 | */ |
288 | dasd_info->open_count = atomic_read(&device->open_count); | 285 | dasd_info->open_count = atomic_read(&block->open_count); |
289 | if (!device->bdev) | 286 | if (!block->bdev) |
290 | dasd_info->open_count++; | 287 | dasd_info->open_count++; |
291 | 288 | ||
292 | /* | 289 | /* |
293 | * check if device is really formatted | 290 | * check if device is really formatted |
294 | * LDL / CDL was returned by 'fill_info' | 291 | * LDL / CDL was returned by 'fill_info' |
295 | */ | 292 | */ |
296 | if ((device->state < DASD_STATE_READY) || | 293 | if ((base->state < DASD_STATE_READY) || |
297 | (dasd_check_blocksize(device->bp_block))) | 294 | (dasd_check_blocksize(block->bp_block))) |
298 | dasd_info->format = DASD_FORMAT_NONE; | 295 | dasd_info->format = DASD_FORMAT_NONE; |
299 | 296 | ||
300 | dasd_info->features |= | 297 | dasd_info->features |= |
301 | ((device->features & DASD_FEATURE_READONLY) != 0); | 298 | ((base->features & DASD_FEATURE_READONLY) != 0); |
302 | 299 | ||
303 | if (device->discipline) | 300 | if (base->discipline) |
304 | memcpy(dasd_info->type, device->discipline->name, 4); | 301 | memcpy(dasd_info->type, base->discipline->name, 4); |
305 | else | 302 | else |
306 | memcpy(dasd_info->type, "none", 4); | 303 | memcpy(dasd_info->type, "none", 4); |
307 | 304 | ||
308 | if (device->request_queue->request_fn) { | 305 | if (block->request_queue->request_fn) { |
309 | struct list_head *l; | 306 | struct list_head *l; |
310 | #ifdef DASD_EXTENDED_PROFILING | 307 | #ifdef DASD_EXTENDED_PROFILING |
311 | { | 308 | { |
312 | struct list_head *l; | 309 | struct list_head *l; |
313 | spin_lock_irqsave(&device->lock, flags); | 310 | spin_lock_irqsave(&block->lock, flags); |
314 | list_for_each(l, &device->request_queue->queue_head) | 311 | list_for_each(l, &block->request_queue->queue_head) |
315 | dasd_info->req_queue_len++; | 312 | dasd_info->req_queue_len++; |
316 | spin_unlock_irqrestore(&device->lock, flags); | 313 | spin_unlock_irqrestore(&block->lock, flags); |
317 | } | 314 | } |
318 | #endif /* DASD_EXTENDED_PROFILING */ | 315 | #endif /* DASD_EXTENDED_PROFILING */ |
319 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | 316 | spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); |
320 | list_for_each(l, &device->ccw_queue) | 317 | list_for_each(l, &base->ccw_queue) |
321 | dasd_info->chanq_len++; | 318 | dasd_info->chanq_len++; |
322 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), | 319 | spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), |
323 | flags); | 320 | flags); |
324 | } | 321 | } |
325 | 322 | ||
326 | rc = 0; | 323 | rc = 0; |
327 | if (copy_to_user(argp, dasd_info, | 324 | if (copy_to_user(argp, dasd_info, |
328 | ((cmd == (unsigned int) BIODASDINFO2) ? | 325 | ((cmd == (unsigned int) BIODASDINFO2) ? |
329 | sizeof (struct dasd_information2_t) : | 326 | sizeof(struct dasd_information2_t) : |
330 | sizeof (struct dasd_information_t)))) | 327 | sizeof(struct dasd_information_t)))) |
331 | rc = -EFAULT; | 328 | rc = -EFAULT; |
332 | kfree(dasd_info); | 329 | kfree(dasd_info); |
333 | return rc; | 330 | return rc; |
@@ -339,7 +336,7 @@ dasd_ioctl_information(struct dasd_device *device, | |||
339 | static int | 336 | static int |
340 | dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp) | 337 | dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp) |
341 | { | 338 | { |
342 | struct dasd_device *device = bdev->bd_disk->private_data; | 339 | struct dasd_block *block = bdev->bd_disk->private_data; |
343 | int intval; | 340 | int intval; |
344 | 341 | ||
345 | if (!capable(CAP_SYS_ADMIN)) | 342 | if (!capable(CAP_SYS_ADMIN)) |
@@ -351,11 +348,10 @@ dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp) | |||
351 | return -EFAULT; | 348 | return -EFAULT; |
352 | 349 | ||
353 | set_disk_ro(bdev->bd_disk, intval); | 350 | set_disk_ro(bdev->bd_disk, intval); |
354 | return dasd_set_feature(device->cdev, DASD_FEATURE_READONLY, intval); | 351 | return dasd_set_feature(block->base->cdev, DASD_FEATURE_READONLY, intval); |
355 | } | 352 | } |
356 | 353 | ||
357 | static int | 354 | static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd, |
358 | dasd_ioctl_readall_cmb(struct dasd_device *device, unsigned int cmd, | ||
359 | unsigned long arg) | 355 | unsigned long arg) |
360 | { | 356 | { |
361 | struct cmbdata __user *argp = (void __user *) arg; | 357 | struct cmbdata __user *argp = (void __user *) arg; |
@@ -363,7 +359,7 @@ dasd_ioctl_readall_cmb(struct dasd_device *device, unsigned int cmd, | |||
363 | struct cmbdata data; | 359 | struct cmbdata data; |
364 | int ret; | 360 | int ret; |
365 | 361 | ||
366 | ret = cmf_readall(device->cdev, &data); | 362 | ret = cmf_readall(block->base->cdev, &data); |
367 | if (!ret && copy_to_user(argp, &data, min(size, sizeof(*argp)))) | 363 | if (!ret && copy_to_user(argp, &data, min(size, sizeof(*argp)))) |
368 | return -EFAULT; | 364 | return -EFAULT; |
369 | return ret; | 365 | return ret; |
@@ -374,10 +370,10 @@ dasd_ioctl(struct inode *inode, struct file *file, | |||
374 | unsigned int cmd, unsigned long arg) | 370 | unsigned int cmd, unsigned long arg) |
375 | { | 371 | { |
376 | struct block_device *bdev = inode->i_bdev; | 372 | struct block_device *bdev = inode->i_bdev; |
377 | struct dasd_device *device = bdev->bd_disk->private_data; | 373 | struct dasd_block *block = bdev->bd_disk->private_data; |
378 | void __user *argp = (void __user *)arg; | 374 | void __user *argp = (void __user *)arg; |
379 | 375 | ||
380 | if (!device) | 376 | if (!block) |
381 | return -ENODEV; | 377 | return -ENODEV; |
382 | 378 | ||
383 | if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg) { | 379 | if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg) { |
@@ -391,33 +387,33 @@ dasd_ioctl(struct inode *inode, struct file *file, | |||
391 | case BIODASDENABLE: | 387 | case BIODASDENABLE: |
392 | return dasd_ioctl_enable(bdev); | 388 | return dasd_ioctl_enable(bdev); |
393 | case BIODASDQUIESCE: | 389 | case BIODASDQUIESCE: |
394 | return dasd_ioctl_quiesce(device); | 390 | return dasd_ioctl_quiesce(block); |
395 | case BIODASDRESUME: | 391 | case BIODASDRESUME: |
396 | return dasd_ioctl_resume(device); | 392 | return dasd_ioctl_resume(block); |
397 | case BIODASDFMT: | 393 | case BIODASDFMT: |
398 | return dasd_ioctl_format(bdev, argp); | 394 | return dasd_ioctl_format(bdev, argp); |
399 | case BIODASDINFO: | 395 | case BIODASDINFO: |
400 | return dasd_ioctl_information(device, cmd, argp); | 396 | return dasd_ioctl_information(block, cmd, argp); |
401 | case BIODASDINFO2: | 397 | case BIODASDINFO2: |
402 | return dasd_ioctl_information(device, cmd, argp); | 398 | return dasd_ioctl_information(block, cmd, argp); |
403 | case BIODASDPRRD: | 399 | case BIODASDPRRD: |
404 | return dasd_ioctl_read_profile(device, argp); | 400 | return dasd_ioctl_read_profile(block, argp); |
405 | case BIODASDPRRST: | 401 | case BIODASDPRRST: |
406 | return dasd_ioctl_reset_profile(device); | 402 | return dasd_ioctl_reset_profile(block); |
407 | case BLKROSET: | 403 | case BLKROSET: |
408 | return dasd_ioctl_set_ro(bdev, argp); | 404 | return dasd_ioctl_set_ro(bdev, argp); |
409 | case DASDAPIVER: | 405 | case DASDAPIVER: |
410 | return dasd_ioctl_api_version(argp); | 406 | return dasd_ioctl_api_version(argp); |
411 | case BIODASDCMFENABLE: | 407 | case BIODASDCMFENABLE: |
412 | return enable_cmf(device->cdev); | 408 | return enable_cmf(block->base->cdev); |
413 | case BIODASDCMFDISABLE: | 409 | case BIODASDCMFDISABLE: |
414 | return disable_cmf(device->cdev); | 410 | return disable_cmf(block->base->cdev); |
415 | case BIODASDREADALLCMB: | 411 | case BIODASDREADALLCMB: |
416 | return dasd_ioctl_readall_cmb(device, cmd, arg); | 412 | return dasd_ioctl_readall_cmb(block, cmd, arg); |
417 | default: | 413 | default: |
418 | /* if the discipline has an ioctl method try it. */ | 414 | /* if the discipline has an ioctl method try it. */ |
419 | if (device->discipline->ioctl) { | 415 | if (block->base->discipline->ioctl) { |
420 | int rval = device->discipline->ioctl(device, cmd, argp); | 416 | int rval = block->base->discipline->ioctl(block, cmd, argp); |
421 | if (rval != -ENOIOCTLCMD) | 417 | if (rval != -ENOIOCTLCMD) |
422 | return rval; | 418 | return rval; |
423 | } | 419 | } |
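With the new prototype int (*ioctl)(struct dasd_block *, unsigned int, void __user *), a discipline's ioctl hook receives the block structure and reaches the hardware device through block->base. A hedged sketch of such a hook; the command number and the returned value are invented for illustration only:

	/* Sketch only: EXAMPLE_GET_BLKSIZE is a made-up ioctl number. */
	static int example_discipline_ioctl(struct dasd_block *block,
					    unsigned int cmd, void __user *argp)
	{
		if (cmd != EXAMPLE_GET_BLKSIZE)
			return -ENOIOCTLCMD;	/* let dasd_ioctl() give up */
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		/*
		 * Block-layer data lives in dasd_block, hardware state in
		 * block->base (state, cdev, discipline, ...).
		 */
		return put_user(block->bp_block, (unsigned int __user *)argp);
	}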
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c index ac7e8ef504cb..28a86f070048 100644 --- a/drivers/s390/block/dasd_proc.c +++ b/drivers/s390/block/dasd_proc.c | |||
@@ -54,11 +54,16 @@ static int | |||
54 | dasd_devices_show(struct seq_file *m, void *v) | 54 | dasd_devices_show(struct seq_file *m, void *v) |
55 | { | 55 | { |
56 | struct dasd_device *device; | 56 | struct dasd_device *device; |
57 | struct dasd_block *block; | ||
57 | char *substr; | 58 | char *substr; |
58 | 59 | ||
59 | device = dasd_device_from_devindex((unsigned long) v - 1); | 60 | device = dasd_device_from_devindex((unsigned long) v - 1); |
60 | if (IS_ERR(device)) | 61 | if (IS_ERR(device)) |
61 | return 0; | 62 | return 0; |
63 | if (device->block) | ||
64 | block = device->block; | ||
65 | else | ||
66 | return 0; | ||
62 | /* Print device number. */ | 67 | /* Print device number. */ |
63 | seq_printf(m, "%s", device->cdev->dev.bus_id); | 68 | seq_printf(m, "%s", device->cdev->dev.bus_id); |
64 | /* Print discipline string. */ | 69 | /* Print discipline string. */ |
@@ -67,14 +72,14 @@ dasd_devices_show(struct seq_file *m, void *v) | |||
67 | else | 72 | else |
68 | seq_printf(m, "(none)"); | 73 | seq_printf(m, "(none)"); |
69 | /* Print kdev. */ | 74 | /* Print kdev. */ |
70 | if (device->gdp) | 75 | if (block->gdp) |
71 | seq_printf(m, " at (%3d:%6d)", | 76 | seq_printf(m, " at (%3d:%6d)", |
72 | device->gdp->major, device->gdp->first_minor); | 77 | block->gdp->major, block->gdp->first_minor); |
73 | else | 78 | else |
74 | seq_printf(m, " at (???:??????)"); | 79 | seq_printf(m, " at (???:??????)"); |
75 | /* Print device name. */ | 80 | /* Print device name. */ |
76 | if (device->gdp) | 81 | if (block->gdp) |
77 | seq_printf(m, " is %-8s", device->gdp->disk_name); | 82 | seq_printf(m, " is %-8s", block->gdp->disk_name); |
78 | else | 83 | else |
79 | seq_printf(m, " is ????????"); | 84 | seq_printf(m, " is ????????"); |
80 | /* Print devices features. */ | 85 | /* Print devices features. */ |
@@ -100,14 +105,14 @@ dasd_devices_show(struct seq_file *m, void *v) | |||
100 | case DASD_STATE_READY: | 105 | case DASD_STATE_READY: |
101 | case DASD_STATE_ONLINE: | 106 | case DASD_STATE_ONLINE: |
102 | seq_printf(m, "active "); | 107 | seq_printf(m, "active "); |
103 | if (dasd_check_blocksize(device->bp_block)) | 108 | if (dasd_check_blocksize(block->bp_block)) |
104 | seq_printf(m, "n/f "); | 109 | seq_printf(m, "n/f "); |
105 | else | 110 | else |
106 | seq_printf(m, | 111 | seq_printf(m, |
107 | "at blocksize: %d, %ld blocks, %ld MB", | 112 | "at blocksize: %d, %ld blocks, %ld MB", |
108 | device->bp_block, device->blocks, | 113 | block->bp_block, block->blocks, |
109 | ((device->bp_block >> 9) * | 114 | ((block->bp_block >> 9) * |
110 | device->blocks) >> 11); | 115 | block->blocks) >> 11); |
111 | break; | 116 | break; |
112 | default: | 117 | default: |
113 | seq_printf(m, "no stat"); | 118 | seq_printf(m, "no stat"); |
@@ -137,7 +142,7 @@ static void dasd_devices_stop(struct seq_file *m, void *v) | |||
137 | { | 142 | { |
138 | } | 143 | } |
139 | 144 | ||
140 | static struct seq_operations dasd_devices_seq_ops = { | 145 | static const struct seq_operations dasd_devices_seq_ops = { |
141 | .start = dasd_devices_start, | 146 | .start = dasd_devices_start, |
142 | .next = dasd_devices_next, | 147 | .next = dasd_devices_next, |
143 | .stop = dasd_devices_stop, | 148 | .stop = dasd_devices_stop, |
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 15a5789b7734..7779bfce1c31 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c | |||
@@ -82,7 +82,7 @@ struct dcssblk_dev_info { | |||
82 | struct request_queue *dcssblk_queue; | 82 | struct request_queue *dcssblk_queue; |
83 | }; | 83 | }; |
84 | 84 | ||
85 | static struct list_head dcssblk_devices = LIST_HEAD_INIT(dcssblk_devices); | 85 | static LIST_HEAD(dcssblk_devices); |
86 | static struct rw_semaphore dcssblk_devices_sem; | 86 | static struct rw_semaphore dcssblk_devices_sem; |
87 | 87 | ||
88 | /* | 88 | /* |
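The dcssblk.c hunk above (and the two raw3270.c hunks further down) only change the spelling of a statically initialized list head; LIST_HEAD(name) in <linux/list.h> expands to struct list_head name = LIST_HEAD_INIT(name), so behaviour is identical. For illustration, the two equivalent forms side by side (example_list is a made-up name):

	#include <linux/list.h>

	static LIST_HEAD(example_list);			/* new spelling */
	static struct list_head example_list_old =
		LIST_HEAD_INIT(example_list_old);	/* old spelling */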
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index 130de19916f2..7e73e39a1741 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile | |||
@@ -3,7 +3,7 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ | 5 | obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ |
6 | sclp_info.o sclp_config.o sclp_chp.o | 6 | sclp_cmd.o sclp_config.o sclp_cpi_sys.o |
7 | 7 | ||
8 | obj-$(CONFIG_TN3270) += raw3270.o | 8 | obj-$(CONFIG_TN3270) += raw3270.o |
9 | obj-$(CONFIG_TN3270_CONSOLE) += con3270.o | 9 | obj-$(CONFIG_TN3270_CONSOLE) += con3270.o |
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c index 20442fbf9346..a86c0534cd49 100644 --- a/drivers/s390/char/monwriter.c +++ b/drivers/s390/char/monwriter.c | |||
@@ -295,7 +295,7 @@ module_init(mon_init); | |||
295 | module_exit(mon_exit); | 295 | module_exit(mon_exit); |
296 | 296 | ||
297 | module_param_named(max_bufs, mon_max_bufs, int, 0644); | 297 | module_param_named(max_bufs, mon_max_bufs, int, 0644); |
298 | MODULE_PARM_DESC(max_bufs, "Maximum number of sample monitor data buffers" | 298 | MODULE_PARM_DESC(max_bufs, "Maximum number of sample monitor data buffers " |
299 | "that can be active at one time"); | 299 | "that can be active at one time"); |
300 | 300 | ||
301 | MODULE_AUTHOR("Melissa Howland <Melissa.Howland@us.ibm.com>"); | 301 | MODULE_AUTHOR("Melissa Howland <Melissa.Howland@us.ibm.com>"); |
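The monwriter hunk only inserts a space before the closing quote: adjacent C string literals are concatenated by the compiler, so the old text read "...data buffersthat can be active...". For illustration:

	/* String literal concatenation: both lines form one string at compile time. */
	static const char desc[] = "Maximum number of sample monitor data buffers "
				   "that can be active at one time";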
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index 8d1c64a24dec..0d98f1ff2edd 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c | |||
@@ -66,7 +66,7 @@ struct raw3270 { | |||
66 | static DEFINE_MUTEX(raw3270_mutex); | 66 | static DEFINE_MUTEX(raw3270_mutex); |
67 | 67 | ||
68 | /* List of 3270 devices. */ | 68 | /* List of 3270 devices. */ |
69 | static struct list_head raw3270_devices = LIST_HEAD_INIT(raw3270_devices); | 69 | static LIST_HEAD(raw3270_devices); |
70 | 70 | ||
71 | /* | 71 | /* |
72 | * Flag to indicate if the driver has been registered. Some operations | 72 | * Flag to indicate if the driver has been registered. Some operations |
@@ -1210,7 +1210,7 @@ struct raw3270_notifier { | |||
1210 | void (*notifier)(int, int); | 1210 | void (*notifier)(int, int); |
1211 | }; | 1211 | }; |
1212 | 1212 | ||
1213 | static struct list_head raw3270_notifier = LIST_HEAD_INIT(raw3270_notifier); | 1213 | static LIST_HEAD(raw3270_notifier); |
1214 | 1214 | ||
1215 | int raw3270_register_notifier(void (*notifier)(int, int)) | 1215 | int raw3270_register_notifier(void (*notifier)(int, int)) |
1216 | { | 1216 | { |
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index c7318a125852..aa8186d18aee 100644 --- a/drivers/s390/char/sclp.h +++ b/drivers/s390/char/sclp.h | |||
@@ -56,8 +56,6 @@ typedef unsigned int sclp_cmdw_t; | |||
56 | #define SCLP_CMDW_READ_EVENT_DATA 0x00770005 | 56 | #define SCLP_CMDW_READ_EVENT_DATA 0x00770005 |
57 | #define SCLP_CMDW_WRITE_EVENT_DATA 0x00760005 | 57 | #define SCLP_CMDW_WRITE_EVENT_DATA 0x00760005 |
58 | #define SCLP_CMDW_WRITE_EVENT_MASK 0x00780005 | 58 | #define SCLP_CMDW_WRITE_EVENT_MASK 0x00780005 |
59 | #define SCLP_CMDW_READ_SCP_INFO 0x00020001 | ||
60 | #define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 | ||
61 | 59 | ||
62 | #define GDS_ID_MDSMU 0x1310 | 60 | #define GDS_ID_MDSMU 0x1310 |
63 | #define GDS_ID_MDSROUTEINFO 0x1311 | 61 | #define GDS_ID_MDSROUTEINFO 0x1311 |
@@ -83,6 +81,8 @@ extern u64 sclp_facilities; | |||
83 | 81 | ||
84 | #define SCLP_HAS_CHP_INFO (sclp_facilities & 0x8000000000000000ULL) | 82 | #define SCLP_HAS_CHP_INFO (sclp_facilities & 0x8000000000000000ULL) |
85 | #define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL) | 83 | #define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL) |
84 | #define SCLP_HAS_CPU_INFO (sclp_facilities & 0x0800000000000000ULL) | ||
85 | #define SCLP_HAS_CPU_RECONFIG (sclp_facilities & 0x0400000000000000ULL) | ||
86 | 86 | ||
87 | struct gds_subvector { | 87 | struct gds_subvector { |
88 | u8 length; | 88 | u8 length; |
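The two new defines follow the pattern of the existing CHP macros: each tests a single bit of the facilities word returned by read-SCP-info. Callers are expected to use them as feature guards before issuing the corresponding SCLP commands, as sclp_cmd.c below does; a hypothetical caller-side guard would look like:

	if (!SCLP_HAS_CPU_INFO || !SCLP_HAS_CPU_RECONFIG)
		return -EOPNOTSUPP;	/* firmware lacks CPU (re)configuration support */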
diff --git a/drivers/s390/char/sclp_chp.c b/drivers/s390/char/sclp_chp.c deleted file mode 100644 index c68f5e7e63a0..000000000000 --- a/drivers/s390/char/sclp_chp.c +++ /dev/null | |||
@@ -1,200 +0,0 @@ | |||
1 | /* | ||
2 | * drivers/s390/char/sclp_chp.c | ||
3 | * | ||
4 | * Copyright IBM Corp. 2007 | ||
5 | * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #include <linux/types.h> | ||
9 | #include <linux/gfp.h> | ||
10 | #include <linux/errno.h> | ||
11 | #include <linux/completion.h> | ||
12 | #include <asm/sclp.h> | ||
13 | #include <asm/chpid.h> | ||
14 | |||
15 | #include "sclp.h" | ||
16 | |||
17 | #define TAG "sclp_chp: " | ||
18 | |||
19 | #define SCLP_CMDW_CONFIGURE_CHANNEL_PATH 0x000f0001 | ||
20 | #define SCLP_CMDW_DECONFIGURE_CHANNEL_PATH 0x000e0001 | ||
21 | #define SCLP_CMDW_READ_CHANNEL_PATH_INFORMATION 0x00030001 | ||
22 | |||
23 | static inline sclp_cmdw_t get_configure_cmdw(struct chp_id chpid) | ||
24 | { | ||
25 | return SCLP_CMDW_CONFIGURE_CHANNEL_PATH | chpid.id << 8; | ||
26 | } | ||
27 | |||
28 | static inline sclp_cmdw_t get_deconfigure_cmdw(struct chp_id chpid) | ||
29 | { | ||
30 | return SCLP_CMDW_DECONFIGURE_CHANNEL_PATH | chpid.id << 8; | ||
31 | } | ||
32 | |||
33 | static void chp_callback(struct sclp_req *req, void *data) | ||
34 | { | ||
35 | struct completion *completion = data; | ||
36 | |||
37 | complete(completion); | ||
38 | } | ||
39 | |||
40 | struct chp_cfg_sccb { | ||
41 | struct sccb_header header; | ||
42 | u8 ccm; | ||
43 | u8 reserved[6]; | ||
44 | u8 cssid; | ||
45 | } __attribute__((packed)); | ||
46 | |||
47 | struct chp_cfg_data { | ||
48 | struct chp_cfg_sccb sccb; | ||
49 | struct sclp_req req; | ||
50 | struct completion completion; | ||
51 | } __attribute__((packed)); | ||
52 | |||
53 | static int do_configure(sclp_cmdw_t cmd) | ||
54 | { | ||
55 | struct chp_cfg_data *data; | ||
56 | int rc; | ||
57 | |||
58 | if (!SCLP_HAS_CHP_RECONFIG) | ||
59 | return -EOPNOTSUPP; | ||
60 | /* Prepare sccb. */ | ||
61 | data = (struct chp_cfg_data *) get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
62 | if (!data) | ||
63 | return -ENOMEM; | ||
64 | data->sccb.header.length = sizeof(struct chp_cfg_sccb); | ||
65 | data->req.command = cmd; | ||
66 | data->req.sccb = &(data->sccb); | ||
67 | data->req.status = SCLP_REQ_FILLED; | ||
68 | data->req.callback = chp_callback; | ||
69 | data->req.callback_data = &(data->completion); | ||
70 | init_completion(&data->completion); | ||
71 | |||
72 | /* Perform sclp request. */ | ||
73 | rc = sclp_add_request(&(data->req)); | ||
74 | if (rc) | ||
75 | goto out; | ||
76 | wait_for_completion(&data->completion); | ||
77 | |||
78 | /* Check response .*/ | ||
79 | if (data->req.status != SCLP_REQ_DONE) { | ||
80 | printk(KERN_WARNING TAG "configure channel-path request failed " | ||
81 | "(status=0x%02x)\n", data->req.status); | ||
82 | rc = -EIO; | ||
83 | goto out; | ||
84 | } | ||
85 | switch (data->sccb.header.response_code) { | ||
86 | case 0x0020: | ||
87 | case 0x0120: | ||
88 | case 0x0440: | ||
89 | case 0x0450: | ||
90 | break; | ||
91 | default: | ||
92 | printk(KERN_WARNING TAG "configure channel-path failed " | ||
93 | "(cmd=0x%08x, response=0x%04x)\n", cmd, | ||
94 | data->sccb.header.response_code); | ||
95 | rc = -EIO; | ||
96 | break; | ||
97 | } | ||
98 | out: | ||
99 | free_page((unsigned long) data); | ||
100 | |||
101 | return rc; | ||
102 | } | ||
103 | |||
104 | /** | ||
105 | * sclp_chp_configure - perform configure channel-path sclp command | ||
106 | * @chpid: channel-path ID | ||
107 | * | ||
108 | * Perform configure channel-path command sclp command for specified chpid. | ||
109 | * Return 0 after command successfully finished, non-zero otherwise. | ||
110 | */ | ||
111 | int sclp_chp_configure(struct chp_id chpid) | ||
112 | { | ||
113 | return do_configure(get_configure_cmdw(chpid)); | ||
114 | } | ||
115 | |||
116 | /** | ||
117 | * sclp_chp_deconfigure - perform deconfigure channel-path sclp command | ||
118 | * @chpid: channel-path ID | ||
119 | * | ||
120 | * Perform deconfigure channel-path command sclp command for specified chpid | ||
121 | * and wait for completion. On success return 0. Return non-zero otherwise. | ||
122 | */ | ||
123 | int sclp_chp_deconfigure(struct chp_id chpid) | ||
124 | { | ||
125 | return do_configure(get_deconfigure_cmdw(chpid)); | ||
126 | } | ||
127 | |||
128 | struct chp_info_sccb { | ||
129 | struct sccb_header header; | ||
130 | u8 recognized[SCLP_CHP_INFO_MASK_SIZE]; | ||
131 | u8 standby[SCLP_CHP_INFO_MASK_SIZE]; | ||
132 | u8 configured[SCLP_CHP_INFO_MASK_SIZE]; | ||
133 | u8 ccm; | ||
134 | u8 reserved[6]; | ||
135 | u8 cssid; | ||
136 | } __attribute__((packed)); | ||
137 | |||
138 | struct chp_info_data { | ||
139 | struct chp_info_sccb sccb; | ||
140 | struct sclp_req req; | ||
141 | struct completion completion; | ||
142 | } __attribute__((packed)); | ||
143 | |||
144 | /** | ||
145 | * sclp_chp_read_info - perform read channel-path information sclp command | ||
146 | * @info: resulting channel-path information data | ||
147 | * | ||
148 | * Perform read channel-path information sclp command and wait for completion. | ||
149 | * On success, store channel-path information in @info and return 0. Return | ||
150 | * non-zero otherwise. | ||
151 | */ | ||
152 | int sclp_chp_read_info(struct sclp_chp_info *info) | ||
153 | { | ||
154 | struct chp_info_data *data; | ||
155 | int rc; | ||
156 | |||
157 | if (!SCLP_HAS_CHP_INFO) | ||
158 | return -EOPNOTSUPP; | ||
159 | /* Prepare sccb. */ | ||
160 | data = (struct chp_info_data *) get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
161 | if (!data) | ||
162 | return -ENOMEM; | ||
163 | data->sccb.header.length = sizeof(struct chp_info_sccb); | ||
164 | data->req.command = SCLP_CMDW_READ_CHANNEL_PATH_INFORMATION; | ||
165 | data->req.sccb = &(data->sccb); | ||
166 | data->req.status = SCLP_REQ_FILLED; | ||
167 | data->req.callback = chp_callback; | ||
168 | data->req.callback_data = &(data->completion); | ||
169 | init_completion(&data->completion); | ||
170 | |||
171 | /* Perform sclp request. */ | ||
172 | rc = sclp_add_request(&(data->req)); | ||
173 | if (rc) | ||
174 | goto out; | ||
175 | wait_for_completion(&data->completion); | ||
176 | |||
177 | /* Check response .*/ | ||
178 | if (data->req.status != SCLP_REQ_DONE) { | ||
179 | printk(KERN_WARNING TAG "read channel-path info request failed " | ||
180 | "(status=0x%02x)\n", data->req.status); | ||
181 | rc = -EIO; | ||
182 | goto out; | ||
183 | } | ||
184 | if (data->sccb.header.response_code != 0x0010) { | ||
185 | printk(KERN_WARNING TAG "read channel-path info failed " | ||
186 | "(response=0x%04x)\n", data->sccb.header.response_code); | ||
187 | rc = -EIO; | ||
188 | goto out; | ||
189 | } | ||
190 | memcpy(info->recognized, data->sccb.recognized, | ||
191 | SCLP_CHP_INFO_MASK_SIZE); | ||
192 | memcpy(info->standby, data->sccb.standby, | ||
193 | SCLP_CHP_INFO_MASK_SIZE); | ||
194 | memcpy(info->configured, data->sccb.configured, | ||
195 | SCLP_CHP_INFO_MASK_SIZE); | ||
196 | out: | ||
197 | free_page((unsigned long) data); | ||
198 | |||
199 | return rc; | ||
200 | } | ||
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c new file mode 100644 index 000000000000..b5c23396f8fe --- /dev/null +++ b/drivers/s390/char/sclp_cmd.c | |||
@@ -0,0 +1,398 @@ | |||
1 | /* | ||
2 | * drivers/s390/char/sclp_cmd.c | ||
3 | * | ||
4 | * Copyright IBM Corp. 2007 | ||
5 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, | ||
6 | * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | ||
7 | */ | ||
8 | |||
9 | #include <linux/completion.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/errno.h> | ||
12 | #include <linux/slab.h> | ||
13 | #include <linux/string.h> | ||
14 | #include <asm/chpid.h> | ||
15 | #include <asm/sclp.h> | ||
16 | #include "sclp.h" | ||
17 | |||
18 | #define TAG "sclp_cmd: " | ||
19 | |||
20 | #define SCLP_CMDW_READ_SCP_INFO 0x00020001 | ||
21 | #define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 | ||
22 | |||
23 | struct read_info_sccb { | ||
24 | struct sccb_header header; /* 0-7 */ | ||
25 | u16 rnmax; /* 8-9 */ | ||
26 | u8 rnsize; /* 10 */ | ||
27 | u8 _reserved0[24 - 11]; /* 11-23 */ | ||
28 | u8 loadparm[8]; /* 24-31 */ | ||
29 | u8 _reserved1[48 - 32]; /* 32-47 */ | ||
30 | u64 facilities; /* 48-55 */ | ||
31 | u8 _reserved2[84 - 56]; /* 56-83 */ | ||
32 | u8 fac84; /* 84 */ | ||
33 | u8 _reserved3[91 - 85]; /* 85-90 */ | ||
34 | u8 flags; /* 91 */ | ||
35 | u8 _reserved4[100 - 92]; /* 92-99 */ | ||
36 | u32 rnsize2; /* 100-103 */ | ||
37 | u64 rnmax2; /* 104-111 */ | ||
38 | u8 _reserved5[4096 - 112]; /* 112-4095 */ | ||
39 | } __attribute__((packed, aligned(PAGE_SIZE))); | ||
40 | |||
41 | static struct read_info_sccb __initdata early_read_info_sccb; | ||
42 | static int __initdata early_read_info_sccb_valid; | ||
43 | |||
44 | u64 sclp_facilities; | ||
45 | static u8 sclp_fac84; | ||
46 | |||
47 | static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb) | ||
48 | { | ||
49 | int rc; | ||
50 | |||
51 | __ctl_set_bit(0, 9); | ||
52 | rc = sclp_service_call(cmd, sccb); | ||
53 | if (rc) | ||
54 | goto out; | ||
55 | __load_psw_mask(PSW_BASE_BITS | PSW_MASK_EXT | | ||
56 | PSW_MASK_WAIT | PSW_DEFAULT_KEY); | ||
57 | local_irq_disable(); | ||
58 | out: | ||
59 | /* Contents of the sccb might have changed. */ | ||
60 | barrier(); | ||
61 | __ctl_clear_bit(0, 9); | ||
62 | return rc; | ||
63 | } | ||
64 | |||
65 | void __init sclp_read_info_early(void) | ||
66 | { | ||
67 | int rc; | ||
68 | int i; | ||
69 | struct read_info_sccb *sccb; | ||
70 | sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED, | ||
71 | SCLP_CMDW_READ_SCP_INFO}; | ||
72 | |||
73 | sccb = &early_read_info_sccb; | ||
74 | for (i = 0; i < ARRAY_SIZE(commands); i++) { | ||
75 | do { | ||
76 | memset(sccb, 0, sizeof(*sccb)); | ||
77 | sccb->header.length = sizeof(*sccb); | ||
78 | sccb->header.control_mask[2] = 0x80; | ||
79 | rc = sclp_cmd_sync_early(commands[i], sccb); | ||
80 | } while (rc == -EBUSY); | ||
81 | |||
82 | if (rc) | ||
83 | break; | ||
84 | if (sccb->header.response_code == 0x10) { | ||
85 | early_read_info_sccb_valid = 1; | ||
86 | break; | ||
87 | } | ||
88 | if (sccb->header.response_code != 0x1f0) | ||
89 | break; | ||
90 | } | ||
91 | } | ||
92 | |||
93 | void __init sclp_facilities_detect(void) | ||
94 | { | ||
95 | if (!early_read_info_sccb_valid) | ||
96 | return; | ||
97 | sclp_facilities = early_read_info_sccb.facilities; | ||
98 | sclp_fac84 = early_read_info_sccb.fac84; | ||
99 | } | ||
100 | |||
101 | unsigned long long __init sclp_memory_detect(void) | ||
102 | { | ||
103 | unsigned long long memsize; | ||
104 | struct read_info_sccb *sccb; | ||
105 | |||
106 | if (!early_read_info_sccb_valid) | ||
107 | return 0; | ||
108 | sccb = &early_read_info_sccb; | ||
109 | if (sccb->rnsize) | ||
110 | memsize = sccb->rnsize << 20; | ||
111 | else | ||
112 | memsize = sccb->rnsize2 << 20; | ||
113 | if (sccb->rnmax) | ||
114 | memsize *= sccb->rnmax; | ||
115 | else | ||
116 | memsize *= sccb->rnmax2; | ||
117 | return memsize; | ||
118 | } | ||
119 | |||
120 | /* | ||
121 | * This function will be called after sclp_memory_detect(), which gets called | ||
122 | * early from early.c code. Therefore the sccb should have valid contents. | ||
123 | */ | ||
124 | void __init sclp_get_ipl_info(struct sclp_ipl_info *info) | ||
125 | { | ||
126 | struct read_info_sccb *sccb; | ||
127 | |||
128 | if (!early_read_info_sccb_valid) | ||
129 | return; | ||
130 | sccb = &early_read_info_sccb; | ||
131 | info->is_valid = 1; | ||
132 | if (sccb->flags & 0x2) | ||
133 | info->has_dump = 1; | ||
134 | memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN); | ||
135 | } | ||
136 | |||
137 | static void sclp_sync_callback(struct sclp_req *req, void *data) | ||
138 | { | ||
139 | struct completion *completion = data; | ||
140 | |||
141 | complete(completion); | ||
142 | } | ||
143 | |||
144 | static int do_sync_request(sclp_cmdw_t cmd, void *sccb) | ||
145 | { | ||
146 | struct completion completion; | ||
147 | struct sclp_req *request; | ||
148 | int rc; | ||
149 | |||
150 | request = kzalloc(sizeof(*request), GFP_KERNEL); | ||
151 | if (!request) | ||
152 | return -ENOMEM; | ||
153 | request->command = cmd; | ||
154 | request->sccb = sccb; | ||
155 | request->status = SCLP_REQ_FILLED; | ||
156 | request->callback = sclp_sync_callback; | ||
157 | request->callback_data = &completion; | ||
158 | init_completion(&completion); | ||
159 | |||
160 | /* Perform sclp request. */ | ||
161 | rc = sclp_add_request(request); | ||
162 | if (rc) | ||
163 | goto out; | ||
164 | wait_for_completion(&completion); | ||
165 | |||
166 | /* Check response. */ | ||
167 | if (request->status != SCLP_REQ_DONE) { | ||
168 | printk(KERN_WARNING TAG "sync request failed " | ||
169 | "(cmd=0x%08x, status=0x%02x)\n", cmd, request->status); | ||
170 | rc = -EIO; | ||
171 | } | ||
172 | out: | ||
173 | kfree(request); | ||
174 | return rc; | ||
175 | } | ||
176 | |||
177 | /* | ||
178 | * CPU configuration related functions. | ||
179 | */ | ||
180 | |||
181 | #define SCLP_CMDW_READ_CPU_INFO 0x00010001 | ||
182 | #define SCLP_CMDW_CONFIGURE_CPU 0x00110001 | ||
183 | #define SCLP_CMDW_DECONFIGURE_CPU 0x00100001 | ||
184 | |||
185 | struct read_cpu_info_sccb { | ||
186 | struct sccb_header header; | ||
187 | u16 nr_configured; | ||
188 | u16 offset_configured; | ||
189 | u16 nr_standby; | ||
190 | u16 offset_standby; | ||
191 | u8 reserved[4096 - 16]; | ||
192 | } __attribute__((packed, aligned(PAGE_SIZE))); | ||
193 | |||
194 | static void sclp_fill_cpu_info(struct sclp_cpu_info *info, | ||
195 | struct read_cpu_info_sccb *sccb) | ||
196 | { | ||
197 | char *page = (char *) sccb; | ||
198 | |||
199 | memset(info, 0, sizeof(*info)); | ||
200 | info->configured = sccb->nr_configured; | ||
201 | info->standby = sccb->nr_standby; | ||
202 | info->combined = sccb->nr_configured + sccb->nr_standby; | ||
203 | info->has_cpu_type = sclp_fac84 & 0x1; | ||
204 | memcpy(&info->cpu, page + sccb->offset_configured, | ||
205 | info->combined * sizeof(struct sclp_cpu_entry)); | ||
206 | } | ||
207 | |||
208 | int sclp_get_cpu_info(struct sclp_cpu_info *info) | ||
209 | { | ||
210 | int rc; | ||
211 | struct read_cpu_info_sccb *sccb; | ||
212 | |||
213 | if (!SCLP_HAS_CPU_INFO) | ||
214 | return -EOPNOTSUPP; | ||
215 | sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
216 | if (!sccb) | ||
217 | return -ENOMEM; | ||
218 | sccb->header.length = sizeof(*sccb); | ||
219 | rc = do_sync_request(SCLP_CMDW_READ_CPU_INFO, sccb); | ||
220 | if (rc) | ||
221 | goto out; | ||
222 | if (sccb->header.response_code != 0x0010) { | ||
223 | printk(KERN_WARNING TAG "readcpuinfo failed " | ||
224 | "(response=0x%04x)\n", sccb->header.response_code); | ||
225 | rc = -EIO; | ||
226 | goto out; | ||
227 | } | ||
228 | sclp_fill_cpu_info(info, sccb); | ||
229 | out: | ||
230 | free_page((unsigned long) sccb); | ||
231 | return rc; | ||
232 | } | ||
233 | |||
234 | struct cpu_configure_sccb { | ||
235 | struct sccb_header header; | ||
236 | } __attribute__((packed, aligned(8))); | ||
237 | |||
238 | static int do_cpu_configure(sclp_cmdw_t cmd) | ||
239 | { | ||
240 | struct cpu_configure_sccb *sccb; | ||
241 | int rc; | ||
242 | |||
243 | if (!SCLP_HAS_CPU_RECONFIG) | ||
244 | return -EOPNOTSUPP; | ||
245 | /* | ||
246 | * This is not going to cross a page boundary since we force | ||
247 | * kmalloc to have a minimum alignment of 8 bytes on s390. | ||
248 | */ | ||
249 | sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA); | ||
250 | if (!sccb) | ||
251 | return -ENOMEM; | ||
252 | sccb->header.length = sizeof(*sccb); | ||
253 | rc = do_sync_request(cmd, sccb); | ||
254 | if (rc) | ||
255 | goto out; | ||
256 | switch (sccb->header.response_code) { | ||
257 | case 0x0020: | ||
258 | case 0x0120: | ||
259 | break; | ||
260 | default: | ||
261 | printk(KERN_WARNING TAG "configure cpu failed (cmd=0x%08x, " | ||
262 | "response=0x%04x)\n", cmd, sccb->header.response_code); | ||
263 | rc = -EIO; | ||
264 | break; | ||
265 | } | ||
266 | out: | ||
267 | kfree(sccb); | ||
268 | return rc; | ||
269 | } | ||
270 | |||
271 | int sclp_cpu_configure(u8 cpu) | ||
272 | { | ||
273 | return do_cpu_configure(SCLP_CMDW_CONFIGURE_CPU | cpu << 8); | ||
274 | } | ||
275 | |||
276 | int sclp_cpu_deconfigure(u8 cpu) | ||
277 | { | ||
278 | return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8); | ||
279 | } | ||
280 | |||
281 | /* | ||
282 | * Channel path configuration related functions. | ||
283 | */ | ||
284 | |||
285 | #define SCLP_CMDW_CONFIGURE_CHPATH 0x000f0001 | ||
286 | #define SCLP_CMDW_DECONFIGURE_CHPATH 0x000e0001 | ||
287 | #define SCLP_CMDW_READ_CHPATH_INFORMATION 0x00030001 | ||
288 | |||
289 | struct chp_cfg_sccb { | ||
290 | struct sccb_header header; | ||
291 | u8 ccm; | ||
292 | u8 reserved[6]; | ||
293 | u8 cssid; | ||
294 | } __attribute__((packed)); | ||
295 | |||
296 | static int do_chp_configure(sclp_cmdw_t cmd) | ||
297 | { | ||
298 | struct chp_cfg_sccb *sccb; | ||
299 | int rc; | ||
300 | |||
301 | if (!SCLP_HAS_CHP_RECONFIG) | ||
302 | return -EOPNOTSUPP; | ||
303 | /* Prepare sccb. */ | ||
304 | sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
305 | if (!sccb) | ||
306 | return -ENOMEM; | ||
307 | sccb->header.length = sizeof(*sccb); | ||
308 | rc = do_sync_request(cmd, sccb); | ||
309 | if (rc) | ||
310 | goto out; | ||
311 | switch (sccb->header.response_code) { | ||
312 | case 0x0020: | ||
313 | case 0x0120: | ||
314 | case 0x0440: | ||
315 | case 0x0450: | ||
316 | break; | ||
317 | default: | ||
318 | printk(KERN_WARNING TAG "configure channel-path failed " | ||
319 | "(cmd=0x%08x, response=0x%04x)\n", cmd, | ||
320 | sccb->header.response_code); | ||
321 | rc = -EIO; | ||
322 | break; | ||
323 | } | ||
324 | out: | ||
325 | free_page((unsigned long) sccb); | ||
326 | return rc; | ||
327 | } | ||
328 | |||
329 | /** | ||
330 | * sclp_chp_configure - perform configure channel-path sclp command | ||
331 | * @chpid: channel-path ID | ||
332 | * | ||
333 | * Perform the configure channel-path sclp command for the specified chpid. | ||
334 | * Return 0 if the command finished successfully, non-zero otherwise. | ||
335 | */ | ||
336 | int sclp_chp_configure(struct chp_id chpid) | ||
337 | { | ||
338 | return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8); | ||
339 | } | ||
340 | |||
341 | /** | ||
342 | * sclp_chp_deconfigure - perform deconfigure channel-path sclp command | ||
343 | * @chpid: channel-path ID | ||
344 | * | ||
345 | * Perform the deconfigure channel-path sclp command for the specified chpid | ||
346 | * and wait for completion. Return 0 on success, non-zero otherwise. | ||
347 | */ | ||
348 | int sclp_chp_deconfigure(struct chp_id chpid) | ||
349 | { | ||
350 | return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8); | ||
351 | } | ||
352 | |||
353 | struct chp_info_sccb { | ||
354 | struct sccb_header header; | ||
355 | u8 recognized[SCLP_CHP_INFO_MASK_SIZE]; | ||
356 | u8 standby[SCLP_CHP_INFO_MASK_SIZE]; | ||
357 | u8 configured[SCLP_CHP_INFO_MASK_SIZE]; | ||
358 | u8 ccm; | ||
359 | u8 reserved[6]; | ||
360 | u8 cssid; | ||
361 | } __attribute__((packed)); | ||
362 | |||
363 | /** | ||
364 | * sclp_chp_read_info - perform read channel-path information sclp command | ||
365 | * @info: resulting channel-path information data | ||
366 | * | ||
367 | * Perform read channel-path information sclp command and wait for completion. | ||
368 | * On success, store channel-path information in @info and return 0. Return | ||
369 | * non-zero otherwise. | ||
370 | */ | ||
371 | int sclp_chp_read_info(struct sclp_chp_info *info) | ||
372 | { | ||
373 | struct chp_info_sccb *sccb; | ||
374 | int rc; | ||
375 | |||
376 | if (!SCLP_HAS_CHP_INFO) | ||
377 | return -EOPNOTSUPP; | ||
378 | /* Prepare sccb. */ | ||
379 | sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
380 | if (!sccb) | ||
381 | return -ENOMEM; | ||
382 | sccb->header.length = sizeof(*sccb); | ||
383 | rc = do_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb); | ||
384 | if (rc) | ||
385 | goto out; | ||
386 | if (sccb->header.response_code != 0x0010) { | ||
387 | printk(KERN_WARNING TAG "read channel-path info failed " | ||
388 | "(response=0x%04x)\n", sccb->header.response_code); | ||
389 | rc = -EIO; | ||
390 | goto out; | ||
391 | } | ||
392 | memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE); | ||
393 | memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE); | ||
394 | memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE); | ||
395 | out: | ||
396 | free_page((unsigned long) sccb); | ||
397 | return rc; | ||
398 | } | ||
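A minimal sketch of how the new CPU interface in sclp_cmd.c might be consumed; sclp_get_cpu_info(), sclp_cpu_configure() and the configured/standby fields come from the code above, while the function name and message text here are purely illustrative (needs <linux/slab.h> and <asm/sclp.h>):

	/* Sketch only: report the CPU configuration seen by the SCLP. */
	static int example_report_cpus(void)
	{
		struct sclp_cpu_info *info;
		int rc;

		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;
		rc = sclp_get_cpu_info(info);
		if (!rc)
			printk(KERN_INFO "cpus: %d configured, %d standby\n",
			       info->configured, info->standby);
		kfree(info);
		return rc;
	}

A standby CPU could then be taken into the configuration with sclp_cpu_configure(addr), which returns 0 on success or a negative error code, mirroring do_cpu_configure() above.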
diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c index 82a13d9fdfe4..5716487b8c9d 100644 --- a/drivers/s390/char/sclp_cpi.c +++ b/drivers/s390/char/sclp_cpi.c | |||
@@ -1,255 +1,41 @@ | |||
1 | /* | 1 | /* |
2 | * Author: Martin Peschke <mpeschke@de.ibm.com> | 2 | * drivers/s390/char/sclp_cpi.c |
3 | * Copyright (C) 2001 IBM Entwicklung GmbH, IBM Corporation | 3 | * SCLP control programm identification |
4 | * | 4 | * |
5 | * SCLP Control-Program Identification. | 5 | * Copyright IBM Corp. 2001, 2007 |
6 | * Author(s): Martin Peschke <mpeschke@de.ibm.com> | ||
7 | * Michael Ernst <mernst@de.ibm.com> | ||
6 | */ | 8 | */ |
7 | 9 | ||
8 | #include <linux/version.h> | ||
9 | #include <linux/kmod.h> | 10 | #include <linux/kmod.h> |
10 | #include <linux/module.h> | 11 | #include <linux/module.h> |
11 | #include <linux/moduleparam.h> | 12 | #include <linux/moduleparam.h> |
12 | #include <linux/init.h> | 13 | #include <linux/version.h> |
13 | #include <linux/timer.h> | 14 | #include "sclp_cpi_sys.h" |
14 | #include <linux/string.h> | ||
15 | #include <linux/err.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <asm/ebcdic.h> | ||
18 | #include <asm/semaphore.h> | ||
19 | |||
20 | #include "sclp.h" | ||
21 | #include "sclp_rw.h" | ||
22 | |||
23 | #define CPI_LENGTH_SYSTEM_TYPE 8 | ||
24 | #define CPI_LENGTH_SYSTEM_NAME 8 | ||
25 | #define CPI_LENGTH_SYSPLEX_NAME 8 | ||
26 | |||
27 | struct cpi_evbuf { | ||
28 | struct evbuf_header header; | ||
29 | u8 id_format; | ||
30 | u8 reserved0; | ||
31 | u8 system_type[CPI_LENGTH_SYSTEM_TYPE]; | ||
32 | u64 reserved1; | ||
33 | u8 system_name[CPI_LENGTH_SYSTEM_NAME]; | ||
34 | u64 reserved2; | ||
35 | u64 system_level; | ||
36 | u64 reserved3; | ||
37 | u8 sysplex_name[CPI_LENGTH_SYSPLEX_NAME]; | ||
38 | u8 reserved4[16]; | ||
39 | } __attribute__((packed)); | ||
40 | |||
41 | struct cpi_sccb { | ||
42 | struct sccb_header header; | ||
43 | struct cpi_evbuf cpi_evbuf; | ||
44 | } __attribute__((packed)); | ||
45 | |||
46 | /* Event type structure for write message and write priority message */ | ||
47 | static struct sclp_register sclp_cpi_event = | ||
48 | { | ||
49 | .send_mask = EVTYP_CTLPROGIDENT_MASK | ||
50 | }; | ||
51 | 15 | ||
52 | MODULE_LICENSE("GPL"); | 16 | MODULE_LICENSE("GPL"); |
17 | MODULE_DESCRIPTION("Identify this operating system instance " | ||
18 | "to the System z hardware"); | ||
19 | MODULE_AUTHOR("Martin Peschke <mpeschke@de.ibm.com>, " | ||
20 | "Michael Ernst <mernst@de.ibm.com>"); | ||
53 | 21 | ||
54 | MODULE_AUTHOR( | 22 | static char *system_name = ""; |
55 | "Martin Peschke, IBM Deutschland Entwicklung GmbH " | 23 | static char *sysplex_name = ""; |
56 | "<mpeschke@de.ibm.com>"); | ||
57 | |||
58 | MODULE_DESCRIPTION( | ||
59 | "identify this operating system instance to the S/390 " | ||
60 | "or zSeries hardware"); | ||
61 | 24 | ||
62 | static char *system_name = NULL; | ||
63 | module_param(system_name, charp, 0); | 25 | module_param(system_name, charp, 0); |
64 | MODULE_PARM_DESC(system_name, "e.g. hostname - max. 8 characters"); | 26 | MODULE_PARM_DESC(system_name, "e.g. hostname - max. 8 characters"); |
65 | |||
66 | static char *sysplex_name = NULL; | ||
67 | #ifdef ALLOW_SYSPLEX_NAME | ||
68 | module_param(sysplex_name, charp, 0); | 27 | module_param(sysplex_name, charp, 0); |
69 | MODULE_PARM_DESC(sysplex_name, "if applicable - max. 8 characters"); | 28 | MODULE_PARM_DESC(sysplex_name, "if applicable - max. 8 characters"); |
70 | #endif | ||
71 | |||
72 | /* use default value for this field (as well as for system level) */ | ||
73 | static char *system_type = "LINUX"; | ||
74 | 29 | ||
75 | static int | 30 | static int __init cpi_module_init(void) |
76 | cpi_check_parms(void) | ||
77 | { | 31 | { |
78 | /* reject if no system type specified */ | 32 | return sclp_cpi_set_data(system_name, sysplex_name, "LINUX", |
79 | if (!system_type) { | 33 | LINUX_VERSION_CODE); |
80 | printk("cpi: bug: no system type specified\n"); | ||
81 | return -EINVAL; | ||
82 | } | ||
83 | |||
84 | /* reject if system type larger than 8 characters */ | ||
85 | if (strlen(system_type) > CPI_LENGTH_SYSTEM_NAME) { | ||
86 | printk("cpi: bug: system type has length of %li characters - " | ||
87 | "only %i characters supported\n", | ||
88 | strlen(system_type), CPI_LENGTH_SYSTEM_TYPE); | ||
89 | return -EINVAL; | ||
90 | } | ||
91 | |||
92 | /* reject if no system name specified */ | ||
93 | if (!system_name) { | ||
94 | printk("cpi: no system name specified\n"); | ||
95 | return -EINVAL; | ||
96 | } | ||
97 | |||
98 | /* reject if system name larger than 8 characters */ | ||
99 | if (strlen(system_name) > CPI_LENGTH_SYSTEM_NAME) { | ||
100 | printk("cpi: system name has length of %li characters - " | ||
101 | "only %i characters supported\n", | ||
102 | strlen(system_name), CPI_LENGTH_SYSTEM_NAME); | ||
103 | return -EINVAL; | ||
104 | } | ||
105 | |||
106 | /* reject if specified sysplex name larger than 8 characters */ | ||
107 | if (sysplex_name && strlen(sysplex_name) > CPI_LENGTH_SYSPLEX_NAME) { | ||
108 | printk("cpi: sysplex name has length of %li characters" | ||
109 | " - only %i characters supported\n", | ||
110 | strlen(sysplex_name), CPI_LENGTH_SYSPLEX_NAME); | ||
111 | return -EINVAL; | ||
112 | } | ||
113 | return 0; | ||
114 | } | 34 | } |
115 | 35 | ||
116 | static void | ||
117 | cpi_callback(struct sclp_req *req, void *data) | ||
118 | { | ||
119 | struct semaphore *sem; | ||
120 | |||
121 | sem = (struct semaphore *) data; | ||
122 | up(sem); | ||
123 | } | ||
124 | |||
125 | static struct sclp_req * | ||
126 | cpi_prepare_req(void) | ||
127 | { | ||
128 | struct sclp_req *req; | ||
129 | struct cpi_sccb *sccb; | ||
130 | struct cpi_evbuf *evb; | ||
131 | |||
132 | req = kmalloc(sizeof(struct sclp_req), GFP_KERNEL); | ||
133 | if (req == NULL) | ||
134 | return ERR_PTR(-ENOMEM); | ||
135 | sccb = (struct cpi_sccb *) __get_free_page(GFP_KERNEL | GFP_DMA); | ||
136 | if (sccb == NULL) { | ||
137 | kfree(req); | ||
138 | return ERR_PTR(-ENOMEM); | ||
139 | } | ||
140 | memset(sccb, 0, sizeof(struct cpi_sccb)); | ||
141 | |||
142 | /* setup SCCB for Control-Program Identification */ | ||
143 | sccb->header.length = sizeof(struct cpi_sccb); | ||
144 | sccb->cpi_evbuf.header.length = sizeof(struct cpi_evbuf); | ||
145 | sccb->cpi_evbuf.header.type = 0x0B; | ||
146 | evb = &sccb->cpi_evbuf; | ||
147 | |||
148 | /* set system type */ | ||
149 | memset(evb->system_type, ' ', CPI_LENGTH_SYSTEM_TYPE); | ||
150 | memcpy(evb->system_type, system_type, strlen(system_type)); | ||
151 | sclp_ascebc_str(evb->system_type, CPI_LENGTH_SYSTEM_TYPE); | ||
152 | EBC_TOUPPER(evb->system_type, CPI_LENGTH_SYSTEM_TYPE); | ||
153 | |||
154 | /* set system name */ | ||
155 | memset(evb->system_name, ' ', CPI_LENGTH_SYSTEM_NAME); | ||
156 | memcpy(evb->system_name, system_name, strlen(system_name)); | ||
157 | sclp_ascebc_str(evb->system_name, CPI_LENGTH_SYSTEM_NAME); | ||
158 | EBC_TOUPPER(evb->system_name, CPI_LENGTH_SYSTEM_NAME); | ||
159 | |||
160 | /* set system level */ | ||
161 | evb->system_level = LINUX_VERSION_CODE; | ||
162 | |||
163 | /* set sysplex name */ | ||
164 | if (sysplex_name) { | ||
165 | memset(evb->sysplex_name, ' ', CPI_LENGTH_SYSPLEX_NAME); | ||
166 | memcpy(evb->sysplex_name, sysplex_name, strlen(sysplex_name)); | ||
167 | sclp_ascebc_str(evb->sysplex_name, CPI_LENGTH_SYSPLEX_NAME); | ||
168 | EBC_TOUPPER(evb->sysplex_name, CPI_LENGTH_SYSPLEX_NAME); | ||
169 | } | ||
170 | |||
171 | /* prepare request data structure presented to SCLP driver */ | ||
172 | req->command = SCLP_CMDW_WRITE_EVENT_DATA; | ||
173 | req->sccb = sccb; | ||
174 | req->status = SCLP_REQ_FILLED; | ||
175 | req->callback = cpi_callback; | ||
176 | return req; | ||
177 | } | ||
178 | |||
179 | static void | ||
180 | cpi_free_req(struct sclp_req *req) | ||
181 | { | ||
182 | free_page((unsigned long) req->sccb); | ||
183 | kfree(req); | ||
184 | } | ||
185 | |||
186 | static int __init | ||
187 | cpi_module_init(void) | ||
188 | { | ||
189 | struct semaphore sem; | ||
190 | struct sclp_req *req; | ||
191 | int rc; | ||
192 | |||
193 | rc = cpi_check_parms(); | ||
194 | if (rc) | ||
195 | return rc; | ||
196 | |||
197 | rc = sclp_register(&sclp_cpi_event); | ||
198 | if (rc) { | ||
199 | /* could not register sclp event. Die. */ | ||
200 | printk(KERN_WARNING "cpi: could not register to hardware " | ||
201 | "console.\n"); | ||
202 | return -EINVAL; | ||
203 | } | ||
204 | if (!(sclp_cpi_event.sclp_send_mask & EVTYP_CTLPROGIDENT_MASK)) { | ||
205 | printk(KERN_WARNING "cpi: no control program identification " | ||
206 | "support\n"); | ||
207 | sclp_unregister(&sclp_cpi_event); | ||
208 | return -EOPNOTSUPP; | ||
209 | } | ||
210 | |||
211 | req = cpi_prepare_req(); | ||
212 | if (IS_ERR(req)) { | ||
213 | printk(KERN_WARNING "cpi: couldn't allocate request\n"); | ||
214 | sclp_unregister(&sclp_cpi_event); | ||
215 | return PTR_ERR(req); | ||
216 | } | ||
217 | |||
218 | /* Prepare semaphore */ | ||
219 | sema_init(&sem, 0); | ||
220 | req->callback_data = &sem; | ||
221 | /* Add request to sclp queue */ | ||
222 | rc = sclp_add_request(req); | ||
223 | if (rc) { | ||
224 | printk(KERN_WARNING "cpi: could not start request\n"); | ||
225 | cpi_free_req(req); | ||
226 | sclp_unregister(&sclp_cpi_event); | ||
227 | return rc; | ||
228 | } | ||
229 | /* make "insmod" sleep until callback arrives */ | ||
230 | down(&sem); | ||
231 | |||
232 | rc = ((struct cpi_sccb *) req->sccb)->header.response_code; | ||
233 | if (rc != 0x0020) { | ||
234 | printk(KERN_WARNING "cpi: failed with response code 0x%x\n", | ||
235 | rc); | ||
236 | rc = -ECOMM; | ||
237 | } else | ||
238 | rc = 0; | ||
239 | |||
240 | cpi_free_req(req); | ||
241 | sclp_unregister(&sclp_cpi_event); | ||
242 | |||
243 | return rc; | ||
244 | } | ||
245 | |||
246 | |||
247 | static void __exit cpi_module_exit(void) | 36 | static void __exit cpi_module_exit(void) |
248 | { | 37 | { |
249 | } | 38 | } |
250 | 39 | ||
251 | |||
252 | /* declare driver module init/cleanup functions */ | ||
253 | module_init(cpi_module_init); | 40 | module_init(cpi_module_init); |
254 | module_exit(cpi_module_exit); | 41 | module_exit(cpi_module_exit); |
255 | |||
diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c new file mode 100644 index 000000000000..41617032afdc --- /dev/null +++ b/drivers/s390/char/sclp_cpi_sys.c | |||
@@ -0,0 +1,400 @@ | |||
1 | /* | ||
2 | * drivers/s390/char/sclp_cpi_sys.c | ||
3 | * SCLP control program identification sysfs interface | ||
4 | * | ||
5 | * Copyright IBM Corp. 2001, 2007 | ||
6 | * Author(s): Martin Peschke <mpeschke@de.ibm.com> | ||
7 | * Michael Ernst <mernst@de.ibm.com> | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/stat.h> | ||
13 | #include <linux/device.h> | ||
14 | #include <linux/string.h> | ||
15 | #include <linux/ctype.h> | ||
16 | #include <linux/kmod.h> | ||
17 | #include <linux/timer.h> | ||
18 | #include <linux/err.h> | ||
19 | #include <linux/slab.h> | ||
20 | #include <linux/completion.h> | ||
21 | #include <asm/ebcdic.h> | ||
22 | #include <asm/sclp.h> | ||
23 | #include "sclp.h" | ||
24 | #include "sclp_rw.h" | ||
25 | #include "sclp_cpi_sys.h" | ||
26 | |||
27 | #define CPI_LENGTH_NAME 8 | ||
28 | #define CPI_LENGTH_LEVEL 16 | ||
29 | |||
30 | struct cpi_evbuf { | ||
31 | struct evbuf_header header; | ||
32 | u8 id_format; | ||
33 | u8 reserved0; | ||
34 | u8 system_type[CPI_LENGTH_NAME]; | ||
35 | u64 reserved1; | ||
36 | u8 system_name[CPI_LENGTH_NAME]; | ||
37 | u64 reserved2; | ||
38 | u64 system_level; | ||
39 | u64 reserved3; | ||
40 | u8 sysplex_name[CPI_LENGTH_NAME]; | ||
41 | u8 reserved4[16]; | ||
42 | } __attribute__((packed)); | ||
43 | |||
44 | struct cpi_sccb { | ||
45 | struct sccb_header header; | ||
46 | struct cpi_evbuf cpi_evbuf; | ||
47 | } __attribute__((packed)); | ||
48 | |||
49 | static struct sclp_register sclp_cpi_event = { | ||
50 | .send_mask = EVTYP_CTLPROGIDENT_MASK, | ||
51 | }; | ||
52 | |||
53 | static char system_name[CPI_LENGTH_NAME + 1]; | ||
54 | static char sysplex_name[CPI_LENGTH_NAME + 1]; | ||
55 | static char system_type[CPI_LENGTH_NAME + 1]; | ||
56 | static u64 system_level; | ||
57 | |||
58 | static void set_data(char *field, char *data) | ||
59 | { | ||
60 | memset(field, ' ', CPI_LENGTH_NAME); | ||
61 | memcpy(field, data, strlen(data)); | ||
62 | sclp_ascebc_str(field, CPI_LENGTH_NAME); | ||
63 | } | ||
64 | |||
65 | static void cpi_callback(struct sclp_req *req, void *data) | ||
66 | { | ||
67 | struct completion *completion = data; | ||
68 | |||
69 | complete(completion); | ||
70 | } | ||
71 | |||
72 | static struct sclp_req *cpi_prepare_req(void) | ||
73 | { | ||
74 | struct sclp_req *req; | ||
75 | struct cpi_sccb *sccb; | ||
76 | struct cpi_evbuf *evb; | ||
77 | |||
78 | req = kzalloc(sizeof(struct sclp_req), GFP_KERNEL); | ||
79 | if (!req) | ||
80 | return ERR_PTR(-ENOMEM); | ||
81 | sccb = (struct cpi_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
82 | if (!sccb) { | ||
83 | kfree(req); | ||
84 | return ERR_PTR(-ENOMEM); | ||
85 | } | ||
86 | |||
87 | /* setup SCCB for Control-Program Identification */ | ||
88 | sccb->header.length = sizeof(struct cpi_sccb); | ||
89 | sccb->cpi_evbuf.header.length = sizeof(struct cpi_evbuf); | ||
90 | sccb->cpi_evbuf.header.type = 0x0b; | ||
91 | evb = &sccb->cpi_evbuf; | ||
92 | |||
93 | /* set system type */ | ||
94 | set_data(evb->system_type, system_type); | ||
95 | |||
96 | /* set system name */ | ||
97 | set_data(evb->system_name, system_name); | ||
98 | |||
99 | /* set system level */ | ||
100 | evb->system_level = system_level; | ||
101 | |||
102 | /* set sysplex name */ | ||
103 | set_data(evb->sysplex_name, sysplex_name); | ||
104 | |||
105 | /* prepare request data structure presented to SCLP driver */ | ||
106 | req->command = SCLP_CMDW_WRITE_EVENT_DATA; | ||
107 | req->sccb = sccb; | ||
108 | req->status = SCLP_REQ_FILLED; | ||
109 | req->callback = cpi_callback; | ||
110 | return req; | ||
111 | } | ||
112 | |||
113 | static void cpi_free_req(struct sclp_req *req) | ||
114 | { | ||
115 | free_page((unsigned long) req->sccb); | ||
116 | kfree(req); | ||
117 | } | ||
118 | |||
119 | static int cpi_req(void) | ||
120 | { | ||
121 | struct completion completion; | ||
122 | struct sclp_req *req; | ||
123 | int rc; | ||
124 | int response; | ||
125 | |||
126 | rc = sclp_register(&sclp_cpi_event); | ||
127 | if (rc) { | ||
128 | printk(KERN_WARNING "cpi: could not register " | ||
129 | "to hardware console.\n"); | ||
130 | goto out; | ||
131 | } | ||
132 | if (!(sclp_cpi_event.sclp_send_mask & EVTYP_CTLPROGIDENT_MASK)) { | ||
133 | printk(KERN_WARNING "cpi: no control program " | ||
134 | "identification support\n"); | ||
135 | rc = -EOPNOTSUPP; | ||
136 | goto out_unregister; | ||
137 | } | ||
138 | |||
139 | req = cpi_prepare_req(); | ||
140 | if (IS_ERR(req)) { | ||
141 | printk(KERN_WARNING "cpi: could not allocate request\n"); | ||
142 | rc = PTR_ERR(req); | ||
143 | goto out_unregister; | ||
144 | } | ||
145 | |||
146 | init_completion(&completion); | ||
147 | req->callback_data = &completion; | ||
148 | |||
149 | /* Add request to sclp queue */ | ||
150 | rc = sclp_add_request(req); | ||
151 | if (rc) { | ||
152 | printk(KERN_WARNING "cpi: could not start request\n"); | ||
153 | goto out_free_req; | ||
154 | } | ||
155 | |||
156 | wait_for_completion(&completion); | ||
157 | |||
158 | if (req->status != SCLP_REQ_DONE) { | ||
159 | printk(KERN_WARNING "cpi: request failed (status=0x%02x)\n", | ||
160 | req->status); | ||
161 | rc = -EIO; | ||
162 | goto out_free_req; | ||
163 | } | ||
164 | |||
165 | response = ((struct cpi_sccb *) req->sccb)->header.response_code; | ||
166 | if (response != 0x0020) { | ||
167 | printk(KERN_WARNING "cpi: failed with " | ||
168 | "response code 0x%x\n", response); | ||
169 | rc = -EIO; | ||
170 | } | ||
171 | |||
172 | out_free_req: | ||
173 | cpi_free_req(req); | ||
174 | |||
175 | out_unregister: | ||
176 | sclp_unregister(&sclp_cpi_event); | ||
177 | |||
178 | out: | ||
179 | return rc; | ||
180 | } | ||
181 | |||
182 | static int check_string(const char *attr, const char *str) | ||
183 | { | ||
184 | size_t len; | ||
185 | size_t i; | ||
186 | |||
187 | len = strlen(str); | ||
188 | |||
189 | if ((len > 0) && (str[len - 1] == '\n')) | ||
190 | len--; | ||
191 | |||
192 | if (len > CPI_LENGTH_NAME) | ||
193 | return -EINVAL; | ||
194 | |||
195 | for (i = 0; i < len ; i++) { | ||
196 | if (isalpha(str[i]) || isdigit(str[i]) || | ||
197 | strchr("$@# ", str[i])) | ||
198 | continue; | ||
199 | return -EINVAL; | ||
200 | } | ||
201 | |||
202 | return 0; | ||
203 | } | ||
204 | |||
205 | static void set_string(char *attr, const char *value) | ||
206 | { | ||
207 | size_t len; | ||
208 | size_t i; | ||
209 | |||
210 | len = strlen(value); | ||
211 | |||
212 | if ((len > 0) && (value[len - 1] == '\n')) | ||
213 | len--; | ||
214 | |||
215 | for (i = 0; i < CPI_LENGTH_NAME; i++) { | ||
216 | if (i < len) | ||
217 | attr[i] = toupper(value[i]); | ||
218 | else | ||
219 | attr[i] = ' '; | ||
220 | } | ||
221 | } | ||
222 | |||
223 | static ssize_t system_name_show(struct kobject *kobj, | ||
224 | struct kobj_attribute *attr, char *page) | ||
225 | { | ||
226 | return snprintf(page, PAGE_SIZE, "%s\n", system_name); | ||
227 | } | ||
228 | |||
229 | static ssize_t system_name_store(struct kobject *kobj, | ||
230 | struct kobj_attribute *attr, | ||
231 | const char *buf, | ||
232 | size_t len) | ||
233 | { | ||
234 | int rc; | ||
235 | |||
236 | rc = check_string("system_name", buf); | ||
237 | if (rc) | ||
238 | return rc; | ||
239 | |||
240 | set_string(system_name, buf); | ||
241 | |||
242 | return len; | ||
243 | } | ||
244 | |||
245 | static struct kobj_attribute system_name_attr = | ||
246 | __ATTR(system_name, 0644, system_name_show, system_name_store); | ||
247 | |||
248 | static ssize_t sysplex_name_show(struct kobject *kobj, | ||
249 | struct kobj_attribute *attr, char *page) | ||
250 | { | ||
251 | return snprintf(page, PAGE_SIZE, "%s\n", sysplex_name); | ||
252 | } | ||
253 | |||
254 | static ssize_t sysplex_name_store(struct kobject *kobj, | ||
255 | struct kobj_attribute *attr, | ||
256 | const char *buf, | ||
257 | size_t len) | ||
258 | { | ||
259 | int rc; | ||
260 | |||
261 | rc = check_string("sysplex_name", buf); | ||
262 | if (rc) | ||
263 | return rc; | ||
264 | |||
265 | set_string(sysplex_name, buf); | ||
266 | |||
267 | return len; | ||
268 | } | ||
269 | |||
270 | static struct kobj_attribute sysplex_name_attr = | ||
271 | __ATTR(sysplex_name, 0644, sysplex_name_show, sysplex_name_store); | ||
272 | |||
273 | static ssize_t system_type_show(struct kobject *kobj, | ||
274 | struct kobj_attribute *attr, char *page) | ||
275 | { | ||
276 | return snprintf(page, PAGE_SIZE, "%s\n", system_type); | ||
277 | } | ||
278 | |||
279 | static ssize_t system_type_store(struct kobject *kobj, | ||
280 | struct kobj_attribute *attr, | ||
281 | const char *buf, | ||
282 | size_t len) | ||
283 | { | ||
284 | int rc; | ||
285 | |||
286 | rc = check_string("system_type", buf); | ||
287 | if (rc) | ||
288 | return rc; | ||
289 | |||
290 | set_string(system_type, buf); | ||
291 | |||
292 | return len; | ||
293 | } | ||
294 | |||
295 | static struct kobj_attribute system_type_attr = | ||
296 | __ATTR(system_type, 0644, system_type_show, system_type_store); | ||
297 | |||
298 | static ssize_t system_level_show(struct kobject *kobj, | ||
299 | struct kobj_attribute *attr, char *page) | ||
300 | { | ||
301 | unsigned long long level = system_level; | ||
302 | |||
303 | return snprintf(page, PAGE_SIZE, "%#018llx\n", level); | ||
304 | } | ||
305 | |||
306 | static ssize_t system_level_store(struct kobject *kobj, | ||
307 | struct kobj_attribute *attr, | ||
308 | const char *buf, | ||
309 | size_t len) | ||
310 | { | ||
311 | unsigned long long level; | ||
312 | char *endp; | ||
313 | |||
314 | level = simple_strtoull(buf, &endp, 16); | ||
315 | |||
316 | if (endp == buf) | ||
317 | return -EINVAL; | ||
318 | if (*endp == '\n') | ||
319 | endp++; | ||
320 | if (*endp) | ||
321 | return -EINVAL; | ||
322 | |||
323 | system_level = level; | ||
324 | |||
325 | return len; | ||
326 | } | ||
327 | |||
328 | static struct kobj_attribute system_level_attr = | ||
329 | __ATTR(system_level, 0644, system_level_show, system_level_store); | ||
330 | |||
331 | static ssize_t set_store(struct kobject *kobj, | ||
332 | struct kobj_attribute *attr, | ||
333 | const char *buf, size_t len) | ||
334 | { | ||
335 | int rc; | ||
336 | |||
337 | rc = cpi_req(); | ||
338 | if (rc) | ||
339 | return rc; | ||
340 | |||
341 | return len; | ||
342 | } | ||
343 | |||
344 | static struct kobj_attribute set_attr = __ATTR(set, 0200, NULL, set_store); | ||
345 | |||
346 | static struct attribute *cpi_attrs[] = { | ||
347 | &system_name_attr.attr, | ||
348 | &sysplex_name_attr.attr, | ||
349 | &system_type_attr.attr, | ||
350 | &system_level_attr.attr, | ||
351 | &set_attr.attr, | ||
352 | NULL, | ||
353 | }; | ||
354 | |||
355 | static struct attribute_group cpi_attr_group = { | ||
356 | .attrs = cpi_attrs, | ||
357 | }; | ||
358 | |||
359 | static struct kset *cpi_kset; | ||
360 | |||
361 | int sclp_cpi_set_data(const char *system, const char *sysplex, const char *type, | ||
362 | const u64 level) | ||
363 | { | ||
364 | int rc; | ||
365 | |||
366 | rc = check_string("system_name", system); | ||
367 | if (rc) | ||
368 | return rc; | ||
369 | rc = check_string("sysplex_name", sysplex); | ||
370 | if (rc) | ||
371 | return rc; | ||
372 | rc = check_string("system_type", type); | ||
373 | if (rc) | ||
374 | return rc; | ||
375 | |||
376 | set_string(system_name, system); | ||
377 | set_string(sysplex_name, sysplex); | ||
378 | set_string(system_type, type); | ||
379 | system_level = level; | ||
380 | |||
381 | return cpi_req(); | ||
382 | } | ||
383 | EXPORT_SYMBOL(sclp_cpi_set_data); | ||
384 | |||
385 | static int __init cpi_init(void) | ||
386 | { | ||
387 | int rc; | ||
388 | |||
389 | cpi_kset = kset_create_and_add("cpi", NULL, firmware_kobj); | ||
390 | if (!cpi_kset) | ||
391 | return -ENOMEM; | ||
392 | |||
393 | rc = sysfs_create_group(&cpi_kset->kobj, &cpi_attr_group); | ||
394 | if (rc) | ||
395 | kset_unregister(cpi_kset); | ||
396 | |||
397 | return rc; | ||
398 | } | ||
399 | |||
400 | __initcall(cpi_init); | ||
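Besides the sysfs attributes this file creates under /sys/firmware/cpi/ (system_name, sysplex_name, system_type, system_level and the write-only "set" trigger), sclp_cpi_set_data() is exported for in-kernel users such as the slimmed-down sclp_cpi module above. A hedged sketch of such a caller; the string values are made up, and the constraints (at most 8 characters drawn from letters, digits and "$@# ") come from check_string() above:

	#include <linux/version.h>
	#include "sclp_cpi_sys.h"

	static int __init example_identify(void)
	{
		return sclp_cpi_set_data("MYHOST", "MYPLEX", "LINUX",
					 LINUX_VERSION_CODE);
	}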
diff --git a/drivers/s390/char/sclp_cpi_sys.h b/drivers/s390/char/sclp_cpi_sys.h new file mode 100644 index 000000000000..deef3e6ff496 --- /dev/null +++ b/drivers/s390/char/sclp_cpi_sys.h | |||
@@ -0,0 +1,15 @@ | |||
1 | /* | ||
2 | * drivers/s390/char/sclp_cpi_sys.h | ||
3 | * SCLP control program identification sysfs interface | ||
4 | * | ||
5 | * Copyright IBM Corp. 2007 | ||
6 | * Author(s): Michael Ernst <mernst@de.ibm.com> | ||
7 | */ | ||
8 | |||
9 | #ifndef __SCLP_CPI_SYS_H__ | ||
10 | #define __SCLP_CPI_SYS_H__ | ||
11 | |||
12 | int sclp_cpi_set_data(const char *system, const char *sysplex, | ||
13 | const char *type, u64 level); | ||
14 | |||
15 | #endif /* __SCLP_CPI_SYS_H__ */ | ||
diff --git a/drivers/s390/char/sclp_info.c b/drivers/s390/char/sclp_info.c deleted file mode 100644 index a1136e052750..000000000000 --- a/drivers/s390/char/sclp_info.c +++ /dev/null | |||
@@ -1,116 +0,0 @@ | |||
1 | /* | ||
2 | * drivers/s390/char/sclp_info.c | ||
3 | * | ||
4 | * Copyright IBM Corp. 2007 | ||
5 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #include <linux/init.h> | ||
9 | #include <linux/errno.h> | ||
10 | #include <linux/string.h> | ||
11 | #include <asm/sclp.h> | ||
12 | #include "sclp.h" | ||
13 | |||
14 | struct sclp_readinfo_sccb { | ||
15 | struct sccb_header header; /* 0-7 */ | ||
16 | u16 rnmax; /* 8-9 */ | ||
17 | u8 rnsize; /* 10 */ | ||
18 | u8 _reserved0[24 - 11]; /* 11-23 */ | ||
19 | u8 loadparm[8]; /* 24-31 */ | ||
20 | u8 _reserved1[48 - 32]; /* 32-47 */ | ||
21 | u64 facilities; /* 48-55 */ | ||
22 | u8 _reserved2[91 - 56]; /* 56-90 */ | ||
23 | u8 flags; /* 91 */ | ||
24 | u8 _reserved3[100 - 92]; /* 92-99 */ | ||
25 | u32 rnsize2; /* 100-103 */ | ||
26 | u64 rnmax2; /* 104-111 */ | ||
27 | u8 _reserved4[4096 - 112]; /* 112-4095 */ | ||
28 | } __attribute__((packed, aligned(4096))); | ||
29 | |||
30 | static struct sclp_readinfo_sccb __initdata early_readinfo_sccb; | ||
31 | static int __initdata early_readinfo_sccb_valid; | ||
32 | |||
33 | u64 sclp_facilities; | ||
34 | |||
35 | void __init sclp_readinfo_early(void) | ||
36 | { | ||
37 | int ret; | ||
38 | int i; | ||
39 | struct sclp_readinfo_sccb *sccb; | ||
40 | sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED, | ||
41 | SCLP_CMDW_READ_SCP_INFO}; | ||
42 | |||
43 | /* Enable service signal subclass mask. */ | ||
44 | __ctl_set_bit(0, 9); | ||
45 | sccb = &early_readinfo_sccb; | ||
46 | for (i = 0; i < ARRAY_SIZE(commands); i++) { | ||
47 | do { | ||
48 | memset(sccb, 0, sizeof(*sccb)); | ||
49 | sccb->header.length = sizeof(*sccb); | ||
50 | sccb->header.control_mask[2] = 0x80; | ||
51 | ret = sclp_service_call(commands[i], sccb); | ||
52 | } while (ret == -EBUSY); | ||
53 | |||
54 | if (ret) | ||
55 | break; | ||
56 | __load_psw_mask(PSW_BASE_BITS | PSW_MASK_EXT | | ||
57 | PSW_MASK_WAIT | PSW_DEFAULT_KEY); | ||
58 | local_irq_disable(); | ||
59 | /* | ||
60 | * Contents of the sccb might have changed | ||
61 | * therefore a barrier is needed. | ||
62 | */ | ||
63 | barrier(); | ||
64 | if (sccb->header.response_code == 0x10) { | ||
65 | early_readinfo_sccb_valid = 1; | ||
66 | break; | ||
67 | } | ||
68 | if (sccb->header.response_code != 0x1f0) | ||
69 | break; | ||
70 | } | ||
71 | /* Disable service signal subclass mask again. */ | ||
72 | __ctl_clear_bit(0, 9); | ||
73 | } | ||
74 | |||
75 | void __init sclp_facilities_detect(void) | ||
76 | { | ||
77 | if (!early_readinfo_sccb_valid) | ||
78 | return; | ||
79 | sclp_facilities = early_readinfo_sccb.facilities; | ||
80 | } | ||
81 | |||
82 | unsigned long long __init sclp_memory_detect(void) | ||
83 | { | ||
84 | unsigned long long memsize; | ||
85 | struct sclp_readinfo_sccb *sccb; | ||
86 | |||
87 | if (!early_readinfo_sccb_valid) | ||
88 | return 0; | ||
89 | sccb = &early_readinfo_sccb; | ||
90 | if (sccb->rnsize) | ||
91 | memsize = sccb->rnsize << 20; | ||
92 | else | ||
93 | memsize = sccb->rnsize2 << 20; | ||
94 | if (sccb->rnmax) | ||
95 | memsize *= sccb->rnmax; | ||
96 | else | ||
97 | memsize *= sccb->rnmax2; | ||
98 | return memsize; | ||
99 | } | ||
100 | |||
101 | /* | ||
102 | * This function will be called after sclp_memory_detect(), which gets called | ||
103 | * early from early.c code. Therefore the sccb should have valid contents. | ||
104 | */ | ||
105 | void __init sclp_get_ipl_info(struct sclp_ipl_info *info) | ||
106 | { | ||
107 | struct sclp_readinfo_sccb *sccb; | ||
108 | |||
109 | if (!early_readinfo_sccb_valid) | ||
110 | return; | ||
111 | sccb = &early_readinfo_sccb; | ||
112 | info->is_valid = 1; | ||
113 | if (sccb->flags & 0x2) | ||
114 | info->has_dump = 1; | ||
115 | memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN); | ||
116 | } | ||
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c index d6b06ab81188..ad7195d3de0c 100644 --- a/drivers/s390/char/sclp_rw.c +++ b/drivers/s390/char/sclp_rw.c | |||
@@ -76,7 +76,7 @@ sclp_make_buffer(void *page, unsigned short columns, unsigned short htab) | |||
76 | } | 76 | } |
77 | 77 | ||
78 | /* | 78 | /* |
79 | * Return a pointer to the orignal page that has been used to create | 79 | * Return a pointer to the original page that has been used to create |
80 | * the buffer. | 80 | * the buffer. |
81 | */ | 81 | */ |
82 | void * | 82 | void * |
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c index da25f8e24152..8246ef3ab095 100644 --- a/drivers/s390/char/tape_3590.c +++ b/drivers/s390/char/tape_3590.c | |||
@@ -1495,7 +1495,7 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request, | |||
1495 | device->cdev->dev.bus_id); | 1495 | device->cdev->dev.bus_id); |
1496 | return tape_3590_erp_basic(device, request, irb, -EPERM); | 1496 | return tape_3590_erp_basic(device, request, irb, -EPERM); |
1497 | case 0x8013: | 1497 | case 0x8013: |
1498 | PRINT_WARN("(%s): Another host has priviliged access to the " | 1498 | PRINT_WARN("(%s): Another host has privileged access to the " |
1499 | "tape device\n", device->cdev->dev.bus_id); | 1499 | "tape device\n", device->cdev->dev.bus_id); |
1500 | PRINT_WARN("(%s): To solve the problem unload the current " | 1500 | PRINT_WARN("(%s): To solve the problem unload the current " |
1501 | "cartridge!\n", device->cdev->dev.bus_id); | 1501 | "cartridge!\n", device->cdev->dev.bus_id); |
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c index 2fae6338ee1c..7ad8cf157641 100644 --- a/drivers/s390/char/tape_core.c +++ b/drivers/s390/char/tape_core.c | |||
@@ -37,7 +37,7 @@ static void tape_long_busy_timeout(unsigned long data); | |||
37 | * we can assign the devices to minor numbers of the same major | 37 | * we can assign the devices to minor numbers of the same major |
38 | * The list is protected by the rwlock | 38 | * The list is protected by the rwlock |
39 | */ | 39 | */ |
40 | static struct list_head tape_device_list = LIST_HEAD_INIT(tape_device_list); | 40 | static LIST_HEAD(tape_device_list); |
41 | static DEFINE_RWLOCK(tape_device_lock); | 41 | static DEFINE_RWLOCK(tape_device_lock); |
42 | 42 | ||
43 | /* | 43 | /* |
diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c index cea49f001f89..c9b96d51b28f 100644 --- a/drivers/s390/char/tape_proc.c +++ b/drivers/s390/char/tape_proc.c | |||
@@ -97,7 +97,7 @@ static void tape_proc_stop(struct seq_file *m, void *v) | |||
97 | { | 97 | { |
98 | } | 98 | } |
99 | 99 | ||
100 | static struct seq_operations tape_proc_seq = { | 100 | static const struct seq_operations tape_proc_seq = { |
101 | .start = tape_proc_start, | 101 | .start = tape_proc_start, |
102 | .next = tape_proc_next, | 102 | .next = tape_proc_next, |
103 | .stop = tape_proc_stop, | 103 | .stop = tape_proc_stop, |
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index e0c4c508e121..d364e0bfae12 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c | |||
@@ -683,7 +683,7 @@ static int vmlogrdr_register_driver(void) | |||
683 | /* Register with iucv driver */ | 683 | /* Register with iucv driver */ |
684 | ret = iucv_register(&vmlogrdr_iucv_handler, 1); | 684 | ret = iucv_register(&vmlogrdr_iucv_handler, 1); |
685 | if (ret) { | 685 | if (ret) { |
686 | printk (KERN_ERR "vmlogrdr: failed to register with" | 686 | printk (KERN_ERR "vmlogrdr: failed to register with " |
687 | "iucv driver\n"); | 687 | "iucv driver\n"); |
688 | goto out; | 688 | goto out; |
689 | } | 689 | } |
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c index d70a6e65bf14..7689b500a104 100644 --- a/drivers/s390/char/vmur.c +++ b/drivers/s390/char/vmur.c | |||
@@ -759,7 +759,7 @@ static loff_t ur_llseek(struct file *file, loff_t offset, int whence) | |||
759 | return newpos; | 759 | return newpos; |
760 | } | 760 | } |
761 | 761 | ||
762 | static struct file_operations ur_fops = { | 762 | static const struct file_operations ur_fops = { |
763 | .owner = THIS_MODULE, | 763 | .owner = THIS_MODULE, |
764 | .open = ur_open, | 764 | .open = ur_open, |
765 | .release = ur_release, | 765 | .release = ur_release, |
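As with the seq_operations tables earlier, the file_operations constifications are safe because the registration interfaces take const pointers (cdev_init(), for instance, accepts a const struct file_operations *), so the table can be placed in read-only data. A small, hypothetical sketch, not taken from this patch:

	static const struct file_operations example_fops = {
		.owner	= THIS_MODULE,
		.llseek	= no_llseek,	/* other handlers omitted in this sketch */
	};

	static struct cdev example_cdev;

	static int example_setup(dev_t devno)
	{
		cdev_init(&example_cdev, &example_fops);	/* takes const fops */
		example_cdev.owner = THIS_MODULE;
		return cdev_add(&example_cdev, devno, 1);
	}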
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 7073daf77981..f523501e6e6c 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c | |||
@@ -470,7 +470,7 @@ static loff_t zcore_lseek(struct file *file, loff_t offset, int orig) | |||
470 | return rc; | 470 | return rc; |
471 | } | 471 | } |
472 | 472 | ||
473 | static struct file_operations zcore_fops = { | 473 | static const struct file_operations zcore_fops = { |
474 | .owner = THIS_MODULE, | 474 | .owner = THIS_MODULE, |
475 | .llseek = zcore_lseek, | 475 | .llseek = zcore_lseek, |
476 | .read = zcore_read, | 476 | .read = zcore_read, |
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c index 5287631fbfc8..b7a07a866291 100644 --- a/drivers/s390/cio/airq.c +++ b/drivers/s390/cio/airq.c | |||
@@ -1,12 +1,12 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/s390/cio/airq.c | 2 | * drivers/s390/cio/airq.c |
3 | * S/390 common I/O routines -- support for adapter interruptions | 3 | * Support for adapter interruptions |
4 | * | 4 | * |
5 | * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, | 5 | * Copyright IBM Corp. 1999,2007 |
6 | * IBM Corporation | 6 | * Author(s): Ingo Adlung <adlung@de.ibm.com> |
7 | * Author(s): Ingo Adlung (adlung@de.ibm.com) | 7 | * Cornelia Huck <cornelia.huck@de.ibm.com> |
8 | * Cornelia Huck (cornelia.huck@de.ibm.com) | 8 | * Arnd Bergmann <arndb@de.ibm.com> |
9 | * Arnd Bergmann (arndb@de.ibm.com) | 9 | * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
@@ -14,72 +14,131 @@ | |||
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <linux/rcupdate.h> | 15 | #include <linux/rcupdate.h> |
16 | 16 | ||
17 | #include <asm/airq.h> | ||
18 | |||
19 | #include "cio.h" | ||
17 | #include "cio_debug.h" | 20 | #include "cio_debug.h" |
18 | #include "airq.h" | ||
19 | 21 | ||
20 | static adapter_int_handler_t adapter_handler; | 22 | #define NR_AIRQS 32 |
23 | #define NR_AIRQS_PER_WORD sizeof(unsigned long) | ||
24 | #define NR_AIRQ_WORDS (NR_AIRQS / NR_AIRQS_PER_WORD) | ||
21 | 25 | ||
22 | /* | 26 | union indicator_t { |
23 | * register for adapter interrupts | 27 | unsigned long word[NR_AIRQ_WORDS]; |
24 | * | 28 | unsigned char byte[NR_AIRQS]; |
25 | * With HiperSockets the zSeries architecture provides for | 29 | } __attribute__((packed)); |
26 | * means of adapter interrups, pseudo I/O interrupts that are | ||
27 | * not tied to an I/O subchannel, but to an adapter. However, | ||
28 | * it doesn't disclose the info how to enable/disable them, but | ||
29 | * to recognize them only. Perhaps we should consider them | ||
30 | * being shared interrupts, and thus build a linked list | ||
31 | * of adapter handlers ... to be evaluated ... | ||
32 | */ | ||
33 | int | ||
34 | s390_register_adapter_interrupt (adapter_int_handler_t handler) | ||
35 | { | ||
36 | int ret; | ||
37 | char dbf_txt[15]; | ||
38 | 30 | ||
39 | CIO_TRACE_EVENT (4, "rgaint"); | 31 | struct airq_t { |
32 | adapter_int_handler_t handler; | ||
33 | void *drv_data; | ||
34 | }; | ||
40 | 35 | ||
41 | if (handler == NULL) | 36 | static union indicator_t indicators; |
42 | ret = -EINVAL; | 37 | static struct airq_t *airqs[NR_AIRQS]; |
43 | else | ||
44 | ret = (cmpxchg(&adapter_handler, NULL, handler) ? -EBUSY : 0); | ||
45 | if (!ret) | ||
46 | synchronize_sched(); /* Allow interrupts to complete. */ | ||
47 | 38 | ||
48 | sprintf (dbf_txt, "ret:%d", ret); | 39 | static int register_airq(struct airq_t *airq) |
49 | CIO_TRACE_EVENT (4, dbf_txt); | 40 | { |
41 | int i; | ||
50 | 42 | ||
51 | return ret; | 43 | for (i = 0; i < NR_AIRQS; i++) |
44 | if (!cmpxchg(&airqs[i], NULL, airq)) | ||
45 | return i; | ||
46 | return -ENOMEM; | ||
52 | } | 47 | } |
53 | 48 | ||
54 | int | 49 | /** |
55 | s390_unregister_adapter_interrupt (adapter_int_handler_t handler) | 50 | * s390_register_adapter_interrupt() - register adapter interrupt handler |
51 | * @handler: adapter handler to be registered | ||
52 | * @drv_data: driver data passed with each call to the handler | ||
53 | * | ||
54 | * Returns: | ||
55 | * Pointer to the indicator to be used on success | ||
56 | * ERR_PTR() if registration failed | ||
57 | */ | ||
58 | void *s390_register_adapter_interrupt(adapter_int_handler_t handler, | ||
59 | void *drv_data) | ||
56 | { | 60 | { |
61 | struct airq_t *airq; | ||
62 | char dbf_txt[16]; | ||
57 | int ret; | 63 | int ret; |
58 | char dbf_txt[15]; | ||
59 | 64 | ||
60 | CIO_TRACE_EVENT (4, "urgaint"); | 65 | airq = kmalloc(sizeof(struct airq_t), GFP_KERNEL); |
61 | 66 | if (!airq) { | |
62 | if (handler == NULL) | 67 | ret = -ENOMEM; |
63 | ret = -EINVAL; | 68 | goto out; |
64 | else { | ||
65 | adapter_handler = NULL; | ||
66 | synchronize_sched(); /* Allow interrupts to complete. */ | ||
67 | ret = 0; | ||
68 | } | 69 | } |
69 | sprintf (dbf_txt, "ret:%d", ret); | 70 | airq->handler = handler; |
70 | CIO_TRACE_EVENT (4, dbf_txt); | 71 | airq->drv_data = drv_data; |
71 | 72 | ret = register_airq(airq); | |
72 | return ret; | 73 | if (ret < 0) |
74 | kfree(airq); | ||
75 | out: | ||
76 | snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%d", ret); | ||
77 | CIO_TRACE_EVENT(4, dbf_txt); | ||
78 | if (ret < 0) | ||
79 | return ERR_PTR(ret); | ||
80 | else | ||
81 | return &indicators.byte[ret]; | ||
73 | } | 82 | } |
83 | EXPORT_SYMBOL(s390_register_adapter_interrupt); | ||
74 | 84 | ||
75 | void | 85 | /** |
76 | do_adapter_IO (void) | 86 | * s390_unregister_adapter_interrupt - unregister adapter interrupt handler |
87 | * @ind: indicator for which the handler is to be unregistered | ||
88 | */ | ||
89 | void s390_unregister_adapter_interrupt(void *ind) | ||
77 | { | 90 | { |
78 | CIO_TRACE_EVENT (6, "doaio"); | 91 | struct airq_t *airq; |
92 | char dbf_txt[16]; | ||
93 | int i; | ||
79 | 94 | ||
80 | if (adapter_handler) | 95 | i = (int) ((addr_t) ind) - ((addr_t) &indicators.byte[0]); |
81 | (*adapter_handler) (); | 96 | snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%d", i); |
97 | CIO_TRACE_EVENT(4, dbf_txt); | ||
98 | indicators.byte[i] = 0; | ||
99 | airq = xchg(&airqs[i], NULL); | ||
100 | /* | ||
101 | * Allow interrupts to complete. This will ensure that the airq handle | ||
102 | * is no longer referenced by any interrupt handler. | ||
103 | */ | ||
104 | synchronize_sched(); | ||
105 | kfree(airq); | ||
82 | } | 106 | } |
107 | EXPORT_SYMBOL(s390_unregister_adapter_interrupt); | ||
108 | |||
109 | #define INDICATOR_MASK (0xffUL << ((NR_AIRQS_PER_WORD - 1) * 8)) | ||
83 | 110 | ||
84 | EXPORT_SYMBOL (s390_register_adapter_interrupt); | 111 | void do_adapter_IO(void) |
85 | EXPORT_SYMBOL (s390_unregister_adapter_interrupt); | 112 | { |
113 | int w; | ||
114 | int i; | ||
115 | unsigned long word; | ||
116 | struct airq_t *airq; | ||
117 | |||
118 | /* | ||
119 | * Access indicator array in word-sized chunks to minimize storage | ||
120 | * fetch operations. | ||
121 | */ | ||
122 | for (w = 0; w < NR_AIRQ_WORDS; w++) { | ||
123 | word = indicators.word[w]; | ||
124 | i = w * NR_AIRQS_PER_WORD; | ||
125 | /* | ||
126 | * Check bytes within word for active indicators. | ||
127 | */ | ||
128 | while (word) { | ||
129 | if (word & INDICATOR_MASK) { | ||
130 | airq = airqs[i]; | ||
131 | if (likely(airq)) | ||
132 | airq->handler(&indicators.byte[i], | ||
133 | airq->drv_data); | ||
134 | else | ||
135 | /* | ||
136 | * Reset ill-behaved indicator. | ||
137 | */ | ||
138 | indicators.byte[i] = 0; | ||
139 | } | ||
140 | word <<= 8; | ||
141 | i++; | ||
142 | } | ||
143 | } | ||
144 | } | ||
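
The reworked adapter-interrupt interface above no longer takes one global handler; callers register a handler plus driver data and get back a pointer to their indicator byte, which they later pass to the unregister call. A hedged sketch of a consumer follows; the handler prototype is inferred from the call site in do_adapter_IO() (the real typedef lives in asm/airq.h, which is not shown here), and everything prefixed with my_ is hypothetical:

#include <linux/err.h>
#include <asm/airq.h>

struct my_drv_data {
        unsigned long irq_count;        /* hypothetical per-device state */
};

static struct my_drv_data my_data;
static void *my_indicator;

/* Invoked from do_adapter_IO() whenever the indicator byte is set. */
static void my_adapter_handler(void *ind, void *drv_data)
{
        struct my_drv_data *p = drv_data;

        p->irq_count++;
        /* Assumption: the handler acknowledges by clearing its byte. */
        *(unsigned char *) ind = 0;
}

static int my_setup(void)
{
        my_indicator = s390_register_adapter_interrupt(my_adapter_handler,
                                                       &my_data);
        if (IS_ERR(my_indicator))
                return PTR_ERR(my_indicator);
        return 0;
}

static void my_teardown(void)
{
        s390_unregister_adapter_interrupt(my_indicator);
}
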
diff --git a/drivers/s390/cio/airq.h b/drivers/s390/cio/airq.h deleted file mode 100644 index 7d6be3fdcd66..000000000000 --- a/drivers/s390/cio/airq.h +++ /dev/null | |||
@@ -1,10 +0,0 @@ | |||
1 | #ifndef S390_AINTERRUPT_H | ||
2 | #define S390_AINTERRUPT_H | ||
3 | |||
4 | typedef int (*adapter_int_handler_t)(void); | ||
5 | |||
6 | extern int s390_register_adapter_interrupt(adapter_int_handler_t handler); | ||
7 | extern int s390_unregister_adapter_interrupt(adapter_int_handler_t handler); | ||
8 | extern void do_adapter_IO (void); | ||
9 | |||
10 | #endif | ||
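
For reference, do_adapter_IO() above scans the indicator area one unsigned long at a time: it tests the most significant byte, shifts the word left by eight bits, and repeats until the word is zero, so trailing zero bytes cost no iterations. The standalone snippet below replays that scan on an invented value, assuming a 64-bit unsigned long:

#include <stdio.h>

#define BYTES_PER_WORD  sizeof(unsigned long)
#define TOP_BYTE_MASK   (0xffUL << ((BYTES_PER_WORD - 1) * 8))

int main(void)
{
        /* Invented example: indicator bytes 1 and 5 are set. */
        unsigned long word = 0x0001000000ff0000UL;
        int i = 0;

        while (word) {
                if (word & TOP_BYTE_MASK)
                        printf("indicator byte %d is set\n", i);
                word <<= 8;
                i++;
        }
        return 0;
}
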
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index bd5f16f80bf8..e8597ec92247 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c | |||
@@ -348,7 +348,7 @@ cio_ignore_write(struct file *file, const char __user *user_buf, | |||
348 | return user_len; | 348 | return user_len; |
349 | } | 349 | } |
350 | 350 | ||
351 | static struct seq_operations cio_ignore_proc_seq_ops = { | 351 | static const struct seq_operations cio_ignore_proc_seq_ops = { |
352 | .start = cio_ignore_proc_seq_start, | 352 | .start = cio_ignore_proc_seq_start, |
353 | .stop = cio_ignore_proc_seq_stop, | 353 | .stop = cio_ignore_proc_seq_stop, |
354 | .next = cio_ignore_proc_seq_next, | 354 | .next = cio_ignore_proc_seq_next, |
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 5baa517c3b66..3964056a9a47 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c | |||
@@ -35,8 +35,8 @@ ccwgroup_bus_match (struct device * dev, struct device_driver * drv) | |||
35 | struct ccwgroup_device *gdev; | 35 | struct ccwgroup_device *gdev; |
36 | struct ccwgroup_driver *gdrv; | 36 | struct ccwgroup_driver *gdrv; |
37 | 37 | ||
38 | gdev = container_of(dev, struct ccwgroup_device, dev); | 38 | gdev = to_ccwgroupdev(dev); |
39 | gdrv = container_of(drv, struct ccwgroup_driver, driver); | 39 | gdrv = to_ccwgroupdrv(drv); |
40 | 40 | ||
41 | if (gdev->creator_id == gdrv->driver_id) | 41 | if (gdev->creator_id == gdrv->driver_id) |
42 | return 1; | 42 | return 1; |
@@ -75,8 +75,10 @@ static void ccwgroup_ungroup_callback(struct device *dev) | |||
75 | struct ccwgroup_device *gdev = to_ccwgroupdev(dev); | 75 | struct ccwgroup_device *gdev = to_ccwgroupdev(dev); |
76 | 76 | ||
77 | mutex_lock(&gdev->reg_mutex); | 77 | mutex_lock(&gdev->reg_mutex); |
78 | __ccwgroup_remove_symlinks(gdev); | 78 | if (device_is_registered(&gdev->dev)) { |
79 | device_unregister(dev); | 79 | __ccwgroup_remove_symlinks(gdev); |
80 | device_unregister(dev); | ||
81 | } | ||
80 | mutex_unlock(&gdev->reg_mutex); | 82 | mutex_unlock(&gdev->reg_mutex); |
81 | } | 83 | } |
82 | 84 | ||
@@ -111,7 +113,7 @@ ccwgroup_release (struct device *dev) | |||
111 | gdev = to_ccwgroupdev(dev); | 113 | gdev = to_ccwgroupdev(dev); |
112 | 114 | ||
113 | for (i = 0; i < gdev->count; i++) { | 115 | for (i = 0; i < gdev->count; i++) { |
114 | gdev->cdev[i]->dev.driver_data = NULL; | 116 | dev_set_drvdata(&gdev->cdev[i]->dev, NULL); |
115 | put_device(&gdev->cdev[i]->dev); | 117 | put_device(&gdev->cdev[i]->dev); |
116 | } | 118 | } |
117 | kfree(gdev); | 119 | kfree(gdev); |
@@ -196,11 +198,11 @@ int ccwgroup_create(struct device *root, unsigned int creator_id, | |||
196 | goto error; | 198 | goto error; |
197 | } | 199 | } |
198 | /* Don't allow a device to belong to more than one group. */ | 200 | /* Don't allow a device to belong to more than one group. */ |
199 | if (gdev->cdev[i]->dev.driver_data) { | 201 | if (dev_get_drvdata(&gdev->cdev[i]->dev)) { |
200 | rc = -EINVAL; | 202 | rc = -EINVAL; |
201 | goto error; | 203 | goto error; |
202 | } | 204 | } |
203 | gdev->cdev[i]->dev.driver_data = gdev; | 205 | dev_set_drvdata(&gdev->cdev[i]->dev, gdev); |
204 | } | 206 | } |
205 | 207 | ||
206 | gdev->creator_id = creator_id; | 208 | gdev->creator_id = creator_id; |
@@ -234,8 +236,8 @@ int ccwgroup_create(struct device *root, unsigned int creator_id, | |||
234 | error: | 236 | error: |
235 | for (i = 0; i < argc; i++) | 237 | for (i = 0; i < argc; i++) |
236 | if (gdev->cdev[i]) { | 238 | if (gdev->cdev[i]) { |
237 | if (gdev->cdev[i]->dev.driver_data == gdev) | 239 | if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev) |
238 | gdev->cdev[i]->dev.driver_data = NULL; | 240 | dev_set_drvdata(&gdev->cdev[i]->dev, NULL); |
239 | put_device(&gdev->cdev[i]->dev); | 241 | put_device(&gdev->cdev[i]->dev); |
240 | } | 242 | } |
241 | mutex_unlock(&gdev->reg_mutex); | 243 | mutex_unlock(&gdev->reg_mutex); |
@@ -408,6 +410,7 @@ int ccwgroup_driver_register(struct ccwgroup_driver *cdriver) | |||
408 | /* register our new driver with the core */ | 410 | /* register our new driver with the core */ |
409 | cdriver->driver.bus = &ccwgroup_bus_type; | 411 | cdriver->driver.bus = &ccwgroup_bus_type; |
410 | cdriver->driver.name = cdriver->name; | 412 | cdriver->driver.name = cdriver->name; |
413 | cdriver->driver.owner = cdriver->owner; | ||
411 | 414 | ||
412 | return driver_register(&cdriver->driver); | 415 | return driver_register(&cdriver->driver); |
413 | } | 416 | } |
@@ -463,8 +466,8 @@ __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev) | |||
463 | { | 466 | { |
464 | struct ccwgroup_device *gdev; | 467 | struct ccwgroup_device *gdev; |
465 | 468 | ||
466 | if (cdev->dev.driver_data) { | 469 | gdev = dev_get_drvdata(&cdev->dev); |
467 | gdev = (struct ccwgroup_device *)cdev->dev.driver_data; | 470 | if (gdev) { |
468 | if (get_device(&gdev->dev)) { | 471 | if (get_device(&gdev->dev)) { |
469 | mutex_lock(&gdev->reg_mutex); | 472 | mutex_lock(&gdev->reg_mutex); |
470 | if (device_is_registered(&gdev->dev)) | 473 | if (device_is_registered(&gdev->dev)) |
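
The ccwgroup.c hunks above stop poking dev.driver_data directly and go through the dev_get_drvdata()/dev_set_drvdata() accessors instead. The pattern in isolation, with a hypothetical private structure and function names:

#include <linux/device.h>
#include <linux/slab.h>

struct demo_priv {
        int id;                         /* hypothetical per-device state */
};

static int demo_bind(struct device *dev)
{
        struct demo_priv *priv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
        dev_set_drvdata(dev, priv);     /* was: dev->driver_data = priv */
        return 0;
}

static void demo_unbind(struct device *dev)
{
        struct demo_priv *priv = dev_get_drvdata(dev);

        dev_set_drvdata(dev, NULL);
        kfree(priv);
}
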
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 597c0c76a2ad..e7ba16a74ef7 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c | |||
@@ -89,7 +89,8 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd) | |||
89 | /* Copy data */ | 89 | /* Copy data */ |
90 | ret = 0; | 90 | ret = 0; |
91 | memset(ssd, 0, sizeof(struct chsc_ssd_info)); | 91 | memset(ssd, 0, sizeof(struct chsc_ssd_info)); |
92 | if ((ssd_area->st != 0) && (ssd_area->st != 2)) | 92 | if ((ssd_area->st != SUBCHANNEL_TYPE_IO) && |
93 | (ssd_area->st != SUBCHANNEL_TYPE_MSG)) | ||
93 | goto out_free; | 94 | goto out_free; |
94 | ssd->path_mask = ssd_area->path_mask; | 95 | ssd->path_mask = ssd_area->path_mask; |
95 | ssd->fla_valid_mask = ssd_area->fla_valid_mask; | 96 | ssd->fla_valid_mask = ssd_area->fla_valid_mask; |
@@ -132,20 +133,16 @@ static void terminate_internal_io(struct subchannel *sch) | |||
132 | device_set_intretry(sch); | 133 | device_set_intretry(sch); |
133 | /* Call handler. */ | 134 | /* Call handler. */ |
134 | if (sch->driver && sch->driver->termination) | 135 | if (sch->driver && sch->driver->termination) |
135 | sch->driver->termination(&sch->dev); | 136 | sch->driver->termination(sch); |
136 | } | 137 | } |
137 | 138 | ||
138 | static int | 139 | static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data) |
139 | s390_subchannel_remove_chpid(struct device *dev, void *data) | ||
140 | { | 140 | { |
141 | int j; | 141 | int j; |
142 | int mask; | 142 | int mask; |
143 | struct subchannel *sch; | 143 | struct chp_id *chpid = data; |
144 | struct chp_id *chpid; | ||
145 | struct schib schib; | 144 | struct schib schib; |
146 | 145 | ||
147 | sch = to_subchannel(dev); | ||
148 | chpid = data; | ||
149 | for (j = 0; j < 8; j++) { | 146 | for (j = 0; j < 8; j++) { |
150 | mask = 0x80 >> j; | 147 | mask = 0x80 >> j; |
151 | if ((sch->schib.pmcw.pim & mask) && | 148 | if ((sch->schib.pmcw.pim & mask) && |
@@ -158,7 +155,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data) | |||
158 | spin_lock_irq(sch->lock); | 155 | spin_lock_irq(sch->lock); |
159 | 156 | ||
160 | stsch(sch->schid, &schib); | 157 | stsch(sch->schid, &schib); |
161 | if (!schib.pmcw.dnv) | 158 | if (!css_sch_is_valid(&schib)) |
162 | goto out_unreg; | 159 | goto out_unreg; |
163 | memcpy(&sch->schib, &schib, sizeof(struct schib)); | 160 | memcpy(&sch->schib, &schib, sizeof(struct schib)); |
164 | /* Check for single path devices. */ | 161 | /* Check for single path devices. */ |
@@ -172,12 +169,12 @@ s390_subchannel_remove_chpid(struct device *dev, void *data) | |||
172 | terminate_internal_io(sch); | 169 | terminate_internal_io(sch); |
173 | /* Re-start path verification. */ | 170 | /* Re-start path verification. */ |
174 | if (sch->driver && sch->driver->verify) | 171 | if (sch->driver && sch->driver->verify) |
175 | sch->driver->verify(&sch->dev); | 172 | sch->driver->verify(sch); |
176 | } | 173 | } |
177 | } else { | 174 | } else { |
178 | /* trigger path verification. */ | 175 | /* trigger path verification. */ |
179 | if (sch->driver && sch->driver->verify) | 176 | if (sch->driver && sch->driver->verify) |
180 | sch->driver->verify(&sch->dev); | 177 | sch->driver->verify(sch); |
181 | else if (sch->lpm == mask) | 178 | else if (sch->lpm == mask) |
182 | goto out_unreg; | 179 | goto out_unreg; |
183 | } | 180 | } |
@@ -201,12 +198,10 @@ void chsc_chp_offline(struct chp_id chpid) | |||
201 | 198 | ||
202 | if (chp_get_status(chpid) <= 0) | 199 | if (chp_get_status(chpid) <= 0) |
203 | return; | 200 | return; |
204 | bus_for_each_dev(&css_bus_type, NULL, &chpid, | 201 | for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid); |
205 | s390_subchannel_remove_chpid); | ||
206 | } | 202 | } |
207 | 203 | ||
208 | static int | 204 | static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data) |
209 | s390_process_res_acc_new_sch(struct subchannel_id schid) | ||
210 | { | 205 | { |
211 | struct schib schib; | 206 | struct schib schib; |
212 | /* | 207 | /* |
@@ -252,18 +247,10 @@ static int get_res_chpid_mask(struct chsc_ssd_info *ssd, | |||
252 | return 0; | 247 | return 0; |
253 | } | 248 | } |
254 | 249 | ||
255 | static int | 250 | static int __s390_process_res_acc(struct subchannel *sch, void *data) |
256 | __s390_process_res_acc(struct subchannel_id schid, void *data) | ||
257 | { | 251 | { |
258 | int chp_mask, old_lpm; | 252 | int chp_mask, old_lpm; |
259 | struct res_acc_data *res_data; | 253 | struct res_acc_data *res_data = data; |
260 | struct subchannel *sch; | ||
261 | |||
262 | res_data = data; | ||
263 | sch = get_subchannel_by_schid(schid); | ||
264 | if (!sch) | ||
265 | /* Check if a subchannel is newly available. */ | ||
266 | return s390_process_res_acc_new_sch(schid); | ||
267 | 254 | ||
268 | spin_lock_irq(sch->lock); | 255 | spin_lock_irq(sch->lock); |
269 | chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data); | 256 | chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data); |
@@ -279,10 +266,10 @@ __s390_process_res_acc(struct subchannel_id schid, void *data) | |||
279 | if (!old_lpm && sch->lpm) | 266 | if (!old_lpm && sch->lpm) |
280 | device_trigger_reprobe(sch); | 267 | device_trigger_reprobe(sch); |
281 | else if (sch->driver && sch->driver->verify) | 268 | else if (sch->driver && sch->driver->verify) |
282 | sch->driver->verify(&sch->dev); | 269 | sch->driver->verify(sch); |
283 | out: | 270 | out: |
284 | spin_unlock_irq(sch->lock); | 271 | spin_unlock_irq(sch->lock); |
285 | put_device(&sch->dev); | 272 | |
286 | return 0; | 273 | return 0; |
287 | } | 274 | } |
288 | 275 | ||
@@ -305,7 +292,8 @@ static void s390_process_res_acc (struct res_acc_data *res_data) | |||
305 | * The more information we have (info), the less scanning | 292 | * The more information we have (info), the less scanning |
306 | * will we have to do. | 293 | * will we have to do. |
307 | */ | 294 | */ |
308 | for_each_subchannel(__s390_process_res_acc, res_data); | 295 | for_each_subchannel_staged(__s390_process_res_acc, |
296 | s390_process_res_acc_new_sch, res_data); | ||
309 | } | 297 | } |
310 | 298 | ||
311 | static int | 299 | static int |
@@ -499,8 +487,7 @@ void chsc_process_crw(void) | |||
499 | } while (sei_area->flags & 0x80); | 487 | } while (sei_area->flags & 0x80); |
500 | } | 488 | } |
501 | 489 | ||
502 | static int | 490 | static int __chp_add_new_sch(struct subchannel_id schid, void *data) |
503 | __chp_add_new_sch(struct subchannel_id schid) | ||
504 | { | 491 | { |
505 | struct schib schib; | 492 | struct schib schib; |
506 | 493 | ||
@@ -514,45 +501,37 @@ __chp_add_new_sch(struct subchannel_id schid) | |||
514 | } | 501 | } |
515 | 502 | ||
516 | 503 | ||
517 | static int | 504 | static int __chp_add(struct subchannel *sch, void *data) |
518 | __chp_add(struct subchannel_id schid, void *data) | ||
519 | { | 505 | { |
520 | int i, mask; | 506 | int i, mask; |
521 | struct chp_id *chpid; | 507 | struct chp_id *chpid = data; |
522 | struct subchannel *sch; | 508 | |
523 | |||
524 | chpid = data; | ||
525 | sch = get_subchannel_by_schid(schid); | ||
526 | if (!sch) | ||
527 | /* Check if the subchannel is now available. */ | ||
528 | return __chp_add_new_sch(schid); | ||
529 | spin_lock_irq(sch->lock); | 509 | spin_lock_irq(sch->lock); |
530 | for (i=0; i<8; i++) { | 510 | for (i=0; i<8; i++) { |
531 | mask = 0x80 >> i; | 511 | mask = 0x80 >> i; |
532 | if ((sch->schib.pmcw.pim & mask) && | 512 | if ((sch->schib.pmcw.pim & mask) && |
533 | (sch->schib.pmcw.chpid[i] == chpid->id)) { | 513 | (sch->schib.pmcw.chpid[i] == chpid->id)) |
534 | if (stsch(sch->schid, &sch->schib) != 0) { | ||
535 | /* Endgame. */ | ||
536 | spin_unlock_irq(sch->lock); | ||
537 | return -ENXIO; | ||
538 | } | ||
539 | break; | 514 | break; |
540 | } | ||
541 | } | 515 | } |
542 | if (i==8) { | 516 | if (i==8) { |
543 | spin_unlock_irq(sch->lock); | 517 | spin_unlock_irq(sch->lock); |
544 | return 0; | 518 | return 0; |
545 | } | 519 | } |
520 | if (stsch(sch->schid, &sch->schib)) { | ||
521 | spin_unlock_irq(sch->lock); | ||
522 | css_schedule_eval(sch->schid); | ||
523 | return 0; | ||
524 | } | ||
546 | sch->lpm = ((sch->schib.pmcw.pim & | 525 | sch->lpm = ((sch->schib.pmcw.pim & |
547 | sch->schib.pmcw.pam & | 526 | sch->schib.pmcw.pam & |
548 | sch->schib.pmcw.pom) | 527 | sch->schib.pmcw.pom) |
549 | | mask) & sch->opm; | 528 | | mask) & sch->opm; |
550 | 529 | ||
551 | if (sch->driver && sch->driver->verify) | 530 | if (sch->driver && sch->driver->verify) |
552 | sch->driver->verify(&sch->dev); | 531 | sch->driver->verify(sch); |
553 | 532 | ||
554 | spin_unlock_irq(sch->lock); | 533 | spin_unlock_irq(sch->lock); |
555 | put_device(&sch->dev); | 534 | |
556 | return 0; | 535 | return 0; |
557 | } | 536 | } |
558 | 537 | ||
@@ -564,7 +543,8 @@ void chsc_chp_online(struct chp_id chpid) | |||
564 | CIO_TRACE_EVENT(2, dbf_txt); | 543 | CIO_TRACE_EVENT(2, dbf_txt); |
565 | 544 | ||
566 | if (chp_get_status(chpid) != 0) | 545 | if (chp_get_status(chpid) != 0) |
567 | for_each_subchannel(__chp_add, &chpid); | 546 | for_each_subchannel_staged(__chp_add, __chp_add_new_sch, |
547 | &chpid); | ||
568 | } | 548 | } |
569 | 549 | ||
570 | static void __s390_subchannel_vary_chpid(struct subchannel *sch, | 550 | static void __s390_subchannel_vary_chpid(struct subchannel *sch, |
@@ -589,7 +569,7 @@ static void __s390_subchannel_vary_chpid(struct subchannel *sch, | |||
589 | if (!old_lpm) | 569 | if (!old_lpm) |
590 | device_trigger_reprobe(sch); | 570 | device_trigger_reprobe(sch); |
591 | else if (sch->driver && sch->driver->verify) | 571 | else if (sch->driver && sch->driver->verify) |
592 | sch->driver->verify(&sch->dev); | 572 | sch->driver->verify(sch); |
593 | break; | 573 | break; |
594 | } | 574 | } |
595 | sch->opm &= ~mask; | 575 | sch->opm &= ~mask; |
@@ -603,37 +583,29 @@ static void __s390_subchannel_vary_chpid(struct subchannel *sch, | |||
603 | terminate_internal_io(sch); | 583 | terminate_internal_io(sch); |
604 | /* Re-start path verification. */ | 584 | /* Re-start path verification. */ |
605 | if (sch->driver && sch->driver->verify) | 585 | if (sch->driver && sch->driver->verify) |
606 | sch->driver->verify(&sch->dev); | 586 | sch->driver->verify(sch); |
607 | } | 587 | } |
608 | } else if (!sch->lpm) { | 588 | } else if (!sch->lpm) { |
609 | if (device_trigger_verify(sch) != 0) | 589 | if (device_trigger_verify(sch) != 0) |
610 | css_schedule_eval(sch->schid); | 590 | css_schedule_eval(sch->schid); |
611 | } else if (sch->driver && sch->driver->verify) | 591 | } else if (sch->driver && sch->driver->verify) |
612 | sch->driver->verify(&sch->dev); | 592 | sch->driver->verify(sch); |
613 | break; | 593 | break; |
614 | } | 594 | } |
615 | spin_unlock_irqrestore(sch->lock, flags); | 595 | spin_unlock_irqrestore(sch->lock, flags); |
616 | } | 596 | } |
617 | 597 | ||
618 | static int s390_subchannel_vary_chpid_off(struct device *dev, void *data) | 598 | static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data) |
619 | { | 599 | { |
620 | struct subchannel *sch; | 600 | struct chp_id *chpid = data; |
621 | struct chp_id *chpid; | ||
622 | |||
623 | sch = to_subchannel(dev); | ||
624 | chpid = data; | ||
625 | 601 | ||
626 | __s390_subchannel_vary_chpid(sch, *chpid, 0); | 602 | __s390_subchannel_vary_chpid(sch, *chpid, 0); |
627 | return 0; | 603 | return 0; |
628 | } | 604 | } |
629 | 605 | ||
630 | static int s390_subchannel_vary_chpid_on(struct device *dev, void *data) | 606 | static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data) |
631 | { | 607 | { |
632 | struct subchannel *sch; | 608 | struct chp_id *chpid = data; |
633 | struct chp_id *chpid; | ||
634 | |||
635 | sch = to_subchannel(dev); | ||
636 | chpid = data; | ||
637 | 609 | ||
638 | __s390_subchannel_vary_chpid(sch, *chpid, 1); | 610 | __s390_subchannel_vary_chpid(sch, *chpid, 1); |
639 | return 0; | 611 | return 0; |
@@ -643,13 +615,7 @@ static int | |||
643 | __s390_vary_chpid_on(struct subchannel_id schid, void *data) | 615 | __s390_vary_chpid_on(struct subchannel_id schid, void *data) |
644 | { | 616 | { |
645 | struct schib schib; | 617 | struct schib schib; |
646 | struct subchannel *sch; | ||
647 | 618 | ||
648 | sch = get_subchannel_by_schid(schid); | ||
649 | if (sch) { | ||
650 | put_device(&sch->dev); | ||
651 | return 0; | ||
652 | } | ||
653 | if (stsch_err(schid, &schib)) | 619 | if (stsch_err(schid, &schib)) |
654 | /* We're through */ | 620 | /* We're through */ |
655 | return -ENXIO; | 621 | return -ENXIO; |
@@ -669,12 +635,13 @@ int chsc_chp_vary(struct chp_id chpid, int on) | |||
669 | * Redo PathVerification on the devices the chpid connects to | 635 | * Redo PathVerification on the devices the chpid connects to |
670 | */ | 636 | */ |
671 | 637 | ||
672 | bus_for_each_dev(&css_bus_type, NULL, &chpid, on ? | ||
673 | s390_subchannel_vary_chpid_on : | ||
674 | s390_subchannel_vary_chpid_off); | ||
675 | if (on) | 638 | if (on) |
676 | /* Scan for new devices on varied on path. */ | 639 | for_each_subchannel_staged(s390_subchannel_vary_chpid_on, |
677 | for_each_subchannel(__s390_vary_chpid_on, NULL); | 640 | __s390_vary_chpid_on, &chpid); |
641 | else | ||
642 | for_each_subchannel_staged(s390_subchannel_vary_chpid_off, | ||
643 | NULL, &chpid); | ||
644 | |||
678 | return 0; | 645 | return 0; |
679 | } | 646 | } |
680 | 647 | ||
@@ -1075,7 +1042,7 @@ chsc_determine_css_characteristics(void) | |||
1075 | 1042 | ||
1076 | scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 1043 | scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
1077 | if (!scsc_area) { | 1044 | if (!scsc_area) { |
1078 | CIO_MSG_EVENT(0, "Was not able to determine available" | 1045 | CIO_MSG_EVENT(0, "Was not able to determine available " |
1079 | "CHSCs due to no memory.\n"); | 1046 | "CHSCs due to no memory.\n"); |
1080 | return -ENOMEM; | 1047 | return -ENOMEM; |
1081 | } | 1048 | } |
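
The chsc.c conversion above replaces bus_for_each_dev()/for_each_subchannel() pairs with for_each_subchannel_staged(), which calls one function for every registered subchannel (handed over as struct subchannel *) and, optionally, a second one for each subchannel ID that has no registered device. A skeletal caller might look like the sketch below; the callback names and bodies are hypothetical, and the helper itself is defined in the css.c hunk further down:

/* Sketch only; would live inside drivers/s390/cio with css.h included. */
static int demo_known_sch(struct subchannel *sch, void *data)
{
        /* Work on an already registered subchannel, e.g. re-verify paths. */
        if (sch->driver && sch->driver->verify)
                sch->driver->verify(sch);
        return 0;
}

static int demo_unknown_sch(struct subchannel_id schid, void *data)
{
        /* Queue unregistered subchannel IDs for later evaluation. */
        css_schedule_eval(schid);
        return 0;
}

static void demo_scan(void *cookie)
{
        for_each_subchannel_staged(demo_known_sch, demo_unknown_sch, cookie);
}
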
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 46905345159e..60590a12d529 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c | |||
@@ -23,11 +23,12 @@ | |||
23 | #include <asm/reset.h> | 23 | #include <asm/reset.h> |
24 | #include <asm/ipl.h> | 24 | #include <asm/ipl.h> |
25 | #include <asm/chpid.h> | 25 | #include <asm/chpid.h> |
26 | #include "airq.h" | 26 | #include <asm/airq.h> |
27 | #include "cio.h" | 27 | #include "cio.h" |
28 | #include "css.h" | 28 | #include "css.h" |
29 | #include "chsc.h" | 29 | #include "chsc.h" |
30 | #include "ioasm.h" | 30 | #include "ioasm.h" |
31 | #include "io_sch.h" | ||
31 | #include "blacklist.h" | 32 | #include "blacklist.h" |
32 | #include "cio_debug.h" | 33 | #include "cio_debug.h" |
33 | #include "chp.h" | 34 | #include "chp.h" |
@@ -56,39 +57,37 @@ __setup ("cio_msg=", cio_setup); | |||
56 | 57 | ||
57 | /* | 58 | /* |
58 | * Function: cio_debug_init | 59 | * Function: cio_debug_init |
59 | * Initializes three debug logs (under /proc/s390dbf) for common I/O: | 60 | * Initializes three debug logs for common I/O: |
60 | * - cio_msg logs the messages which are printk'ed when CONFIG_DEBUG_IO is on | 61 | * - cio_msg logs generic cio messages |
61 | * - cio_trace logs the calling of different functions | 62 | * - cio_trace logs the calling of different functions |
62 | * - cio_crw logs the messages which are printk'ed when CONFIG_DEBUG_CRW is on | 63 | * - cio_crw logs machine check related cio messages |
63 | * debug levels depend on CONFIG_DEBUG_IO resp. CONFIG_DEBUG_CRW | ||
64 | */ | 64 | */ |
65 | static int __init | 65 | static int __init cio_debug_init(void) |
66 | cio_debug_init (void) | ||
67 | { | 66 | { |
68 | cio_debug_msg_id = debug_register ("cio_msg", 16, 4, 16*sizeof (long)); | 67 | cio_debug_msg_id = debug_register("cio_msg", 16, 1, 16 * sizeof(long)); |
69 | if (!cio_debug_msg_id) | 68 | if (!cio_debug_msg_id) |
70 | goto out_unregister; | 69 | goto out_unregister; |
71 | debug_register_view (cio_debug_msg_id, &debug_sprintf_view); | 70 | debug_register_view(cio_debug_msg_id, &debug_sprintf_view); |
72 | debug_set_level (cio_debug_msg_id, 2); | 71 | debug_set_level(cio_debug_msg_id, 2); |
73 | cio_debug_trace_id = debug_register ("cio_trace", 16, 4, 16); | 72 | cio_debug_trace_id = debug_register("cio_trace", 16, 1, 16); |
74 | if (!cio_debug_trace_id) | 73 | if (!cio_debug_trace_id) |
75 | goto out_unregister; | 74 | goto out_unregister; |
76 | debug_register_view (cio_debug_trace_id, &debug_hex_ascii_view); | 75 | debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view); |
77 | debug_set_level (cio_debug_trace_id, 2); | 76 | debug_set_level(cio_debug_trace_id, 2); |
78 | cio_debug_crw_id = debug_register ("cio_crw", 4, 4, 16*sizeof (long)); | 77 | cio_debug_crw_id = debug_register("cio_crw", 16, 1, 16 * sizeof(long)); |
79 | if (!cio_debug_crw_id) | 78 | if (!cio_debug_crw_id) |
80 | goto out_unregister; | 79 | goto out_unregister; |
81 | debug_register_view (cio_debug_crw_id, &debug_sprintf_view); | 80 | debug_register_view(cio_debug_crw_id, &debug_sprintf_view); |
82 | debug_set_level (cio_debug_crw_id, 2); | 81 | debug_set_level(cio_debug_crw_id, 4); |
83 | return 0; | 82 | return 0; |
84 | 83 | ||
85 | out_unregister: | 84 | out_unregister: |
86 | if (cio_debug_msg_id) | 85 | if (cio_debug_msg_id) |
87 | debug_unregister (cio_debug_msg_id); | 86 | debug_unregister(cio_debug_msg_id); |
88 | if (cio_debug_trace_id) | 87 | if (cio_debug_trace_id) |
89 | debug_unregister (cio_debug_trace_id); | 88 | debug_unregister(cio_debug_trace_id); |
90 | if (cio_debug_crw_id) | 89 | if (cio_debug_crw_id) |
91 | debug_unregister (cio_debug_crw_id); | 90 | debug_unregister(cio_debug_crw_id); |
92 | printk(KERN_WARNING"cio: could not initialize debugging\n"); | 91 | printk(KERN_WARNING"cio: could not initialize debugging\n"); |
93 | return -1; | 92 | return -1; |
94 | } | 93 | } |
@@ -147,7 +146,7 @@ cio_tpi(void) | |||
147 | spin_lock(sch->lock); | 146 | spin_lock(sch->lock); |
148 | memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw)); | 147 | memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw)); |
149 | if (sch->driver && sch->driver->irq) | 148 | if (sch->driver && sch->driver->irq) |
150 | sch->driver->irq(&sch->dev); | 149 | sch->driver->irq(sch); |
151 | spin_unlock(sch->lock); | 150 | spin_unlock(sch->lock); |
152 | irq_exit (); | 151 | irq_exit (); |
153 | _local_bh_enable(); | 152 | _local_bh_enable(); |
@@ -184,33 +183,35 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */ | |||
184 | { | 183 | { |
185 | char dbf_txt[15]; | 184 | char dbf_txt[15]; |
186 | int ccode; | 185 | int ccode; |
186 | struct orb *orb; | ||
187 | 187 | ||
188 | CIO_TRACE_EVENT (4, "stIO"); | 188 | CIO_TRACE_EVENT(4, "stIO"); |
189 | CIO_TRACE_EVENT (4, sch->dev.bus_id); | 189 | CIO_TRACE_EVENT(4, sch->dev.bus_id); |
190 | 190 | ||
191 | orb = &to_io_private(sch)->orb; | ||
191 | /* sch is always under 2G. */ | 192 | /* sch is always under 2G. */ |
192 | sch->orb.intparm = (__u32)(unsigned long)sch; | 193 | orb->intparm = (u32)(addr_t)sch; |
193 | sch->orb.fmt = 1; | 194 | orb->fmt = 1; |
194 | 195 | ||
195 | sch->orb.pfch = sch->options.prefetch == 0; | 196 | orb->pfch = sch->options.prefetch == 0; |
196 | sch->orb.spnd = sch->options.suspend; | 197 | orb->spnd = sch->options.suspend; |
197 | sch->orb.ssic = sch->options.suspend && sch->options.inter; | 198 | orb->ssic = sch->options.suspend && sch->options.inter; |
198 | sch->orb.lpm = (lpm != 0) ? lpm : sch->lpm; | 199 | orb->lpm = (lpm != 0) ? lpm : sch->lpm; |
199 | #ifdef CONFIG_64BIT | 200 | #ifdef CONFIG_64BIT |
200 | /* | 201 | /* |
201 | * for 64 bit we always support 64 bit IDAWs with 4k page size only | 202 | * for 64 bit we always support 64 bit IDAWs with 4k page size only |
202 | */ | 203 | */ |
203 | sch->orb.c64 = 1; | 204 | orb->c64 = 1; |
204 | sch->orb.i2k = 0; | 205 | orb->i2k = 0; |
205 | #endif | 206 | #endif |
206 | sch->orb.key = key >> 4; | 207 | orb->key = key >> 4; |
207 | /* issue "Start Subchannel" */ | 208 | /* issue "Start Subchannel" */ |
208 | sch->orb.cpa = (__u32) __pa (cpa); | 209 | orb->cpa = (__u32) __pa(cpa); |
209 | ccode = ssch (sch->schid, &sch->orb); | 210 | ccode = ssch(sch->schid, orb); |
210 | 211 | ||
211 | /* process condition code */ | 212 | /* process condition code */ |
212 | sprintf (dbf_txt, "ccode:%d", ccode); | 213 | sprintf(dbf_txt, "ccode:%d", ccode); |
213 | CIO_TRACE_EVENT (4, dbf_txt); | 214 | CIO_TRACE_EVENT(4, dbf_txt); |
214 | 215 | ||
215 | switch (ccode) { | 216 | switch (ccode) { |
216 | case 0: | 217 | case 0: |
@@ -405,8 +406,8 @@ cio_modify (struct subchannel *sch) | |||
405 | /* | 406 | /* |
406 | * Enable subchannel. | 407 | * Enable subchannel. |
407 | */ | 408 | */ |
408 | int | 409 | int cio_enable_subchannel(struct subchannel *sch, unsigned int isc, |
409 | cio_enable_subchannel (struct subchannel *sch, unsigned int isc) | 410 | u32 intparm) |
410 | { | 411 | { |
411 | char dbf_txt[15]; | 412 | char dbf_txt[15]; |
412 | int ccode; | 413 | int ccode; |
@@ -425,7 +426,7 @@ cio_enable_subchannel (struct subchannel *sch, unsigned int isc) | |||
425 | for (retry = 5, ret = 0; retry > 0; retry--) { | 426 | for (retry = 5, ret = 0; retry > 0; retry--) { |
426 | sch->schib.pmcw.ena = 1; | 427 | sch->schib.pmcw.ena = 1; |
427 | sch->schib.pmcw.isc = isc; | 428 | sch->schib.pmcw.isc = isc; |
428 | sch->schib.pmcw.intparm = (__u32)(unsigned long)sch; | 429 | sch->schib.pmcw.intparm = intparm; |
429 | ret = cio_modify(sch); | 430 | ret = cio_modify(sch); |
430 | if (ret == -ENODEV) | 431 | if (ret == -ENODEV) |
431 | break; | 432 | break; |
@@ -567,7 +568,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) | |||
567 | */ | 568 | */ |
568 | if (sch->st != 0) { | 569 | if (sch->st != 0) { |
569 | CIO_DEBUG(KERN_INFO, 0, | 570 | CIO_DEBUG(KERN_INFO, 0, |
570 | "cio: Subchannel 0.%x.%04x reports " | 571 | "Subchannel 0.%x.%04x reports " |
571 | "non-I/O subchannel type %04X\n", | 572 | "non-I/O subchannel type %04X\n", |
572 | sch->schid.ssid, sch->schid.sch_no, sch->st); | 573 | sch->schid.ssid, sch->schid.sch_no, sch->st); |
573 | /* We stop here for non-io subchannels. */ | 574 | /* We stop here for non-io subchannels. */ |
@@ -576,11 +577,11 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) | |||
576 | } | 577 | } |
577 | 578 | ||
578 | /* Initialization for io subchannels. */ | 579 | /* Initialization for io subchannels. */ |
579 | if (!sch->schib.pmcw.dnv) { | 580 | if (!css_sch_is_valid(&sch->schib)) { |
580 | /* io subchannel but device number is invalid. */ | ||
581 | err = -ENODEV; | 581 | err = -ENODEV; |
582 | goto out; | 582 | goto out; |
583 | } | 583 | } |
584 | |||
584 | /* Devno is valid. */ | 585 | /* Devno is valid. */ |
585 | if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) { | 586 | if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) { |
586 | /* | 587 | /* |
@@ -600,7 +601,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) | |||
600 | sch->lpm = sch->schib.pmcw.pam & sch->opm; | 601 | sch->lpm = sch->schib.pmcw.pam & sch->opm; |
601 | 602 | ||
602 | CIO_DEBUG(KERN_INFO, 0, | 603 | CIO_DEBUG(KERN_INFO, 0, |
603 | "cio: Detected device %04x on subchannel 0.%x.%04X" | 604 | "Detected device %04x on subchannel 0.%x.%04X" |
604 | " - PIM = %02X, PAM = %02X, POM = %02X\n", | 605 | " - PIM = %02X, PAM = %02X, POM = %02X\n", |
605 | sch->schib.pmcw.dev, sch->schid.ssid, | 606 | sch->schib.pmcw.dev, sch->schid.ssid, |
606 | sch->schid.sch_no, sch->schib.pmcw.pim, | 607 | sch->schid.sch_no, sch->schib.pmcw.pim, |
@@ -680,7 +681,7 @@ do_IRQ (struct pt_regs *regs) | |||
680 | sizeof (irb->scsw)); | 681 | sizeof (irb->scsw)); |
681 | /* Call interrupt handler if there is one. */ | 682 | /* Call interrupt handler if there is one. */ |
682 | if (sch->driver && sch->driver->irq) | 683 | if (sch->driver && sch->driver->irq) |
683 | sch->driver->irq(&sch->dev); | 684 | sch->driver->irq(sch); |
684 | } | 685 | } |
685 | if (sch) | 686 | if (sch) |
686 | spin_unlock(sch->lock); | 687 | spin_unlock(sch->lock); |
@@ -698,8 +699,14 @@ do_IRQ (struct pt_regs *regs) | |||
698 | 699 | ||
699 | #ifdef CONFIG_CCW_CONSOLE | 700 | #ifdef CONFIG_CCW_CONSOLE |
700 | static struct subchannel console_subchannel; | 701 | static struct subchannel console_subchannel; |
702 | static struct io_subchannel_private console_priv; | ||
701 | static int console_subchannel_in_use; | 703 | static int console_subchannel_in_use; |
702 | 704 | ||
705 | void *cio_get_console_priv(void) | ||
706 | { | ||
707 | return &console_priv; | ||
708 | } | ||
709 | |||
703 | /* | 710 | /* |
704 | * busy wait for the next interrupt on the console | 711 | * busy wait for the next interrupt on the console |
705 | */ | 712 | */ |
@@ -738,9 +745,9 @@ cio_test_for_console(struct subchannel_id schid, void *data) | |||
738 | { | 745 | { |
739 | if (stsch_err(schid, &console_subchannel.schib) != 0) | 746 | if (stsch_err(schid, &console_subchannel.schib) != 0) |
740 | return -ENXIO; | 747 | return -ENXIO; |
741 | if (console_subchannel.schib.pmcw.dnv && | 748 | if ((console_subchannel.schib.pmcw.st == SUBCHANNEL_TYPE_IO) && |
742 | console_subchannel.schib.pmcw.dev == | 749 | console_subchannel.schib.pmcw.dnv && |
743 | console_devno) { | 750 | (console_subchannel.schib.pmcw.dev == console_devno)) { |
744 | console_irq = schid.sch_no; | 751 | console_irq = schid.sch_no; |
745 | return 1; /* found */ | 752 | return 1; /* found */ |
746 | } | 753 | } |
@@ -758,6 +765,7 @@ cio_get_console_sch_no(void) | |||
758 | /* VM provided us with the irq number of the console. */ | 765 | /* VM provided us with the irq number of the console. */ |
759 | schid.sch_no = console_irq; | 766 | schid.sch_no = console_irq; |
760 | if (stsch(schid, &console_subchannel.schib) != 0 || | 767 | if (stsch(schid, &console_subchannel.schib) != 0 || |
768 | (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) || | ||
761 | !console_subchannel.schib.pmcw.dnv) | 769 | !console_subchannel.schib.pmcw.dnv) |
762 | return -1; | 770 | return -1; |
763 | console_devno = console_subchannel.schib.pmcw.dev; | 771 | console_devno = console_subchannel.schib.pmcw.dev; |
@@ -804,7 +812,7 @@ cio_probe_console(void) | |||
804 | ctl_set_bit(6, 24); | 812 | ctl_set_bit(6, 24); |
805 | console_subchannel.schib.pmcw.isc = 7; | 813 | console_subchannel.schib.pmcw.isc = 7; |
806 | console_subchannel.schib.pmcw.intparm = | 814 | console_subchannel.schib.pmcw.intparm = |
807 | (__u32)(unsigned long)&console_subchannel; | 815 | (u32)(addr_t)&console_subchannel; |
808 | ret = cio_modify(&console_subchannel); | 816 | ret = cio_modify(&console_subchannel); |
809 | if (ret) { | 817 | if (ret) { |
810 | console_subchannel_in_use = 0; | 818 | console_subchannel_in_use = 0; |
@@ -1022,7 +1030,7 @@ static int __reipl_subchannel_match(struct subchannel_id schid, void *data) | |||
1022 | 1030 | ||
1023 | if (stsch_reset(schid, &schib)) | 1031 | if (stsch_reset(schid, &schib)) |
1024 | return -ENXIO; | 1032 | return -ENXIO; |
1025 | if (schib.pmcw.dnv && | 1033 | if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv && |
1026 | (schib.pmcw.dev == match_id->devid.devno) && | 1034 | (schib.pmcw.dev == match_id->devid.devno) && |
1027 | (schid.ssid == match_id->devid.ssid)) { | 1035 | (schid.ssid == match_id->devid.ssid)) { |
1028 | match_id->schid = schid; | 1036 | match_id->schid = schid; |
@@ -1068,6 +1076,8 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo) | |||
1068 | return -ENODEV; | 1076 | return -ENODEV; |
1069 | if (stsch(schid, &schib)) | 1077 | if (stsch(schid, &schib)) |
1070 | return -ENODEV; | 1078 | return -ENODEV; |
1079 | if (schib.pmcw.st != SUBCHANNEL_TYPE_IO) | ||
1080 | return -ENODEV; | ||
1071 | if (!schib.pmcw.dnv) | 1081 | if (!schib.pmcw.dnv) |
1072 | return -ENODEV; | 1082 | return -ENODEV; |
1073 | iplinfo->devno = schib.pmcw.dev; | 1083 | iplinfo->devno = schib.pmcw.dev; |
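
Among the cio.c changes, cio_enable_subchannel() now takes the interruption parameter as an explicit argument rather than always storing the subchannel address, so each subchannel type can choose what the interrupt path hands back to it. A hedged sketch of a caller; keeping the old behaviour by passing the subchannel's own address is an assumption for illustration, not a statement about how every caller in this series behaves:

/* Illustrative caller; error handling trimmed. */
static int demo_enable(struct subchannel *sch)
{
        /* Pass our own address so the IRQ handler gets the subchannel
         * back as its interruption parameter (previous default). */
        return cio_enable_subchannel(sch, sch->schib.pmcw.isc,
                                     (u32)(addr_t)sch);
}
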
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index 7446c39951a7..52afa4c784de 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h | |||
@@ -11,32 +11,32 @@ | |||
11 | * path management control word | 11 | * path management control word |
12 | */ | 12 | */ |
13 | struct pmcw { | 13 | struct pmcw { |
14 | __u32 intparm; /* interruption parameter */ | 14 | u32 intparm; /* interruption parameter */ |
15 | __u32 qf : 1; /* qdio facility */ | 15 | u32 qf : 1; /* qdio facility */ |
16 | __u32 res0 : 1; /* reserved zeros */ | 16 | u32 res0 : 1; /* reserved zeros */ |
17 | __u32 isc : 3; /* interruption sublass */ | 17 | u32 isc : 3; /* interruption sublass */ |
18 | __u32 res5 : 3; /* reserved zeros */ | 18 | u32 res5 : 3; /* reserved zeros */ |
19 | __u32 ena : 1; /* enabled */ | 19 | u32 ena : 1; /* enabled */ |
20 | __u32 lm : 2; /* limit mode */ | 20 | u32 lm : 2; /* limit mode */ |
21 | __u32 mme : 2; /* measurement-mode enable */ | 21 | u32 mme : 2; /* measurement-mode enable */ |
22 | __u32 mp : 1; /* multipath mode */ | 22 | u32 mp : 1; /* multipath mode */ |
23 | __u32 tf : 1; /* timing facility */ | 23 | u32 tf : 1; /* timing facility */ |
24 | __u32 dnv : 1; /* device number valid */ | 24 | u32 dnv : 1; /* device number valid */ |
25 | __u32 dev : 16; /* device number */ | 25 | u32 dev : 16; /* device number */ |
26 | __u8 lpm; /* logical path mask */ | 26 | u8 lpm; /* logical path mask */ |
27 | __u8 pnom; /* path not operational mask */ | 27 | u8 pnom; /* path not operational mask */ |
28 | __u8 lpum; /* last path used mask */ | 28 | u8 lpum; /* last path used mask */ |
29 | __u8 pim; /* path installed mask */ | 29 | u8 pim; /* path installed mask */ |
30 | __u16 mbi; /* measurement-block index */ | 30 | u16 mbi; /* measurement-block index */ |
31 | __u8 pom; /* path operational mask */ | 31 | u8 pom; /* path operational mask */ |
32 | __u8 pam; /* path available mask */ | 32 | u8 pam; /* path available mask */ |
33 | __u8 chpid[8]; /* CHPID 0-7 (if available) */ | 33 | u8 chpid[8]; /* CHPID 0-7 (if available) */ |
34 | __u32 unused1 : 8; /* reserved zeros */ | 34 | u32 unused1 : 8; /* reserved zeros */ |
35 | __u32 st : 3; /* subchannel type */ | 35 | u32 st : 3; /* subchannel type */ |
36 | __u32 unused2 : 18; /* reserved zeros */ | 36 | u32 unused2 : 18; /* reserved zeros */ |
37 | __u32 mbfc : 1; /* measurement block format control */ | 37 | u32 mbfc : 1; /* measurement block format control */ |
38 | __u32 xmwme : 1; /* extended measurement word mode enable */ | 38 | u32 xmwme : 1; /* extended measurement word mode enable */ |
39 | __u32 csense : 1; /* concurrent sense; can be enabled ...*/ | 39 | u32 csense : 1; /* concurrent sense; can be enabled ...*/ |
40 | /* ... per MSCH, however, if facility */ | 40 | /* ... per MSCH, however, if facility */ |
41 | /* ... is not installed, this results */ | 41 | /* ... is not installed, this results */ |
42 | /* ... in an operand exception. */ | 42 | /* ... in an operand exception. */ |
@@ -52,31 +52,6 @@ struct schib { | |||
52 | __u8 mda[4]; /* model dependent area */ | 52 | __u8 mda[4]; /* model dependent area */ |
53 | } __attribute__ ((packed,aligned(4))); | 53 | } __attribute__ ((packed,aligned(4))); |
54 | 54 | ||
55 | /* | ||
56 | * operation request block | ||
57 | */ | ||
58 | struct orb { | ||
59 | __u32 intparm; /* interruption parameter */ | ||
60 | __u32 key : 4; /* flags, like key, suspend control, etc. */ | ||
61 | __u32 spnd : 1; /* suspend control */ | ||
62 | __u32 res1 : 1; /* reserved */ | ||
63 | __u32 mod : 1; /* modification control */ | ||
64 | __u32 sync : 1; /* synchronize control */ | ||
65 | __u32 fmt : 1; /* format control */ | ||
66 | __u32 pfch : 1; /* prefetch control */ | ||
67 | __u32 isic : 1; /* initial-status-interruption control */ | ||
68 | __u32 alcc : 1; /* address-limit-checking control */ | ||
69 | __u32 ssic : 1; /* suppress-suspended-interr. control */ | ||
70 | __u32 res2 : 1; /* reserved */ | ||
71 | __u32 c64 : 1; /* IDAW/QDIO 64 bit control */ | ||
72 | __u32 i2k : 1; /* IDAW 2/4kB block size control */ | ||
73 | __u32 lpm : 8; /* logical path mask */ | ||
74 | __u32 ils : 1; /* incorrect length */ | ||
75 | __u32 zero : 6; /* reserved zeros */ | ||
76 | __u32 orbx : 1; /* ORB extension control */ | ||
77 | __u32 cpa; /* channel program address */ | ||
78 | } __attribute__ ((packed,aligned(4))); | ||
79 | |||
80 | /* subchannel data structure used by I/O subroutines */ | 55 | /* subchannel data structure used by I/O subroutines */ |
81 | struct subchannel { | 56 | struct subchannel { |
82 | struct subchannel_id schid; | 57 | struct subchannel_id schid; |
@@ -85,7 +60,7 @@ struct subchannel { | |||
85 | enum { | 60 | enum { |
86 | SUBCHANNEL_TYPE_IO = 0, | 61 | SUBCHANNEL_TYPE_IO = 0, |
87 | SUBCHANNEL_TYPE_CHSC = 1, | 62 | SUBCHANNEL_TYPE_CHSC = 1, |
88 | SUBCHANNEL_TYPE_MESSAGE = 2, | 63 | SUBCHANNEL_TYPE_MSG = 2, |
89 | SUBCHANNEL_TYPE_ADM = 3, | 64 | SUBCHANNEL_TYPE_ADM = 3, |
90 | } st; /* subchannel type */ | 65 | } st; /* subchannel type */ |
91 | 66 | ||
@@ -99,11 +74,10 @@ struct subchannel { | |||
99 | __u8 lpm; /* logical path mask */ | 74 | __u8 lpm; /* logical path mask */ |
100 | __u8 opm; /* operational path mask */ | 75 | __u8 opm; /* operational path mask */ |
101 | struct schib schib; /* subchannel information block */ | 76 | struct schib schib; /* subchannel information block */ |
102 | struct orb orb; /* operation request block */ | ||
103 | struct ccw1 sense_ccw; /* static ccw for sense command */ | ||
104 | struct chsc_ssd_info ssd_info; /* subchannel description */ | 77 | struct chsc_ssd_info ssd_info; /* subchannel description */ |
105 | struct device dev; /* entry in device tree */ | 78 | struct device dev; /* entry in device tree */ |
106 | struct css_driver *driver; | 79 | struct css_driver *driver; |
80 | void *private; /* private per subchannel type data */ | ||
107 | } __attribute__ ((aligned(8))); | 81 | } __attribute__ ((aligned(8))); |
108 | 82 | ||
109 | #define IO_INTERRUPT_TYPE 0 /* I/O interrupt type */ | 83 | #define IO_INTERRUPT_TYPE 0 /* I/O interrupt type */ |
@@ -111,7 +85,7 @@ struct subchannel { | |||
111 | #define to_subchannel(n) container_of(n, struct subchannel, dev) | 85 | #define to_subchannel(n) container_of(n, struct subchannel, dev) |
112 | 86 | ||
113 | extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id); | 87 | extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id); |
114 | extern int cio_enable_subchannel (struct subchannel *, unsigned int); | 88 | extern int cio_enable_subchannel(struct subchannel *, unsigned int, u32); |
115 | extern int cio_disable_subchannel (struct subchannel *); | 89 | extern int cio_disable_subchannel (struct subchannel *); |
116 | extern int cio_cancel (struct subchannel *); | 90 | extern int cio_cancel (struct subchannel *); |
117 | extern int cio_clear (struct subchannel *); | 91 | extern int cio_clear (struct subchannel *); |
@@ -125,6 +99,7 @@ extern int cio_get_options (struct subchannel *); | |||
125 | extern int cio_modify (struct subchannel *); | 99 | extern int cio_modify (struct subchannel *); |
126 | 100 | ||
127 | int cio_create_sch_lock(struct subchannel *); | 101 | int cio_create_sch_lock(struct subchannel *); |
102 | void do_adapter_IO(void); | ||
128 | 103 | ||
129 | /* Use with care. */ | 104 | /* Use with care. */ |
130 | #ifdef CONFIG_CCW_CONSOLE | 105 | #ifdef CONFIG_CCW_CONSOLE |
@@ -133,10 +108,12 @@ extern void cio_release_console(void); | |||
133 | extern int cio_is_console(struct subchannel_id); | 108 | extern int cio_is_console(struct subchannel_id); |
134 | extern struct subchannel *cio_get_console_subchannel(void); | 109 | extern struct subchannel *cio_get_console_subchannel(void); |
135 | extern spinlock_t * cio_get_console_lock(void); | 110 | extern spinlock_t * cio_get_console_lock(void); |
111 | extern void *cio_get_console_priv(void); | ||
136 | #else | 112 | #else |
137 | #define cio_is_console(schid) 0 | 113 | #define cio_is_console(schid) 0 |
138 | #define cio_get_console_subchannel() NULL | 114 | #define cio_get_console_subchannel() NULL |
139 | #define cio_get_console_lock() NULL; | 115 | #define cio_get_console_lock() NULL |
116 | #define cio_get_console_priv() NULL | ||
140 | #endif | 117 | #endif |
141 | 118 | ||
142 | extern int cio_show_msg; | 119 | extern int cio_show_msg; |
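
The struct subchannel cleanup above drops the embedded ORB and sense CCW in favour of a generic private pointer, so state that only I/O subchannels need can move into their own structure (io_sch.h, referenced from cio.c but not part of this excerpt). A hypothetical owner of such a private area might wire it up like this; the structure, macro, and allocation flags are illustrative only:

#include <linux/slab.h>

/* Hypothetical per-type private data; the real io_subchannel_private
 * is defined in io_sch.h. */
struct demo_sch_private {
        int request_state;              /* invented field */
};

#define to_demo_private(sch)    ((struct demo_sch_private *)(sch)->private)

static int demo_attach(struct subchannel *sch)
{
        struct demo_sch_private *priv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
        sch->private = priv;
        return 0;
}
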
diff --git a/drivers/s390/cio/cio_debug.h b/drivers/s390/cio/cio_debug.h index c9bf8989930f..d7429ef6c666 100644 --- a/drivers/s390/cio/cio_debug.h +++ b/drivers/s390/cio/cio_debug.h | |||
@@ -8,20 +8,19 @@ extern debug_info_t *cio_debug_msg_id; | |||
8 | extern debug_info_t *cio_debug_trace_id; | 8 | extern debug_info_t *cio_debug_trace_id; |
9 | extern debug_info_t *cio_debug_crw_id; | 9 | extern debug_info_t *cio_debug_crw_id; |
10 | 10 | ||
11 | #define CIO_TRACE_EVENT(imp, txt) do { \ | 11 | #define CIO_TRACE_EVENT(imp, txt) do { \ |
12 | debug_text_event(cio_debug_trace_id, imp, txt); \ | 12 | debug_text_event(cio_debug_trace_id, imp, txt); \ |
13 | } while (0) | 13 | } while (0) |
14 | 14 | ||
15 | #define CIO_MSG_EVENT(imp, args...) do { \ | 15 | #define CIO_MSG_EVENT(imp, args...) do { \ |
16 | debug_sprintf_event(cio_debug_msg_id, imp , ##args); \ | 16 | debug_sprintf_event(cio_debug_msg_id, imp , ##args); \ |
17 | } while (0) | 17 | } while (0) |
18 | 18 | ||
19 | #define CIO_CRW_EVENT(imp, args...) do { \ | 19 | #define CIO_CRW_EVENT(imp, args...) do { \ |
20 | debug_sprintf_event(cio_debug_crw_id, imp , ##args); \ | 20 | debug_sprintf_event(cio_debug_crw_id, imp , ##args); \ |
21 | } while (0) | 21 | } while (0) |
22 | 22 | ||
23 | static inline void | 23 | static inline void CIO_HEX_EVENT(int level, void *data, int length) |
24 | CIO_HEX_EVENT(int level, void *data, int length) | ||
25 | { | 24 | { |
26 | if (unlikely(!cio_debug_trace_id)) | 25 | if (unlikely(!cio_debug_trace_id)) |
27 | return; | 26 | return; |
@@ -32,9 +31,10 @@ CIO_HEX_EVENT(int level, void *data, int length) | |||
32 | } | 31 | } |
33 | } | 32 | } |
34 | 33 | ||
35 | #define CIO_DEBUG(printk_level,event_level,msg...) ({ \ | 34 | #define CIO_DEBUG(printk_level, event_level, msg...) do { \ |
36 | if (cio_show_msg) printk(printk_level msg); \ | 35 | if (cio_show_msg) \ |
37 | CIO_MSG_EVENT (event_level, msg); \ | 36 | printk(printk_level "cio: " msg); \ |
38 | }) | 37 | CIO_MSG_EVENT(event_level, msg); \ |
38 | } while (0) | ||
39 | 39 | ||
40 | #endif | 40 | #endif |
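
The CIO_DEBUG rework above also moves the macro from a GCC statement expression to the kernel's conventional do { ... } while (0) form, which expands to exactly one statement and still requires a trailing semicolon at the call site. A generic, self-contained illustration of why multi-statement macros are wrapped this way (the macro and helper are made up and unrelated to this patch):

#include <stdio.h>

static void log_val(int v)
{
        printf("%d\n", v);
}

/* Unsafe: expands to two statements, so only log_val(a) is guarded
 * by an unbraced if, and adding an else fails to compile. */
#define LOG_TWO_BAD(a, b)       log_val(a); log_val(b)

/* Safe: the whole body is a single statement. */
#define LOG_TWO(a, b)           do { log_val(a); log_val(b); } while (0)

int main(void)
{
        int cond = 1;

        if (cond)
                LOG_TWO(1, 2);
        else
                LOG_TWO(3, 4);
        return 0;
}
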
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index c3df2cd009a4..3b45bbe6cce0 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c | |||
@@ -51,6 +51,62 @@ for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) | |||
51 | return ret; | 51 | return ret; |
52 | } | 52 | } |
53 | 53 | ||
54 | struct cb_data { | ||
55 | void *data; | ||
56 | struct idset *set; | ||
57 | int (*fn_known_sch)(struct subchannel *, void *); | ||
58 | int (*fn_unknown_sch)(struct subchannel_id, void *); | ||
59 | }; | ||
60 | |||
61 | static int call_fn_known_sch(struct device *dev, void *data) | ||
62 | { | ||
63 | struct subchannel *sch = to_subchannel(dev); | ||
64 | struct cb_data *cb = data; | ||
65 | int rc = 0; | ||
66 | |||
67 | idset_sch_del(cb->set, sch->schid); | ||
68 | if (cb->fn_known_sch) | ||
69 | rc = cb->fn_known_sch(sch, cb->data); | ||
70 | return rc; | ||
71 | } | ||
72 | |||
73 | static int call_fn_unknown_sch(struct subchannel_id schid, void *data) | ||
74 | { | ||
75 | struct cb_data *cb = data; | ||
76 | int rc = 0; | ||
77 | |||
78 | if (idset_sch_contains(cb->set, schid)) | ||
79 | rc = cb->fn_unknown_sch(schid, cb->data); | ||
80 | return rc; | ||
81 | } | ||
82 | |||
83 | int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *), | ||
84 | int (*fn_unknown)(struct subchannel_id, | ||
85 | void *), void *data) | ||
86 | { | ||
87 | struct cb_data cb; | ||
88 | int rc; | ||
89 | |||
90 | cb.set = idset_sch_new(); | ||
91 | if (!cb.set) | ||
92 | return -ENOMEM; | ||
93 | idset_fill(cb.set); | ||
94 | cb.data = data; | ||
95 | cb.fn_known_sch = fn_known; | ||
96 | cb.fn_unknown_sch = fn_unknown; | ||
97 | /* Process registered subchannels. */ | ||
98 | rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch); | ||
99 | if (rc) | ||
100 | goto out; | ||
101 | /* Process unregistered subchannels. */ | ||
102 | if (fn_unknown) | ||
103 | rc = for_each_subchannel(call_fn_unknown_sch, &cb); | ||
104 | out: | ||
105 | idset_free(cb.set); | ||
106 | |||
107 | return rc; | ||
108 | } | ||
109 | |||
54 | static struct subchannel * | 110 | static struct subchannel * |
55 | css_alloc_subchannel(struct subchannel_id schid) | 111 | css_alloc_subchannel(struct subchannel_id schid) |
56 | { | 112 | { |
@@ -77,7 +133,7 @@ css_alloc_subchannel(struct subchannel_id schid) | |||
77 | * This is fine even on 64bit since the subchannel is always located | 133 | * This is fine even on 64bit since the subchannel is always located |
78 | * under 2G. | 134 | * under 2G. |
79 | */ | 135 | */ |
80 | sch->schib.pmcw.intparm = (__u32)(unsigned long)sch; | 136 | sch->schib.pmcw.intparm = (u32)(addr_t)sch; |
81 | ret = cio_modify(sch); | 137 | ret = cio_modify(sch); |
82 | if (ret) { | 138 | if (ret) { |
83 | kfree(sch->lock); | 139 | kfree(sch->lock); |
@@ -237,11 +293,25 @@ get_subchannel_by_schid(struct subchannel_id schid) | |||
237 | return dev ? to_subchannel(dev) : NULL; | 293 | return dev ? to_subchannel(dev) : NULL; |
238 | } | 294 | } |
239 | 295 | ||
296 | /** | ||
297 | * css_sch_is_valid() - check if a subchannel is valid | ||
298 | * @schib: subchannel information block for the subchannel | ||
299 | */ | ||
300 | int css_sch_is_valid(struct schib *schib) | ||
301 | { | ||
302 | if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv) | ||
303 | return 0; | ||
304 | return 1; | ||
305 | } | ||
306 | EXPORT_SYMBOL_GPL(css_sch_is_valid); | ||
307 | |||
240 | static int css_get_subchannel_status(struct subchannel *sch) | 308 | static int css_get_subchannel_status(struct subchannel *sch) |
241 | { | 309 | { |
242 | struct schib schib; | 310 | struct schib schib; |
243 | 311 | ||
244 | if (stsch(sch->schid, &schib) || !schib.pmcw.dnv) | 312 | if (stsch(sch->schid, &schib)) |
313 | return CIO_GONE; | ||
314 | if (!css_sch_is_valid(&schib)) | ||
245 | return CIO_GONE; | 315 | return CIO_GONE; |
246 | if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev)) | 316 | if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev)) |
247 | return CIO_REVALIDATE; | 317 | return CIO_REVALIDATE; |
@@ -293,7 +363,7 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow) | |||
293 | action = UNREGISTER; | 363 | action = UNREGISTER; |
294 | if (sch->driver && sch->driver->notify) { | 364 | if (sch->driver && sch->driver->notify) { |
295 | spin_unlock_irqrestore(sch->lock, flags); | 365 | spin_unlock_irqrestore(sch->lock, flags); |
296 | ret = sch->driver->notify(&sch->dev, event); | 366 | ret = sch->driver->notify(sch, event); |
297 | spin_lock_irqsave(sch->lock, flags); | 367 | spin_lock_irqsave(sch->lock, flags); |
298 | if (ret) | 368 | if (ret) |
299 | action = NONE; | 369 | action = NONE; |
@@ -349,7 +419,7 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) | |||
349 | /* Will be done on the slow path. */ | 419 | /* Will be done on the slow path. */ |
350 | return -EAGAIN; | 420 | return -EAGAIN; |
351 | } | 421 | } |
352 | if (stsch_err(schid, &schib) || !schib.pmcw.dnv) { | 422 | if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) { |
353 | /* Unusable - ignore. */ | 423 | /* Unusable - ignore. */ |
354 | return 0; | 424 | return 0; |
355 | } | 425 | } |
@@ -388,20 +458,56 @@ static int __init slow_subchannel_init(void) | |||
388 | return 0; | 458 | return 0; |
389 | } | 459 | } |
390 | 460 | ||
391 | static void css_slow_path_func(struct work_struct *unused) | 461 | static int slow_eval_known_fn(struct subchannel *sch, void *data) |
392 | { | 462 | { |
393 | struct subchannel_id schid; | 463 | int eval; |
464 | int rc; | ||
394 | 465 | ||
395 | CIO_TRACE_EVENT(4, "slowpath"); | ||
396 | spin_lock_irq(&slow_subchannel_lock); | 466 | spin_lock_irq(&slow_subchannel_lock); |
397 | init_subchannel_id(&schid); | 467 | eval = idset_sch_contains(slow_subchannel_set, sch->schid); |
398 | while (idset_sch_get_first(slow_subchannel_set, &schid)) { | 468 | idset_sch_del(slow_subchannel_set, sch->schid); |
399 | idset_sch_del(slow_subchannel_set, schid); | 469 | spin_unlock_irq(&slow_subchannel_lock); |
400 | spin_unlock_irq(&slow_subchannel_lock); | 470 | if (eval) { |
401 | css_evaluate_subchannel(schid, 1); | 471 | rc = css_evaluate_known_subchannel(sch, 1); |
402 | spin_lock_irq(&slow_subchannel_lock); | 472 | if (rc == -EAGAIN) |
473 | css_schedule_eval(sch->schid); | ||
403 | } | 474 | } |
475 | return 0; | ||
476 | } | ||
477 | |||
478 | static int slow_eval_unknown_fn(struct subchannel_id schid, void *data) | ||
479 | { | ||
480 | int eval; | ||
481 | int rc = 0; | ||
482 | |||
483 | spin_lock_irq(&slow_subchannel_lock); | ||
484 | eval = idset_sch_contains(slow_subchannel_set, schid); | ||
485 | idset_sch_del(slow_subchannel_set, schid); | ||
404 | spin_unlock_irq(&slow_subchannel_lock); | 486 | spin_unlock_irq(&slow_subchannel_lock); |
487 | if (eval) { | ||
488 | rc = css_evaluate_new_subchannel(schid, 1); | ||
489 | switch (rc) { | ||
490 | case -EAGAIN: | ||
491 | css_schedule_eval(schid); | ||
492 | rc = 0; | ||
493 | break; | ||
494 | case -ENXIO: | ||
495 | case -ENOMEM: | ||
496 | case -EIO: | ||
497 | /* These should abort looping */ | ||
498 | break; | ||
499 | default: | ||
500 | rc = 0; | ||
501 | } | ||
502 | } | ||
503 | return rc; | ||
504 | } | ||
505 | |||
506 | static void css_slow_path_func(struct work_struct *unused) | ||
507 | { | ||
508 | CIO_TRACE_EVENT(4, "slowpath"); | ||
509 | for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn, | ||
510 | NULL); | ||
405 | } | 511 | } |
406 | 512 | ||
407 | static DECLARE_WORK(slow_path_work, css_slow_path_func); | 513 | static DECLARE_WORK(slow_path_work, css_slow_path_func); |
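The slow path no longer walks the idset by hand: for_each_subchannel_staged() visits already-registered subchannels through one callback and unknown subchannel IDs through another, and either callback may be NULL (reprobe_all() below passes NULL for the known case). A hedged sketch of the pattern with made-up callback names:

static int known_fn(struct subchannel *sch, void *data)
{
	/* called once for each subchannel that is already registered */
	return 0;
}

static int unknown_fn(struct subchannel_id schid, void *data)
{
	/* called for subchannel IDs with no registered subchannel yet;
	 * a non-zero return presumably aborts the iteration, as the
	 * "These should abort looping" comment above suggests */
	return 0;
}

static void walk_all_subchannels(void)
{
	for_each_subchannel_staged(known_fn, unknown_fn, NULL);
}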
@@ -430,7 +536,6 @@ void css_schedule_eval_all(void) | |||
430 | /* Reprobe subchannel if unregistered. */ | 536 | /* Reprobe subchannel if unregistered. */ |
431 | static int reprobe_subchannel(struct subchannel_id schid, void *data) | 537 | static int reprobe_subchannel(struct subchannel_id schid, void *data) |
432 | { | 538 | { |
433 | struct subchannel *sch; | ||
434 | int ret; | 539 | int ret; |
435 | 540 | ||
436 | CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n", | 541 | CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n", |
@@ -438,13 +543,6 @@ static int reprobe_subchannel(struct subchannel_id schid, void *data) | |||
438 | if (need_reprobe) | 543 | if (need_reprobe) |
439 | return -EAGAIN; | 544 | return -EAGAIN; |
440 | 545 | ||
441 | sch = get_subchannel_by_schid(schid); | ||
442 | if (sch) { | ||
443 | /* Already known. */ | ||
444 | put_device(&sch->dev); | ||
445 | return 0; | ||
446 | } | ||
447 | |||
448 | ret = css_probe_device(schid); | 546 | ret = css_probe_device(schid); |
449 | switch (ret) { | 547 | switch (ret) { |
450 | case 0: | 548 | case 0: |
@@ -472,7 +570,7 @@ static void reprobe_all(struct work_struct *unused) | |||
472 | /* Make sure initial subchannel scan is done. */ | 570 | /* Make sure initial subchannel scan is done. */ |
473 | wait_event(ccw_device_init_wq, | 571 | wait_event(ccw_device_init_wq, |
474 | atomic_read(&ccw_device_init_count) == 0); | 572 | atomic_read(&ccw_device_init_count) == 0); |
475 | ret = for_each_subchannel(reprobe_subchannel, NULL); | 573 | ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL); |
476 | 574 | ||
477 | CIO_MSG_EVENT(2, "reprobe done (rc=%d, need_reprobe=%d)\n", ret, | 575 | CIO_MSG_EVENT(2, "reprobe done (rc=%d, need_reprobe=%d)\n", ret, |
478 | need_reprobe); | 576 | need_reprobe); |
@@ -787,8 +885,8 @@ int sch_is_pseudo_sch(struct subchannel *sch) | |||
787 | static int | 885 | static int |
788 | css_bus_match (struct device *dev, struct device_driver *drv) | 886 | css_bus_match (struct device *dev, struct device_driver *drv) |
789 | { | 887 | { |
790 | struct subchannel *sch = container_of (dev, struct subchannel, dev); | 888 | struct subchannel *sch = to_subchannel(dev); |
791 | struct css_driver *driver = container_of (drv, struct css_driver, drv); | 889 | struct css_driver *driver = to_cssdriver(drv); |
792 | 890 | ||
793 | if (sch->st == driver->subchannel_type) | 891 | if (sch->st == driver->subchannel_type) |
794 | return 1; | 892 | return 1; |
@@ -796,32 +894,36 @@ css_bus_match (struct device *dev, struct device_driver *drv) | |||
796 | return 0; | 894 | return 0; |
797 | } | 895 | } |
798 | 896 | ||
799 | static int | 897 | static int css_probe(struct device *dev) |
800 | css_probe (struct device *dev) | ||
801 | { | 898 | { |
802 | struct subchannel *sch; | 899 | struct subchannel *sch; |
900 | int ret; | ||
803 | 901 | ||
804 | sch = to_subchannel(dev); | 902 | sch = to_subchannel(dev); |
805 | sch->driver = container_of (dev->driver, struct css_driver, drv); | 903 | sch->driver = to_cssdriver(dev->driver); |
806 | return (sch->driver->probe ? sch->driver->probe(sch) : 0); | 904 | ret = sch->driver->probe ? sch->driver->probe(sch) : 0; |
905 | if (ret) | ||
906 | sch->driver = NULL; | ||
907 | return ret; | ||
807 | } | 908 | } |
808 | 909 | ||
809 | static int | 910 | static int css_remove(struct device *dev) |
810 | css_remove (struct device *dev) | ||
811 | { | 911 | { |
812 | struct subchannel *sch; | 912 | struct subchannel *sch; |
913 | int ret; | ||
813 | 914 | ||
814 | sch = to_subchannel(dev); | 915 | sch = to_subchannel(dev); |
815 | return (sch->driver->remove ? sch->driver->remove(sch) : 0); | 916 | ret = sch->driver->remove ? sch->driver->remove(sch) : 0; |
917 | sch->driver = NULL; | ||
918 | return ret; | ||
816 | } | 919 | } |
817 | 920 | ||
818 | static void | 921 | static void css_shutdown(struct device *dev) |
819 | css_shutdown (struct device *dev) | ||
820 | { | 922 | { |
821 | struct subchannel *sch; | 923 | struct subchannel *sch; |
822 | 924 | ||
823 | sch = to_subchannel(dev); | 925 | sch = to_subchannel(dev); |
824 | if (sch->driver->shutdown) | 926 | if (sch->driver && sch->driver->shutdown) |
825 | sch->driver->shutdown(sch); | 927 | sch->driver->shutdown(sch); |
826 | } | 928 | } |
827 | 929 | ||
@@ -833,6 +935,34 @@ struct bus_type css_bus_type = { | |||
833 | .shutdown = css_shutdown, | 935 | .shutdown = css_shutdown, |
834 | }; | 936 | }; |
835 | 937 | ||
938 | /** | ||
939 | * css_driver_register - register a css driver | ||
940 | * @cdrv: css driver to register | ||
941 | * | ||
942 | * This is mainly a wrapper around driver_register that sets name | ||
943 | * and bus_type in the embedded struct device_driver correctly. | ||
944 | */ | ||
945 | int css_driver_register(struct css_driver *cdrv) | ||
946 | { | ||
947 | cdrv->drv.name = cdrv->name; | ||
948 | cdrv->drv.bus = &css_bus_type; | ||
949 | cdrv->drv.owner = cdrv->owner; | ||
950 | return driver_register(&cdrv->drv); | ||
951 | } | ||
952 | EXPORT_SYMBOL_GPL(css_driver_register); | ||
953 | |||
954 | /** | ||
955 | * css_driver_unregister - unregister a css driver | ||
956 | * @cdrv: css driver to unregister | ||
957 | * | ||
958 | * This is a wrapper around driver_unregister. | ||
959 | */ | ||
960 | void css_driver_unregister(struct css_driver *cdrv) | ||
961 | { | ||
962 | driver_unregister(&cdrv->drv); | ||
963 | } | ||
964 | EXPORT_SYMBOL_GPL(css_driver_unregister); | ||
965 | |||
836 | subsys_initcall(init_channel_subsystem); | 966 | subsys_initcall(init_channel_subsystem); |
837 | 967 | ||
838 | MODULE_LICENSE("GPL"); | 968 | MODULE_LICENSE("GPL"); |
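With css_driver_register()/css_driver_unregister() exported, a subchannel driver only fills in css_driver fields and never touches the embedded struct device_driver. A hedged sketch with invented names (the in-tree conversion is io_subchannel_driver in device.c further down):

static int example_probe(struct subchannel *sch)
{
	return 0;
}

static struct css_driver example_css_driver = {
	.owner           = THIS_MODULE,
	.name            = "example_subchannel",	/* copied into drv.name */
	.subchannel_type = SUBCHANNEL_TYPE_IO,
	.probe           = example_probe,
};

static int __init example_init(void)
{
	return css_driver_register(&example_css_driver);
}

static void __exit example_exit(void)
{
	css_driver_unregister(&example_css_driver);
}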
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index 81215ef32435..b70554523552 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h | |||
@@ -58,64 +58,6 @@ struct pgid { | |||
58 | __u32 tod_high; /* high word TOD clock */ | 58 | __u32 tod_high; /* high word TOD clock */ |
59 | } __attribute__ ((packed)); | 59 | } __attribute__ ((packed)); |
60 | 60 | ||
61 | #define MAX_CIWS 8 | ||
62 | |||
63 | /* | ||
64 | * sense-id response buffer layout | ||
65 | */ | ||
66 | struct senseid { | ||
67 | /* common part */ | ||
68 | __u8 reserved; /* always 0x'FF' */ | ||
69 | __u16 cu_type; /* control unit type */ | ||
70 | __u8 cu_model; /* control unit model */ | ||
71 | __u16 dev_type; /* device type */ | ||
72 | __u8 dev_model; /* device model */ | ||
73 | __u8 unused; /* padding byte */ | ||
74 | /* extended part */ | ||
75 | struct ciw ciw[MAX_CIWS]; /* variable # of CIWs */ | ||
76 | } __attribute__ ((packed,aligned(4))); | ||
77 | |||
78 | struct ccw_device_private { | ||
79 | struct ccw_device *cdev; | ||
80 | struct subchannel *sch; | ||
81 | int state; /* device state */ | ||
82 | atomic_t onoff; | ||
83 | unsigned long registered; | ||
84 | struct ccw_dev_id dev_id; /* device id */ | ||
85 | struct subchannel_id schid; /* subchannel number */ | ||
86 | __u8 imask; /* lpm mask for SNID/SID/SPGID */ | ||
87 | int iretry; /* retry counter SNID/SID/SPGID */ | ||
88 | struct { | ||
89 | unsigned int fast:1; /* post with "channel end" */ | ||
90 | unsigned int repall:1; /* report every interrupt status */ | ||
91 | unsigned int pgroup:1; /* do path grouping */ | ||
92 | unsigned int force:1; /* allow forced online */ | ||
93 | } __attribute__ ((packed)) options; | ||
94 | struct { | ||
95 | unsigned int pgid_single:1; /* use single path for Set PGID */ | ||
96 | unsigned int esid:1; /* Ext. SenseID supported by HW */ | ||
97 | unsigned int dosense:1; /* delayed SENSE required */ | ||
98 | unsigned int doverify:1; /* delayed path verification */ | ||
99 | unsigned int donotify:1; /* call notify function */ | ||
100 | unsigned int recog_done:1; /* dev. recog. complete */ | ||
101 | unsigned int fake_irb:1; /* deliver faked irb */ | ||
102 | unsigned int intretry:1; /* retry internal operation */ | ||
103 | } __attribute__((packed)) flags; | ||
104 | unsigned long intparm; /* user interruption parameter */ | ||
105 | struct qdio_irq *qdio_data; | ||
106 | struct irb irb; /* device status */ | ||
107 | struct senseid senseid; /* SenseID info */ | ||
108 | struct pgid pgid[8]; /* path group IDs per chpid*/ | ||
109 | struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */ | ||
110 | struct work_struct kick_work; | ||
111 | wait_queue_head_t wait_q; | ||
112 | struct timer_list timer; | ||
113 | void *cmb; /* measurement information */ | ||
114 | struct list_head cmb_list; /* list of measured devices */ | ||
115 | u64 cmb_start_time; /* clock value of cmb reset */ | ||
116 | void *cmb_wait; /* deferred cmb enable/disable */ | ||
117 | }; | ||
118 | |||
119 | /* | 61 | /* |
120 | * A css driver handles all subchannels of one type. | 62 | * A css driver handles all subchannels of one type. |
121 | * Currently, we only care about I/O subchannels (type 0), these | 63 | * Currently, we only care about I/O subchannels (type 0), these |
@@ -123,25 +65,35 @@ struct ccw_device_private { | |||
123 | */ | 65 | */ |
124 | struct subchannel; | 66 | struct subchannel; |
125 | struct css_driver { | 67 | struct css_driver { |
68 | struct module *owner; | ||
126 | unsigned int subchannel_type; | 69 | unsigned int subchannel_type; |
127 | struct device_driver drv; | 70 | struct device_driver drv; |
128 | void (*irq)(struct device *); | 71 | void (*irq)(struct subchannel *); |
129 | int (*notify)(struct device *, int); | 72 | int (*notify)(struct subchannel *, int); |
130 | void (*verify)(struct device *); | 73 | void (*verify)(struct subchannel *); |
131 | void (*termination)(struct device *); | 74 | void (*termination)(struct subchannel *); |
132 | int (*probe)(struct subchannel *); | 75 | int (*probe)(struct subchannel *); |
133 | int (*remove)(struct subchannel *); | 76 | int (*remove)(struct subchannel *); |
134 | void (*shutdown)(struct subchannel *); | 77 | void (*shutdown)(struct subchannel *); |
78 | const char *name; | ||
135 | }; | 79 | }; |
136 | 80 | ||
81 | #define to_cssdriver(n) container_of(n, struct css_driver, drv) | ||
82 | |||
137 | /* | 83 | /* |
138 | * all css_drivers have the css_bus_type | 84 | * all css_drivers have the css_bus_type |
139 | */ | 85 | */ |
140 | extern struct bus_type css_bus_type; | 86 | extern struct bus_type css_bus_type; |
141 | 87 | ||
88 | extern int css_driver_register(struct css_driver *); | ||
89 | extern void css_driver_unregister(struct css_driver *); | ||
90 | |||
142 | extern void css_sch_device_unregister(struct subchannel *); | 91 | extern void css_sch_device_unregister(struct subchannel *); |
143 | extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); | 92 | extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); |
144 | extern int css_init_done; | 93 | extern int css_init_done; |
94 | int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *), | ||
95 | int (*fn_unknown)(struct subchannel_id, | ||
96 | void *), void *data); | ||
145 | extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); | 97 | extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); |
146 | extern void css_process_crw(int, int); | 98 | extern void css_process_crw(int, int); |
147 | extern void css_reiterate_subchannels(void); | 99 | extern void css_reiterate_subchannels(void); |
@@ -188,6 +140,8 @@ void css_schedule_eval(struct subchannel_id schid); | |||
188 | void css_schedule_eval_all(void); | 140 | void css_schedule_eval_all(void); |
189 | 141 | ||
190 | int sch_is_pseudo_sch(struct subchannel *); | 142 | int sch_is_pseudo_sch(struct subchannel *); |
143 | struct schib; | ||
144 | int css_sch_is_valid(struct schib *); | ||
191 | 145 | ||
192 | extern struct workqueue_struct *slow_path_wq; | 146 | extern struct workqueue_struct *slow_path_wq; |
193 | 147 | ||
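All css_driver callbacks now take a struct subchannel * instead of a struct device *, and to_cssdriver() complements to_subchannel() for the conversions the bus code needs. A sketch of a notify callback under the new prototype; the return-value convention is taken from css_evaluate_known_subchannel() above (non-zero keeps the subchannel, zero lets it be unregistered), and CIO_GONE stands in for whichever event codes the evaluator passes:

static int example_notify(struct subchannel *sch, int event)
{
	if (event == CIO_GONE)
		return 0;	/* let the css core unregister the subchannel */
	return 1;		/* keep it (action = NONE in the evaluator) */
}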
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 74f6b539974a..d35dc3f25d06 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/list.h> | 17 | #include <linux/list.h> |
18 | #include <linux/device.h> | 18 | #include <linux/device.h> |
19 | #include <linux/workqueue.h> | 19 | #include <linux/workqueue.h> |
20 | #include <linux/timer.h> | ||
20 | 21 | ||
21 | #include <asm/ccwdev.h> | 22 | #include <asm/ccwdev.h> |
22 | #include <asm/cio.h> | 23 | #include <asm/cio.h> |
@@ -28,6 +29,12 @@ | |||
28 | #include "css.h" | 29 | #include "css.h" |
29 | #include "device.h" | 30 | #include "device.h" |
30 | #include "ioasm.h" | 31 | #include "ioasm.h" |
32 | #include "io_sch.h" | ||
33 | |||
34 | static struct timer_list recovery_timer; | ||
35 | static spinlock_t recovery_lock; | ||
36 | static int recovery_phase; | ||
37 | static const unsigned long recovery_delay[] = { 3, 30, 300 }; | ||
31 | 38 | ||
32 | /******************* bus type handling ***********************/ | 39 | /******************* bus type handling ***********************/ |
33 | 40 | ||
@@ -115,19 +122,18 @@ static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env) | |||
115 | 122 | ||
116 | struct bus_type ccw_bus_type; | 123 | struct bus_type ccw_bus_type; |
117 | 124 | ||
118 | static int io_subchannel_probe (struct subchannel *); | 125 | static void io_subchannel_irq(struct subchannel *); |
119 | static int io_subchannel_remove (struct subchannel *); | 126 | static int io_subchannel_probe(struct subchannel *); |
120 | static int io_subchannel_notify(struct device *, int); | 127 | static int io_subchannel_remove(struct subchannel *); |
121 | static void io_subchannel_verify(struct device *); | 128 | static int io_subchannel_notify(struct subchannel *, int); |
122 | static void io_subchannel_ioterm(struct device *); | 129 | static void io_subchannel_verify(struct subchannel *); |
130 | static void io_subchannel_ioterm(struct subchannel *); | ||
123 | static void io_subchannel_shutdown(struct subchannel *); | 131 | static void io_subchannel_shutdown(struct subchannel *); |
124 | 132 | ||
125 | static struct css_driver io_subchannel_driver = { | 133 | static struct css_driver io_subchannel_driver = { |
134 | .owner = THIS_MODULE, | ||
126 | .subchannel_type = SUBCHANNEL_TYPE_IO, | 135 | .subchannel_type = SUBCHANNEL_TYPE_IO, |
127 | .drv = { | 136 | .name = "io_subchannel", |
128 | .name = "io_subchannel", | ||
129 | .bus = &css_bus_type, | ||
130 | }, | ||
131 | .irq = io_subchannel_irq, | 137 | .irq = io_subchannel_irq, |
132 | .notify = io_subchannel_notify, | 138 | .notify = io_subchannel_notify, |
133 | .verify = io_subchannel_verify, | 139 | .verify = io_subchannel_verify, |
@@ -142,6 +148,8 @@ struct workqueue_struct *ccw_device_notify_work; | |||
142 | wait_queue_head_t ccw_device_init_wq; | 148 | wait_queue_head_t ccw_device_init_wq; |
143 | atomic_t ccw_device_init_count; | 149 | atomic_t ccw_device_init_count; |
144 | 150 | ||
151 | static void recovery_func(unsigned long data); | ||
152 | |||
145 | static int __init | 153 | static int __init |
146 | init_ccw_bus_type (void) | 154 | init_ccw_bus_type (void) |
147 | { | 155 | { |
@@ -149,6 +157,7 @@ init_ccw_bus_type (void) | |||
149 | 157 | ||
150 | init_waitqueue_head(&ccw_device_init_wq); | 158 | init_waitqueue_head(&ccw_device_init_wq); |
151 | atomic_set(&ccw_device_init_count, 0); | 159 | atomic_set(&ccw_device_init_count, 0); |
160 | setup_timer(&recovery_timer, recovery_func, 0); | ||
152 | 161 | ||
153 | ccw_device_work = create_singlethread_workqueue("cio"); | 162 | ccw_device_work = create_singlethread_workqueue("cio"); |
154 | if (!ccw_device_work) | 163 | if (!ccw_device_work) |
@@ -166,7 +175,8 @@ init_ccw_bus_type (void) | |||
166 | if ((ret = bus_register (&ccw_bus_type))) | 175 | if ((ret = bus_register (&ccw_bus_type))) |
167 | goto out_err; | 176 | goto out_err; |
168 | 177 | ||
169 | if ((ret = driver_register(&io_subchannel_driver.drv))) | 178 | ret = css_driver_register(&io_subchannel_driver); |
179 | if (ret) | ||
170 | goto out_err; | 180 | goto out_err; |
171 | 181 | ||
172 | wait_event(ccw_device_init_wq, | 182 | wait_event(ccw_device_init_wq, |
@@ -186,7 +196,7 @@ out_err: | |||
186 | static void __exit | 196 | static void __exit |
187 | cleanup_ccw_bus_type (void) | 197 | cleanup_ccw_bus_type (void) |
188 | { | 198 | { |
189 | driver_unregister(&io_subchannel_driver.drv); | 199 | css_driver_unregister(&io_subchannel_driver); |
190 | bus_unregister(&ccw_bus_type); | 200 | bus_unregister(&ccw_bus_type); |
191 | destroy_workqueue(ccw_device_notify_work); | 201 | destroy_workqueue(ccw_device_notify_work); |
192 | destroy_workqueue(ccw_device_work); | 202 | destroy_workqueue(ccw_device_work); |
@@ -773,7 +783,7 @@ static void sch_attach_device(struct subchannel *sch, | |||
773 | { | 783 | { |
774 | css_update_ssd_info(sch); | 784 | css_update_ssd_info(sch); |
775 | spin_lock_irq(sch->lock); | 785 | spin_lock_irq(sch->lock); |
776 | sch->dev.driver_data = cdev; | 786 | sch_set_cdev(sch, cdev); |
777 | cdev->private->schid = sch->schid; | 787 | cdev->private->schid = sch->schid; |
778 | cdev->ccwlock = sch->lock; | 788 | cdev->ccwlock = sch->lock; |
779 | device_trigger_reprobe(sch); | 789 | device_trigger_reprobe(sch); |
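The many sch->dev.driver_data accesses are replaced by the sch_get_cdev()/sch_set_cdev() accessors from the new io_sch.h, thin wrappers around dev_get_drvdata()/dev_set_drvdata() on the subchannel's struct device. The pattern, in brief:

	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);	/* was: cdev = sch->dev.driver_data */
	if (!cdev)
		return;
	sch_set_cdev(sch, NULL);	/* was: sch->dev.driver_data = NULL */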
@@ -795,7 +805,7 @@ static void sch_attach_disconnected_device(struct subchannel *sch, | |||
795 | put_device(&other_sch->dev); | 805 | put_device(&other_sch->dev); |
796 | return; | 806 | return; |
797 | } | 807 | } |
798 | other_sch->dev.driver_data = NULL; | 808 | sch_set_cdev(other_sch, NULL); |
799 | /* No need to keep a subchannel without ccw device around. */ | 809 | /* No need to keep a subchannel without ccw device around. */ |
800 | css_sch_device_unregister(other_sch); | 810 | css_sch_device_unregister(other_sch); |
801 | put_device(&other_sch->dev); | 811 | put_device(&other_sch->dev); |
@@ -831,12 +841,12 @@ static void sch_create_and_recog_new_device(struct subchannel *sch) | |||
831 | return; | 841 | return; |
832 | } | 842 | } |
833 | spin_lock_irq(sch->lock); | 843 | spin_lock_irq(sch->lock); |
834 | sch->dev.driver_data = cdev; | 844 | sch_set_cdev(sch, cdev); |
835 | spin_unlock_irq(sch->lock); | 845 | spin_unlock_irq(sch->lock); |
836 | /* Start recognition for the new ccw device. */ | 846 | /* Start recognition for the new ccw device. */ |
837 | if (io_subchannel_recog(cdev, sch)) { | 847 | if (io_subchannel_recog(cdev, sch)) { |
838 | spin_lock_irq(sch->lock); | 848 | spin_lock_irq(sch->lock); |
839 | sch->dev.driver_data = NULL; | 849 | sch_set_cdev(sch, NULL); |
840 | spin_unlock_irq(sch->lock); | 850 | spin_unlock_irq(sch->lock); |
841 | if (cdev->dev.release) | 851 | if (cdev->dev.release) |
842 | cdev->dev.release(&cdev->dev); | 852 | cdev->dev.release(&cdev->dev); |
@@ -940,7 +950,7 @@ io_subchannel_register(struct work_struct *work) | |||
940 | cdev->private->dev_id.devno, ret); | 950 | cdev->private->dev_id.devno, ret); |
941 | put_device(&cdev->dev); | 951 | put_device(&cdev->dev); |
942 | spin_lock_irqsave(sch->lock, flags); | 952 | spin_lock_irqsave(sch->lock, flags); |
943 | sch->dev.driver_data = NULL; | 953 | sch_set_cdev(sch, NULL); |
944 | spin_unlock_irqrestore(sch->lock, flags); | 954 | spin_unlock_irqrestore(sch->lock, flags); |
945 | kfree (cdev->private); | 955 | kfree (cdev->private); |
946 | kfree (cdev); | 956 | kfree (cdev); |
@@ -1022,7 +1032,7 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch) | |||
1022 | int rc; | 1032 | int rc; |
1023 | struct ccw_device_private *priv; | 1033 | struct ccw_device_private *priv; |
1024 | 1034 | ||
1025 | sch->dev.driver_data = cdev; | 1035 | sch_set_cdev(sch, cdev); |
1026 | sch->driver = &io_subchannel_driver; | 1036 | sch->driver = &io_subchannel_driver; |
1027 | cdev->ccwlock = sch->lock; | 1037 | cdev->ccwlock = sch->lock; |
1028 | 1038 | ||
@@ -1082,7 +1092,7 @@ static void ccw_device_move_to_sch(struct work_struct *work) | |||
1082 | } | 1092 | } |
1083 | if (former_parent) { | 1093 | if (former_parent) { |
1084 | spin_lock_irq(former_parent->lock); | 1094 | spin_lock_irq(former_parent->lock); |
1085 | former_parent->dev.driver_data = NULL; | 1095 | sch_set_cdev(former_parent, NULL); |
1086 | spin_unlock_irq(former_parent->lock); | 1096 | spin_unlock_irq(former_parent->lock); |
1087 | css_sch_device_unregister(former_parent); | 1097 | css_sch_device_unregister(former_parent); |
1088 | /* Reset intparm to zeroes. */ | 1098 | /* Reset intparm to zeroes. */ |
@@ -1096,6 +1106,18 @@ out: | |||
1096 | put_device(&cdev->dev); | 1106 | put_device(&cdev->dev); |
1097 | } | 1107 | } |
1098 | 1108 | ||
1109 | static void io_subchannel_irq(struct subchannel *sch) | ||
1110 | { | ||
1111 | struct ccw_device *cdev; | ||
1112 | |||
1113 | cdev = sch_get_cdev(sch); | ||
1114 | |||
1115 | CIO_TRACE_EVENT(3, "IRQ"); | ||
1116 | CIO_TRACE_EVENT(3, sch->dev.bus_id); | ||
1117 | if (cdev) | ||
1118 | dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); | ||
1119 | } | ||
1120 | |||
1099 | static int | 1121 | static int |
1100 | io_subchannel_probe (struct subchannel *sch) | 1122 | io_subchannel_probe (struct subchannel *sch) |
1101 | { | 1123 | { |
@@ -1104,13 +1126,13 @@ io_subchannel_probe (struct subchannel *sch) | |||
1104 | unsigned long flags; | 1126 | unsigned long flags; |
1105 | struct ccw_dev_id dev_id; | 1127 | struct ccw_dev_id dev_id; |
1106 | 1128 | ||
1107 | if (sch->dev.driver_data) { | 1129 | cdev = sch_get_cdev(sch); |
1130 | if (cdev) { | ||
1108 | /* | 1131 | /* |
1109 | * This subchannel already has an associated ccw_device. | 1132 | * This subchannel already has an associated ccw_device. |
1110 | * Register it and exit. This happens for all early | 1133 | * Register it and exit. This happens for all early |
1111 | * device, e.g. the console. | 1134 | * device, e.g. the console. |
1112 | */ | 1135 | */ |
1113 | cdev = sch->dev.driver_data; | ||
1114 | cdev->dev.groups = ccwdev_attr_groups; | 1136 | cdev->dev.groups = ccwdev_attr_groups; |
1115 | device_initialize(&cdev->dev); | 1137 | device_initialize(&cdev->dev); |
1116 | ccw_device_register(cdev); | 1138 | ccw_device_register(cdev); |
@@ -1132,6 +1154,11 @@ io_subchannel_probe (struct subchannel *sch) | |||
1132 | */ | 1154 | */ |
1133 | dev_id.devno = sch->schib.pmcw.dev; | 1155 | dev_id.devno = sch->schib.pmcw.dev; |
1134 | dev_id.ssid = sch->schid.ssid; | 1156 | dev_id.ssid = sch->schid.ssid; |
1157 | /* Allocate I/O subchannel private data. */ | ||
1158 | sch->private = kzalloc(sizeof(struct io_subchannel_private), | ||
1159 | GFP_KERNEL | GFP_DMA); | ||
1160 | if (!sch->private) | ||
1161 | return -ENOMEM; | ||
1135 | cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL); | 1162 | cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL); |
1136 | if (!cdev) | 1163 | if (!cdev) |
1137 | cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent), | 1164 | cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent), |
@@ -1149,16 +1176,18 @@ io_subchannel_probe (struct subchannel *sch) | |||
1149 | return 0; | 1176 | return 0; |
1150 | } | 1177 | } |
1151 | cdev = io_subchannel_create_ccwdev(sch); | 1178 | cdev = io_subchannel_create_ccwdev(sch); |
1152 | if (IS_ERR(cdev)) | 1179 | if (IS_ERR(cdev)) { |
1180 | kfree(sch->private); | ||
1153 | return PTR_ERR(cdev); | 1181 | return PTR_ERR(cdev); |
1154 | 1182 | } | |
1155 | rc = io_subchannel_recog(cdev, sch); | 1183 | rc = io_subchannel_recog(cdev, sch); |
1156 | if (rc) { | 1184 | if (rc) { |
1157 | spin_lock_irqsave(sch->lock, flags); | 1185 | spin_lock_irqsave(sch->lock, flags); |
1158 | sch->dev.driver_data = NULL; | 1186 | sch_set_cdev(sch, NULL); |
1159 | spin_unlock_irqrestore(sch->lock, flags); | 1187 | spin_unlock_irqrestore(sch->lock, flags); |
1160 | if (cdev->dev.release) | 1188 | if (cdev->dev.release) |
1161 | cdev->dev.release(&cdev->dev); | 1189 | cdev->dev.release(&cdev->dev); |
1190 | kfree(sch->private); | ||
1162 | } | 1191 | } |
1163 | 1192 | ||
1164 | return rc; | 1193 | return rc; |
@@ -1170,25 +1199,25 @@ io_subchannel_remove (struct subchannel *sch) | |||
1170 | struct ccw_device *cdev; | 1199 | struct ccw_device *cdev; |
1171 | unsigned long flags; | 1200 | unsigned long flags; |
1172 | 1201 | ||
1173 | if (!sch->dev.driver_data) | 1202 | cdev = sch_get_cdev(sch); |
1203 | if (!cdev) | ||
1174 | return 0; | 1204 | return 0; |
1175 | cdev = sch->dev.driver_data; | ||
1176 | /* Set ccw device to not operational and drop reference. */ | 1205 | /* Set ccw device to not operational and drop reference. */ |
1177 | spin_lock_irqsave(cdev->ccwlock, flags); | 1206 | spin_lock_irqsave(cdev->ccwlock, flags); |
1178 | sch->dev.driver_data = NULL; | 1207 | sch_set_cdev(sch, NULL); |
1179 | cdev->private->state = DEV_STATE_NOT_OPER; | 1208 | cdev->private->state = DEV_STATE_NOT_OPER; |
1180 | spin_unlock_irqrestore(cdev->ccwlock, flags); | 1209 | spin_unlock_irqrestore(cdev->ccwlock, flags); |
1181 | ccw_device_unregister(cdev); | 1210 | ccw_device_unregister(cdev); |
1182 | put_device(&cdev->dev); | 1211 | put_device(&cdev->dev); |
1212 | kfree(sch->private); | ||
1183 | return 0; | 1213 | return 0; |
1184 | } | 1214 | } |
1185 | 1215 | ||
1186 | static int | 1216 | static int io_subchannel_notify(struct subchannel *sch, int event) |
1187 | io_subchannel_notify(struct device *dev, int event) | ||
1188 | { | 1217 | { |
1189 | struct ccw_device *cdev; | 1218 | struct ccw_device *cdev; |
1190 | 1219 | ||
1191 | cdev = dev->driver_data; | 1220 | cdev = sch_get_cdev(sch); |
1192 | if (!cdev) | 1221 | if (!cdev) |
1193 | return 0; | 1222 | return 0; |
1194 | if (!cdev->drv) | 1223 | if (!cdev->drv) |
@@ -1198,22 +1227,20 @@ io_subchannel_notify(struct device *dev, int event) | |||
1198 | return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0; | 1227 | return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0; |
1199 | } | 1228 | } |
1200 | 1229 | ||
1201 | static void | 1230 | static void io_subchannel_verify(struct subchannel *sch) |
1202 | io_subchannel_verify(struct device *dev) | ||
1203 | { | 1231 | { |
1204 | struct ccw_device *cdev; | 1232 | struct ccw_device *cdev; |
1205 | 1233 | ||
1206 | cdev = dev->driver_data; | 1234 | cdev = sch_get_cdev(sch); |
1207 | if (cdev) | 1235 | if (cdev) |
1208 | dev_fsm_event(cdev, DEV_EVENT_VERIFY); | 1236 | dev_fsm_event(cdev, DEV_EVENT_VERIFY); |
1209 | } | 1237 | } |
1210 | 1238 | ||
1211 | static void | 1239 | static void io_subchannel_ioterm(struct subchannel *sch) |
1212 | io_subchannel_ioterm(struct device *dev) | ||
1213 | { | 1240 | { |
1214 | struct ccw_device *cdev; | 1241 | struct ccw_device *cdev; |
1215 | 1242 | ||
1216 | cdev = dev->driver_data; | 1243 | cdev = sch_get_cdev(sch); |
1217 | if (!cdev) | 1244 | if (!cdev) |
1218 | return; | 1245 | return; |
1219 | /* Internal I/O will be retried by the interrupt handler. */ | 1246 | /* Internal I/O will be retried by the interrupt handler. */ |
@@ -1231,7 +1258,7 @@ io_subchannel_shutdown(struct subchannel *sch) | |||
1231 | struct ccw_device *cdev; | 1258 | struct ccw_device *cdev; |
1232 | int ret; | 1259 | int ret; |
1233 | 1260 | ||
1234 | cdev = sch->dev.driver_data; | 1261 | cdev = sch_get_cdev(sch); |
1235 | 1262 | ||
1236 | if (cio_is_console(sch->schid)) | 1263 | if (cio_is_console(sch->schid)) |
1237 | return; | 1264 | return; |
@@ -1271,6 +1298,9 @@ ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch) | |||
1271 | { | 1298 | { |
1272 | int rc; | 1299 | int rc; |
1273 | 1300 | ||
1301 | /* Attach subchannel private data. */ | ||
1302 | sch->private = cio_get_console_priv(); | ||
1303 | memset(sch->private, 0, sizeof(struct io_subchannel_private)); | ||
1274 | /* Initialize the ccw_device structure. */ | 1304 | /* Initialize the ccw_device structure. */ |
1275 | cdev->dev.parent= &sch->dev; | 1305 | cdev->dev.parent= &sch->dev; |
1276 | rc = io_subchannel_recog(cdev, sch); | 1306 | rc = io_subchannel_recog(cdev, sch); |
@@ -1456,6 +1486,7 @@ int ccw_driver_register(struct ccw_driver *cdriver) | |||
1456 | 1486 | ||
1457 | drv->bus = &ccw_bus_type; | 1487 | drv->bus = &ccw_bus_type; |
1458 | drv->name = cdriver->name; | 1488 | drv->name = cdriver->name; |
1489 | drv->owner = cdriver->owner; | ||
1459 | 1490 | ||
1460 | return driver_register(drv); | 1491 | return driver_register(drv); |
1461 | } | 1492 | } |
@@ -1481,6 +1512,60 @@ ccw_device_get_subchannel_id(struct ccw_device *cdev) | |||
1481 | return sch->schid; | 1512 | return sch->schid; |
1482 | } | 1513 | } |
1483 | 1514 | ||
1515 | static int recovery_check(struct device *dev, void *data) | ||
1516 | { | ||
1517 | struct ccw_device *cdev = to_ccwdev(dev); | ||
1518 | int *redo = data; | ||
1519 | |||
1520 | spin_lock_irq(cdev->ccwlock); | ||
1521 | switch (cdev->private->state) { | ||
1522 | case DEV_STATE_DISCONNECTED: | ||
1523 | CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n", | ||
1524 | cdev->private->dev_id.ssid, | ||
1525 | cdev->private->dev_id.devno); | ||
1526 | dev_fsm_event(cdev, DEV_EVENT_VERIFY); | ||
1527 | *redo = 1; | ||
1528 | break; | ||
1529 | case DEV_STATE_DISCONNECTED_SENSE_ID: | ||
1530 | *redo = 1; | ||
1531 | break; | ||
1532 | } | ||
1533 | spin_unlock_irq(cdev->ccwlock); | ||
1534 | |||
1535 | return 0; | ||
1536 | } | ||
1537 | |||
1538 | static void recovery_func(unsigned long data) | ||
1539 | { | ||
1540 | int redo = 0; | ||
1541 | |||
1542 | bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check); | ||
1543 | if (redo) { | ||
1544 | spin_lock_irq(&recovery_lock); | ||
1545 | if (!timer_pending(&recovery_timer)) { | ||
1546 | if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1) | ||
1547 | recovery_phase++; | ||
1548 | mod_timer(&recovery_timer, jiffies + | ||
1549 | recovery_delay[recovery_phase] * HZ); | ||
1550 | } | ||
1551 | spin_unlock_irq(&recovery_lock); | ||
1552 | } else | ||
1553 | CIO_MSG_EVENT(2, "recovery: end\n"); | ||
1554 | } | ||
1555 | |||
1556 | void ccw_device_schedule_recovery(void) | ||
1557 | { | ||
1558 | unsigned long flags; | ||
1559 | |||
1560 | CIO_MSG_EVENT(2, "recovery: schedule\n"); | ||
1561 | spin_lock_irqsave(&recovery_lock, flags); | ||
1562 | if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) { | ||
1563 | recovery_phase = 0; | ||
1564 | mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ); | ||
1565 | } | ||
1566 | spin_unlock_irqrestore(&recovery_lock, flags); | ||
1567 | } | ||
1568 | |||
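The new recovery machinery retries disconnected ccw devices with a growing delay: ccw_device_schedule_recovery() starts at the first entry of recovery_delay[] = { 3, 30, 300 } seconds, and each recovery_func() pass that still finds disconnected devices re-arms the timer one step further along. The re-arm step as a standalone sketch (rearm_recovery() is a made-up name; the real code does this under recovery_lock):

static void rearm_recovery(void)
{
	if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
		recovery_phase++;
	mod_timer(&recovery_timer,
		  jiffies + recovery_delay[recovery_phase] * HZ);
}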
1484 | MODULE_LICENSE("GPL"); | 1569 | MODULE_LICENSE("GPL"); |
1485 | EXPORT_SYMBOL(ccw_device_set_online); | 1570 | EXPORT_SYMBOL(ccw_device_set_online); |
1486 | EXPORT_SYMBOL(ccw_device_set_offline); | 1571 | EXPORT_SYMBOL(ccw_device_set_offline); |
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index 0d4089600439..d40a2ffaa000 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h | |||
@@ -5,6 +5,8 @@ | |||
5 | #include <asm/atomic.h> | 5 | #include <asm/atomic.h> |
6 | #include <linux/wait.h> | 6 | #include <linux/wait.h> |
7 | 7 | ||
8 | #include "io_sch.h" | ||
9 | |||
8 | /* | 10 | /* |
9 | * states of the device statemachine | 11 | * states of the device statemachine |
10 | */ | 12 | */ |
@@ -74,7 +76,6 @@ extern struct workqueue_struct *ccw_device_notify_work; | |||
74 | extern wait_queue_head_t ccw_device_init_wq; | 76 | extern wait_queue_head_t ccw_device_init_wq; |
75 | extern atomic_t ccw_device_init_count; | 77 | extern atomic_t ccw_device_init_count; |
76 | 78 | ||
77 | void io_subchannel_irq (struct device *pdev); | ||
78 | void io_subchannel_recog_done(struct ccw_device *cdev); | 79 | void io_subchannel_recog_done(struct ccw_device *cdev); |
79 | 80 | ||
80 | int ccw_device_cancel_halt_clear(struct ccw_device *); | 81 | int ccw_device_cancel_halt_clear(struct ccw_device *); |
@@ -87,6 +88,8 @@ int ccw_device_recognition(struct ccw_device *); | |||
87 | int ccw_device_online(struct ccw_device *); | 88 | int ccw_device_online(struct ccw_device *); |
88 | int ccw_device_offline(struct ccw_device *); | 89 | int ccw_device_offline(struct ccw_device *); |
89 | 90 | ||
91 | void ccw_device_schedule_recovery(void); | ||
92 | |||
90 | /* Function prototypes for device status and basic sense stuff. */ | 93 | /* Function prototypes for device status and basic sense stuff. */ |
91 | void ccw_device_accumulate_irb(struct ccw_device *, struct irb *); | 94 | void ccw_device_accumulate_irb(struct ccw_device *, struct irb *); |
92 | void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *); | 95 | void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *); |
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index bfad421cda66..4b92c84fb438 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
@@ -25,14 +25,16 @@ | |||
25 | #include "ioasm.h" | 25 | #include "ioasm.h" |
26 | #include "chp.h" | 26 | #include "chp.h" |
27 | 27 | ||
28 | static int timeout_log_enabled; | ||
29 | |||
28 | int | 30 | int |
29 | device_is_online(struct subchannel *sch) | 31 | device_is_online(struct subchannel *sch) |
30 | { | 32 | { |
31 | struct ccw_device *cdev; | 33 | struct ccw_device *cdev; |
32 | 34 | ||
33 | if (!sch->dev.driver_data) | 35 | cdev = sch_get_cdev(sch); |
36 | if (!cdev) | ||
34 | return 0; | 37 | return 0; |
35 | cdev = sch->dev.driver_data; | ||
36 | return (cdev->private->state == DEV_STATE_ONLINE); | 38 | return (cdev->private->state == DEV_STATE_ONLINE); |
37 | } | 39 | } |
38 | 40 | ||
@@ -41,9 +43,9 @@ device_is_disconnected(struct subchannel *sch) | |||
41 | { | 43 | { |
42 | struct ccw_device *cdev; | 44 | struct ccw_device *cdev; |
43 | 45 | ||
44 | if (!sch->dev.driver_data) | 46 | cdev = sch_get_cdev(sch); |
47 | if (!cdev) | ||
45 | return 0; | 48 | return 0; |
46 | cdev = sch->dev.driver_data; | ||
47 | return (cdev->private->state == DEV_STATE_DISCONNECTED || | 49 | return (cdev->private->state == DEV_STATE_DISCONNECTED || |
48 | cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID); | 50 | cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID); |
49 | } | 51 | } |
@@ -53,19 +55,21 @@ device_set_disconnected(struct subchannel *sch) | |||
53 | { | 55 | { |
54 | struct ccw_device *cdev; | 56 | struct ccw_device *cdev; |
55 | 57 | ||
56 | if (!sch->dev.driver_data) | 58 | cdev = sch_get_cdev(sch); |
59 | if (!cdev) | ||
57 | return; | 60 | return; |
58 | cdev = sch->dev.driver_data; | ||
59 | ccw_device_set_timeout(cdev, 0); | 61 | ccw_device_set_timeout(cdev, 0); |
60 | cdev->private->flags.fake_irb = 0; | 62 | cdev->private->flags.fake_irb = 0; |
61 | cdev->private->state = DEV_STATE_DISCONNECTED; | 63 | cdev->private->state = DEV_STATE_DISCONNECTED; |
64 | if (cdev->online) | ||
65 | ccw_device_schedule_recovery(); | ||
62 | } | 66 | } |
63 | 67 | ||
64 | void device_set_intretry(struct subchannel *sch) | 68 | void device_set_intretry(struct subchannel *sch) |
65 | { | 69 | { |
66 | struct ccw_device *cdev; | 70 | struct ccw_device *cdev; |
67 | 71 | ||
68 | cdev = sch->dev.driver_data; | 72 | cdev = sch_get_cdev(sch); |
69 | if (!cdev) | 73 | if (!cdev) |
70 | return; | 74 | return; |
71 | cdev->private->flags.intretry = 1; | 75 | cdev->private->flags.intretry = 1; |
@@ -75,13 +79,62 @@ int device_trigger_verify(struct subchannel *sch) | |||
75 | { | 79 | { |
76 | struct ccw_device *cdev; | 80 | struct ccw_device *cdev; |
77 | 81 | ||
78 | cdev = sch->dev.driver_data; | 82 | cdev = sch_get_cdev(sch); |
79 | if (!cdev || !cdev->online) | 83 | if (!cdev || !cdev->online) |
80 | return -EINVAL; | 84 | return -EINVAL; |
81 | dev_fsm_event(cdev, DEV_EVENT_VERIFY); | 85 | dev_fsm_event(cdev, DEV_EVENT_VERIFY); |
82 | return 0; | 86 | return 0; |
83 | } | 87 | } |
84 | 88 | ||
89 | static int __init ccw_timeout_log_setup(char *unused) | ||
90 | { | ||
91 | timeout_log_enabled = 1; | ||
92 | return 1; | ||
93 | } | ||
94 | |||
95 | __setup("ccw_timeout_log", ccw_timeout_log_setup); | ||
96 | |||
97 | static void ccw_timeout_log(struct ccw_device *cdev) | ||
98 | { | ||
99 | struct schib schib; | ||
100 | struct subchannel *sch; | ||
101 | struct io_subchannel_private *private; | ||
102 | int cc; | ||
103 | |||
104 | sch = to_subchannel(cdev->dev.parent); | ||
105 | private = to_io_private(sch); | ||
106 | cc = stsch(sch->schid, &schib); | ||
107 | |||
108 | printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " | ||
109 | "device information:\n", get_clock()); | ||
110 | printk(KERN_WARNING "cio: orb:\n"); | ||
111 | print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, | ||
112 | &private->orb, sizeof(private->orb), 0); | ||
113 | printk(KERN_WARNING "cio: ccw device bus id: %s\n", cdev->dev.bus_id); | ||
114 | printk(KERN_WARNING "cio: subchannel bus id: %s\n", sch->dev.bus_id); | ||
115 | printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, " | ||
116 | "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm); | ||
117 | |||
118 | if ((void *)(addr_t)private->orb.cpa == &private->sense_ccw || | ||
119 | (void *)(addr_t)private->orb.cpa == cdev->private->iccws) | ||
120 | printk(KERN_WARNING "cio: last channel program (intern):\n"); | ||
121 | else | ||
122 | printk(KERN_WARNING "cio: last channel program:\n"); | ||
123 | |||
124 | print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, | ||
125 | (void *)(addr_t)private->orb.cpa, | ||
126 | sizeof(struct ccw1), 0); | ||
127 | printk(KERN_WARNING "cio: ccw device state: %d\n", | ||
128 | cdev->private->state); | ||
129 | printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc); | ||
130 | printk(KERN_WARNING "cio: schib:\n"); | ||
131 | print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, | ||
132 | &schib, sizeof(schib), 0); | ||
133 | printk(KERN_WARNING "cio: ccw device flags:\n"); | ||
134 | print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, | ||
135 | &cdev->private->flags, sizeof(cdev->private->flags), 0); | ||
136 | } | ||
137 | |||
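ccw_timeout_log() is compiled in unconditionally but runs only when the kernel is booted with the new ccw_timeout_log parameter; the __setup() handler merely flips timeout_log_enabled. A generic sketch of that gate with invented names (returning 1 tells the early-parameter parser the option was consumed):

static int dump_enabled;	/* checked by the timeout handler */

static int __init dump_enable_setup(char *unused)
{
	dump_enabled = 1;
	return 1;
}
__setup("example_timeout_dump", dump_enable_setup);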
85 | /* | 138 | /* |
86 | * Timeout function. It just triggers a DEV_EVENT_TIMEOUT. | 139 | * Timeout function. It just triggers a DEV_EVENT_TIMEOUT. |
87 | */ | 140 | */ |
@@ -92,6 +145,8 @@ ccw_device_timeout(unsigned long data) | |||
92 | 145 | ||
93 | cdev = (struct ccw_device *) data; | 146 | cdev = (struct ccw_device *) data; |
94 | spin_lock_irq(cdev->ccwlock); | 147 | spin_lock_irq(cdev->ccwlock); |
148 | if (timeout_log_enabled) | ||
149 | ccw_timeout_log(cdev); | ||
95 | dev_fsm_event(cdev, DEV_EVENT_TIMEOUT); | 150 | dev_fsm_event(cdev, DEV_EVENT_TIMEOUT); |
96 | spin_unlock_irq(cdev->ccwlock); | 151 | spin_unlock_irq(cdev->ccwlock); |
97 | } | 152 | } |
@@ -122,9 +177,9 @@ device_kill_pending_timer(struct subchannel *sch) | |||
122 | { | 177 | { |
123 | struct ccw_device *cdev; | 178 | struct ccw_device *cdev; |
124 | 179 | ||
125 | if (!sch->dev.driver_data) | 180 | cdev = sch_get_cdev(sch); |
181 | if (!cdev) | ||
126 | return; | 182 | return; |
127 | cdev = sch->dev.driver_data; | ||
128 | ccw_device_set_timeout(cdev, 0); | 183 | ccw_device_set_timeout(cdev, 0); |
129 | } | 184 | } |
130 | 185 | ||
@@ -268,7 +323,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state) | |||
268 | switch (state) { | 323 | switch (state) { |
269 | case DEV_STATE_NOT_OPER: | 324 | case DEV_STATE_NOT_OPER: |
270 | CIO_DEBUG(KERN_WARNING, 2, | 325 | CIO_DEBUG(KERN_WARNING, 2, |
271 | "cio: SenseID : unknown device %04x on subchannel " | 326 | "SenseID : unknown device %04x on subchannel " |
272 | "0.%x.%04x\n", cdev->private->dev_id.devno, | 327 | "0.%x.%04x\n", cdev->private->dev_id.devno, |
273 | sch->schid.ssid, sch->schid.sch_no); | 328 | sch->schid.ssid, sch->schid.sch_no); |
274 | break; | 329 | break; |
@@ -294,7 +349,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state) | |||
294 | } | 349 | } |
295 | /* Issue device info message. */ | 350 | /* Issue device info message. */ |
296 | CIO_DEBUG(KERN_INFO, 2, | 351 | CIO_DEBUG(KERN_INFO, 2, |
297 | "cio: SenseID : device 0.%x.%04x reports: " | 352 | "SenseID : device 0.%x.%04x reports: " |
298 | "CU Type/Mod = %04X/%02X, Dev Type/Mod = " | 353 | "CU Type/Mod = %04X/%02X, Dev Type/Mod = " |
299 | "%04X/%02X\n", | 354 | "%04X/%02X\n", |
300 | cdev->private->dev_id.ssid, | 355 | cdev->private->dev_id.ssid, |
@@ -304,7 +359,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state) | |||
304 | break; | 359 | break; |
305 | case DEV_STATE_BOXED: | 360 | case DEV_STATE_BOXED: |
306 | CIO_DEBUG(KERN_WARNING, 2, | 361 | CIO_DEBUG(KERN_WARNING, 2, |
307 | "cio: SenseID : boxed device %04x on subchannel " | 362 | "SenseID : boxed device %04x on subchannel " |
308 | "0.%x.%04x\n", cdev->private->dev_id.devno, | 363 | "0.%x.%04x\n", cdev->private->dev_id.devno, |
309 | sch->schid.ssid, sch->schid.sch_no); | 364 | sch->schid.ssid, sch->schid.sch_no); |
310 | break; | 365 | break; |
@@ -349,7 +404,7 @@ ccw_device_oper_notify(struct work_struct *work) | |||
349 | sch = to_subchannel(cdev->dev.parent); | 404 | sch = to_subchannel(cdev->dev.parent); |
350 | if (sch->driver && sch->driver->notify) { | 405 | if (sch->driver && sch->driver->notify) { |
351 | spin_unlock_irqrestore(cdev->ccwlock, flags); | 406 | spin_unlock_irqrestore(cdev->ccwlock, flags); |
352 | ret = sch->driver->notify(&sch->dev, CIO_OPER); | 407 | ret = sch->driver->notify(sch, CIO_OPER); |
353 | spin_lock_irqsave(cdev->ccwlock, flags); | 408 | spin_lock_irqsave(cdev->ccwlock, flags); |
354 | } else | 409 | } else |
355 | ret = 0; | 410 | ret = 0; |
@@ -389,7 +444,7 @@ ccw_device_done(struct ccw_device *cdev, int state) | |||
389 | 444 | ||
390 | if (state == DEV_STATE_BOXED) | 445 | if (state == DEV_STATE_BOXED) |
391 | CIO_DEBUG(KERN_WARNING, 2, | 446 | CIO_DEBUG(KERN_WARNING, 2, |
392 | "cio: Boxed device %04x on subchannel %04x\n", | 447 | "Boxed device %04x on subchannel %04x\n", |
393 | cdev->private->dev_id.devno, sch->schid.sch_no); | 448 | cdev->private->dev_id.devno, sch->schid.sch_no); |
394 | 449 | ||
395 | if (cdev->private->flags.donotify) { | 450 | if (cdev->private->flags.donotify) { |
@@ -500,7 +555,8 @@ ccw_device_recognition(struct ccw_device *cdev) | |||
500 | (cdev->private->state != DEV_STATE_BOXED)) | 555 | (cdev->private->state != DEV_STATE_BOXED)) |
501 | return -EINVAL; | 556 | return -EINVAL; |
502 | sch = to_subchannel(cdev->dev.parent); | 557 | sch = to_subchannel(cdev->dev.parent); |
503 | ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc); | 558 | ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc, |
559 | (u32)(addr_t)sch); | ||
504 | if (ret != 0) | 560 | if (ret != 0) |
505 | /* Couldn't enable the subchannel for i/o. Sick device. */ | 561 | /* Couldn't enable the subchannel for i/o. Sick device. */ |
506 | return ret; | 562 | return ret; |
@@ -587,9 +643,10 @@ ccw_device_verify_done(struct ccw_device *cdev, int err) | |||
587 | default: | 643 | default: |
588 | /* Reset oper notify indication after verify error. */ | 644 | /* Reset oper notify indication after verify error. */ |
589 | cdev->private->flags.donotify = 0; | 645 | cdev->private->flags.donotify = 0; |
590 | if (cdev->online) | 646 | if (cdev->online) { |
647 | ccw_device_set_timeout(cdev, 0); | ||
591 | dev_fsm_event(cdev, DEV_EVENT_NOTOPER); | 648 | dev_fsm_event(cdev, DEV_EVENT_NOTOPER); |
592 | else | 649 | } else |
593 | ccw_device_done(cdev, DEV_STATE_NOT_OPER); | 650 | ccw_device_done(cdev, DEV_STATE_NOT_OPER); |
594 | break; | 651 | break; |
595 | } | 652 | } |
@@ -610,7 +667,8 @@ ccw_device_online(struct ccw_device *cdev) | |||
610 | sch = to_subchannel(cdev->dev.parent); | 667 | sch = to_subchannel(cdev->dev.parent); |
611 | if (css_init_done && !get_device(&cdev->dev)) | 668 | if (css_init_done && !get_device(&cdev->dev)) |
612 | return -ENODEV; | 669 | return -ENODEV; |
613 | ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc); | 670 | ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc, |
671 | (u32)(addr_t)sch); | ||
614 | if (ret != 0) { | 672 | if (ret != 0) { |
615 | /* Couldn't enable the subchannel for i/o. Sick device. */ | 673 | /* Couldn't enable the subchannel for i/o. Sick device. */ |
616 | if (ret == -ENODEV) | 674 | if (ret == -ENODEV) |
@@ -937,7 +995,7 @@ void device_kill_io(struct subchannel *sch) | |||
937 | int ret; | 995 | int ret; |
938 | struct ccw_device *cdev; | 996 | struct ccw_device *cdev; |
939 | 997 | ||
940 | cdev = sch->dev.driver_data; | 998 | cdev = sch_get_cdev(sch); |
941 | ret = ccw_device_cancel_halt_clear(cdev); | 999 | ret = ccw_device_cancel_halt_clear(cdev); |
942 | if (ret == -EBUSY) { | 1000 | if (ret == -EBUSY) { |
943 | ccw_device_set_timeout(cdev, 3*HZ); | 1001 | ccw_device_set_timeout(cdev, 3*HZ); |
@@ -990,7 +1048,8 @@ ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event) | |||
990 | struct subchannel *sch; | 1048 | struct subchannel *sch; |
991 | 1049 | ||
992 | sch = to_subchannel(cdev->dev.parent); | 1050 | sch = to_subchannel(cdev->dev.parent); |
993 | if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0) | 1051 | if (cio_enable_subchannel(sch, sch->schib.pmcw.isc, |
1052 | (u32)(addr_t)sch) != 0) | ||
994 | /* Couldn't enable the subchannel for i/o. Sick device. */ | 1053 | /* Couldn't enable the subchannel for i/o. Sick device. */ |
995 | return; | 1054 | return; |
996 | 1055 | ||
@@ -1006,9 +1065,9 @@ device_trigger_reprobe(struct subchannel *sch) | |||
1006 | { | 1065 | { |
1007 | struct ccw_device *cdev; | 1066 | struct ccw_device *cdev; |
1008 | 1067 | ||
1009 | if (!sch->dev.driver_data) | 1068 | cdev = sch_get_cdev(sch); |
1069 | if (!cdev) | ||
1010 | return; | 1070 | return; |
1011 | cdev = sch->dev.driver_data; | ||
1012 | if (cdev->private->state != DEV_STATE_DISCONNECTED) | 1071 | if (cdev->private->state != DEV_STATE_DISCONNECTED) |
1013 | return; | 1072 | return; |
1014 | 1073 | ||
@@ -1028,7 +1087,7 @@ device_trigger_reprobe(struct subchannel *sch) | |||
1028 | sch->schib.pmcw.ena = 0; | 1087 | sch->schib.pmcw.ena = 0; |
1029 | if ((sch->lpm & (sch->lpm - 1)) != 0) | 1088 | if ((sch->lpm & (sch->lpm - 1)) != 0) |
1030 | sch->schib.pmcw.mp = 1; | 1089 | sch->schib.pmcw.mp = 1; |
1031 | sch->schib.pmcw.intparm = (__u32)(unsigned long)sch; | 1090 | sch->schib.pmcw.intparm = (u32)(addr_t)sch; |
1032 | /* We should also udate ssd info, but this has to wait. */ | 1091 | /* We should also udate ssd info, but this has to wait. */ |
1033 | /* Check if this is another device which appeared on the same sch. */ | 1092 | /* Check if this is another device which appeared on the same sch. */ |
1034 | if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { | 1093 | if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { |
@@ -1223,21 +1282,4 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = { | |||
1223 | }, | 1282 | }, |
1224 | }; | 1283 | }; |
1225 | 1284 | ||
1226 | /* | ||
1227 | * io_subchannel_irq is called for "real" interrupts or for status | ||
1228 | * pending conditions on msch. | ||
1229 | */ | ||
1230 | void | ||
1231 | io_subchannel_irq (struct device *pdev) | ||
1232 | { | ||
1233 | struct ccw_device *cdev; | ||
1234 | |||
1235 | cdev = to_subchannel(pdev)->dev.driver_data; | ||
1236 | |||
1237 | CIO_TRACE_EVENT (3, "IRQ"); | ||
1238 | CIO_TRACE_EVENT (3, pdev->bus_id); | ||
1239 | if (cdev) | ||
1240 | dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); | ||
1241 | } | ||
1242 | |||
1243 | EXPORT_SYMBOL_GPL(ccw_device_set_timeout); | 1285 | EXPORT_SYMBOL_GPL(ccw_device_set_timeout); |
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c index 156f3f9786b5..918b8b89cf9a 100644 --- a/drivers/s390/cio/device_id.c +++ b/drivers/s390/cio/device_id.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include "css.h" | 24 | #include "css.h" |
25 | #include "device.h" | 25 | #include "device.h" |
26 | #include "ioasm.h" | 26 | #include "ioasm.h" |
27 | #include "io_sch.h" | ||
27 | 28 | ||
28 | /* | 29 | /* |
29 | * Input : | 30 | * Input : |
@@ -219,11 +220,13 @@ ccw_device_check_sense_id(struct ccw_device *cdev) | |||
219 | return -EAGAIN; | 220 | return -EAGAIN; |
220 | } | 221 | } |
221 | if (irb->scsw.cc == 3) { | 222 | if (irb->scsw.cc == 3) { |
222 | if ((sch->orb.lpm & | 223 | u8 lpm; |
223 | sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0) | 224 | |
225 | lpm = to_io_private(sch)->orb.lpm; | ||
226 | if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0) | ||
224 | CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x " | 227 | CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x " |
225 | "on subchannel 0.%x.%04x is " | 228 | "on subchannel 0.%x.%04x is " |
226 | "'not operational'\n", sch->orb.lpm, | 229 | "'not operational'\n", lpm, |
227 | cdev->private->dev_id.devno, | 230 | cdev->private->dev_id.devno, |
228 | sch->schid.ssid, sch->schid.sch_no); | 231 | sch->schid.ssid, sch->schid.sch_no); |
229 | return -EACCES; | 232 | return -EACCES; |
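The per-subchannel ORB and sense CCW no longer live in struct subchannel itself; device_id.c, device_pgid.c and device_status.c reach them through to_io_private(), which casts sch->private to the new struct io_subchannel_private. In brief:

	struct io_subchannel_private *priv = to_io_private(sch);
	u8 lpm;

	lpm = priv->orb.lpm;				/* was: sch->orb.lpm */
	priv->sense_ccw.cmd_code = CCW_CMD_BASIC_SENSE;	/* was: sch->sense_ccw.cmd_code */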
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index 7fd2dadc3297..49b58eb0fab8 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c | |||
@@ -501,7 +501,7 @@ ccw_device_stlck(struct ccw_device *cdev) | |||
501 | return -ENOMEM; | 501 | return -ENOMEM; |
502 | } | 502 | } |
503 | spin_lock_irqsave(sch->lock, flags); | 503 | spin_lock_irqsave(sch->lock, flags); |
504 | ret = cio_enable_subchannel(sch, 3); | 504 | ret = cio_enable_subchannel(sch, 3, (u32)(addr_t)sch); |
505 | if (ret) | 505 | if (ret) |
506 | goto out_unlock; | 506 | goto out_unlock; |
507 | /* | 507 | /* |
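cio_enable_subchannel() grows a third argument, the interruption parameter, and every caller converted here passes the subchannel address, matching the intparm programmed into the PMCW in device_trigger_reprobe() above. A sketch of the converted call (the error label is the one used in ccw_device_stlck() above):

	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc, (u32)(addr_t)sch);
	if (ret)
		goto out_unlock;	/* could not enable the subchannel for I/O */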
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c index cb1879a96818..c52449a1f9fc 100644 --- a/drivers/s390/cio/device_pgid.c +++ b/drivers/s390/cio/device_pgid.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include "css.h" | 22 | #include "css.h" |
23 | #include "device.h" | 23 | #include "device.h" |
24 | #include "ioasm.h" | 24 | #include "ioasm.h" |
25 | #include "io_sch.h" | ||
25 | 26 | ||
26 | /* | 27 | /* |
27 | * Helper function called from interrupt context to decide whether an | 28 | * Helper function called from interrupt context to decide whether an |
@@ -155,10 +156,13 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev) | |||
155 | return -EAGAIN; | 156 | return -EAGAIN; |
156 | } | 157 | } |
157 | if (irb->scsw.cc == 3) { | 158 | if (irb->scsw.cc == 3) { |
159 | u8 lpm; | ||
160 | |||
161 | lpm = to_io_private(sch)->orb.lpm; | ||
158 | CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x," | 162 | CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x," |
159 | " lpm %02X, became 'not operational'\n", | 163 | " lpm %02X, became 'not operational'\n", |
160 | cdev->private->dev_id.devno, sch->schid.ssid, | 164 | cdev->private->dev_id.devno, sch->schid.ssid, |
161 | sch->schid.sch_no, sch->orb.lpm); | 165 | sch->schid.sch_no, lpm); |
162 | return -EACCES; | 166 | return -EACCES; |
163 | } | 167 | } |
164 | i = 8 - ffs(cdev->private->imask); | 168 | i = 8 - ffs(cdev->private->imask); |
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c index aa96e6752592..ebe0848cfe33 100644 --- a/drivers/s390/cio/device_status.c +++ b/drivers/s390/cio/device_status.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include "css.h" | 20 | #include "css.h" |
21 | #include "device.h" | 21 | #include "device.h" |
22 | #include "ioasm.h" | 22 | #include "ioasm.h" |
23 | #include "io_sch.h" | ||
23 | 24 | ||
24 | /* | 25 | /* |
25 | * Check for any kind of channel or interface control check but don't | 26 | * Check for any kind of channel or interface control check but don't |
@@ -310,6 +311,7 @@ int | |||
310 | ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb) | 311 | ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb) |
311 | { | 312 | { |
312 | struct subchannel *sch; | 313 | struct subchannel *sch; |
314 | struct ccw1 *sense_ccw; | ||
313 | 315 | ||
314 | sch = to_subchannel(cdev->dev.parent); | 316 | sch = to_subchannel(cdev->dev.parent); |
315 | 317 | ||
@@ -326,15 +328,16 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb) | |||
326 | /* | 328 | /* |
327 | * We have ending status but no sense information. Do a basic sense. | 329 | * We have ending status but no sense information. Do a basic sense. |
328 | */ | 330 | */ |
329 | sch->sense_ccw.cmd_code = CCW_CMD_BASIC_SENSE; | 331 | sense_ccw = &to_io_private(sch)->sense_ccw; |
330 | sch->sense_ccw.cda = (__u32) __pa(cdev->private->irb.ecw); | 332 | sense_ccw->cmd_code = CCW_CMD_BASIC_SENSE; |
331 | sch->sense_ccw.count = SENSE_MAX_COUNT; | 333 | sense_ccw->cda = (__u32) __pa(cdev->private->irb.ecw); |
332 | sch->sense_ccw.flags = CCW_FLAG_SLI; | 334 | sense_ccw->count = SENSE_MAX_COUNT; |
335 | sense_ccw->flags = CCW_FLAG_SLI; | ||
333 | 336 | ||
334 | /* Reset internal retry indication. */ | 337 | /* Reset internal retry indication. */ |
335 | cdev->private->flags.intretry = 0; | 338 | cdev->private->flags.intretry = 0; |
336 | 339 | ||
337 | return cio_start (sch, &sch->sense_ccw, 0xff); | 340 | return cio_start(sch, sense_ccw, 0xff); |
338 | } | 341 | } |
339 | 342 | ||
340 | /* | 343 | /* |
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h new file mode 100644 index 000000000000..8c613160bfce --- /dev/null +++ b/drivers/s390/cio/io_sch.h | |||
@@ -0,0 +1,163 @@ | |||
1 | #ifndef S390_IO_SCH_H | ||
2 | #define S390_IO_SCH_H | ||
3 | |||
4 | #include "schid.h" | ||
5 | |||
6 | /* | ||
7 | * operation request block | ||
8 | */ | ||
9 | struct orb { | ||
10 | u32 intparm; /* interruption parameter */ | ||
11 | u32 key : 4; /* flags, like key, suspend control, etc. */ | ||
12 | u32 spnd : 1; /* suspend control */ | ||
13 | u32 res1 : 1; /* reserved */ | ||
14 | u32 mod : 1; /* modification control */ | ||
15 | u32 sync : 1; /* synchronize control */ | ||
16 | u32 fmt : 1; /* format control */ | ||
17 | u32 pfch : 1; /* prefetch control */ | ||
18 | u32 isic : 1; /* initial-status-interruption control */ | ||
19 | u32 alcc : 1; /* address-limit-checking control */ | ||
20 | u32 ssic : 1; /* suppress-suspended-interr. control */ | ||
21 | u32 res2 : 1; /* reserved */ | ||
22 | u32 c64 : 1; /* IDAW/QDIO 64 bit control */ | ||
23 | u32 i2k : 1; /* IDAW 2/4kB block size control */ | ||
24 | u32 lpm : 8; /* logical path mask */ | ||
25 | u32 ils : 1; /* incorrect length */ | ||
26 | u32 zero : 6; /* reserved zeros */ | ||
27 | u32 orbx : 1; /* ORB extension control */ | ||
28 | u32 cpa; /* channel program address */ | ||
29 | } __attribute__ ((packed, aligned(4))); | ||
30 | |||
31 | struct io_subchannel_private { | ||
32 | struct orb orb; /* operation request block */ | ||
33 | struct ccw1 sense_ccw; /* static ccw for sense command */ | ||
34 | } __attribute__ ((aligned(8))); | ||
35 | |||
36 | #define to_io_private(n) ((struct io_subchannel_private *)n->private) | ||
37 | #define sch_get_cdev(n) (dev_get_drvdata(&n->dev)) | ||
38 | #define sch_set_cdev(n, c) (dev_set_drvdata(&n->dev, c)) | ||
39 | |||
40 | #define MAX_CIWS 8 | ||
41 | |||
42 | /* | ||
43 | * sense-id response buffer layout | ||
44 | */ | ||
45 | struct senseid { | ||
46 | /* common part */ | ||
47 | u8 reserved; /* always 0x'FF' */ | ||
48 | u16 cu_type; /* control unit type */ | ||
49 | u8 cu_model; /* control unit model */ | ||
50 | u16 dev_type; /* device type */ | ||
51 | u8 dev_model; /* device model */ | ||
52 | u8 unused; /* padding byte */ | ||
53 | /* extended part */ | ||
54 | struct ciw ciw[MAX_CIWS]; /* variable # of CIWs */ | ||
55 | } __attribute__ ((packed, aligned(4))); | ||
56 | |||
57 | struct ccw_device_private { | ||
58 | struct ccw_device *cdev; | ||
59 | struct subchannel *sch; | ||
60 | int state; /* device state */ | ||
61 | atomic_t onoff; | ||
62 | unsigned long registered; | ||
63 | struct ccw_dev_id dev_id; /* device id */ | ||
64 | struct subchannel_id schid; /* subchannel number */ | ||
65 | u8 imask; /* lpm mask for SNID/SID/SPGID */ | ||
66 | int iretry; /* retry counter SNID/SID/SPGID */ | ||
67 | struct { | ||
68 | unsigned int fast:1; /* post with "channel end" */ | ||
69 | unsigned int repall:1; /* report every interrupt status */ | ||
70 | unsigned int pgroup:1; /* do path grouping */ | ||
71 | unsigned int force:1; /* allow forced online */ | ||
72 | } __attribute__ ((packed)) options; | ||
73 | struct { | ||
74 | unsigned int pgid_single:1; /* use single path for Set PGID */ | ||
75 | unsigned int esid:1; /* Ext. SenseID supported by HW */ | ||
76 | unsigned int dosense:1; /* delayed SENSE required */ | ||
77 | unsigned int doverify:1; /* delayed path verification */ | ||
78 | unsigned int donotify:1; /* call notify function */ | ||
79 | unsigned int recog_done:1; /* dev. recog. complete */ | ||
80 | unsigned int fake_irb:1; /* deliver faked irb */ | ||
81 | unsigned int intretry:1; /* retry internal operation */ | ||
82 | } __attribute__((packed)) flags; | ||
83 | unsigned long intparm; /* user interruption parameter */ | ||
84 | struct qdio_irq *qdio_data; | ||
85 | struct irb irb; /* device status */ | ||
86 | struct senseid senseid; /* SenseID info */ | ||
87 | struct pgid pgid[8]; /* path group IDs per chpid*/ | ||
88 | struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */ | ||
89 | struct work_struct kick_work; | ||
90 | wait_queue_head_t wait_q; | ||
91 | struct timer_list timer; | ||
92 | void *cmb; /* measurement information */ | ||
93 | struct list_head cmb_list; /* list of measured devices */ | ||
94 | u64 cmb_start_time; /* clock value of cmb reset */ | ||
95 | void *cmb_wait; /* deferred cmb enable/disable */ | ||
96 | }; | ||
97 | |||
98 | static inline int ssch(struct subchannel_id schid, volatile struct orb *addr) | ||
99 | { | ||
100 | register struct subchannel_id reg1 asm("1") = schid; | ||
101 | int ccode; | ||
102 | |||
103 | asm volatile( | ||
104 | " ssch 0(%2)\n" | ||
105 | " ipm %0\n" | ||
106 | " srl %0,28" | ||
107 | : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); | ||
108 | return ccode; | ||
109 | } | ||
110 | |||
111 | static inline int rsch(struct subchannel_id schid) | ||
112 | { | ||
113 | register struct subchannel_id reg1 asm("1") = schid; | ||
114 | int ccode; | ||
115 | |||
116 | asm volatile( | ||
117 | " rsch\n" | ||
118 | " ipm %0\n" | ||
119 | " srl %0,28" | ||
120 | : "=d" (ccode) : "d" (reg1) : "cc"); | ||
121 | return ccode; | ||
122 | } | ||
123 | |||
124 | static inline int csch(struct subchannel_id schid) | ||
125 | { | ||
126 | register struct subchannel_id reg1 asm("1") = schid; | ||
127 | int ccode; | ||
128 | |||
129 | asm volatile( | ||
130 | " csch\n" | ||
131 | " ipm %0\n" | ||
132 | " srl %0,28" | ||
133 | : "=d" (ccode) : "d" (reg1) : "cc"); | ||
134 | return ccode; | ||
135 | } | ||
136 | |||
137 | static inline int hsch(struct subchannel_id schid) | ||
138 | { | ||
139 | register struct subchannel_id reg1 asm("1") = schid; | ||
140 | int ccode; | ||
141 | |||
142 | asm volatile( | ||
143 | " hsch\n" | ||
144 | " ipm %0\n" | ||
145 | " srl %0,28" | ||
146 | : "=d" (ccode) : "d" (reg1) : "cc"); | ||
147 | return ccode; | ||
148 | } | ||
149 | |||
150 | static inline int xsch(struct subchannel_id schid) | ||
151 | { | ||
152 | register struct subchannel_id reg1 asm("1") = schid; | ||
153 | int ccode; | ||
154 | |||
155 | asm volatile( | ||
156 | " .insn rre,0xb2760000,%1,0\n" | ||
157 | " ipm %0\n" | ||
158 | " srl %0,28" | ||
159 | : "=d" (ccode) : "d" (reg1) : "cc"); | ||
160 | return ccode; | ||
161 | } | ||
162 | |||
163 | #endif | ||
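Note: each of the ssch/rsch/csch/hsch/xsch wrappers above issues the instruction and extracts the 2-bit condition code with ipm/srl, so callers get 0 to 3 back. A hedged sketch of how a caller might fold that into an errno-style result; the helper name and the exact mapping are illustrative assumptions, not taken from this patch:

        /* Illustrative only: translate the condition code from csch() into an
         * errno.  The mapping below is an assumption for the sketch. */
        static int clear_subchannel(struct subchannel_id schid)
        {
                switch (csch(schid)) {
                case 0:
                        return 0;        /* clear function initiated */
                case 1:
                case 2:
                        return -EBUSY;   /* status pending / busy */
                default:
                        return -ENODEV;  /* subchannel not operational */
                }
        }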
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h index 7153dd959082..652ea3625f9d 100644 --- a/drivers/s390/cio/ioasm.h +++ b/drivers/s390/cio/ioasm.h | |||
@@ -109,72 +109,6 @@ static inline int tpi( volatile struct tpi_info *addr) | |||
109 | return ccode; | 109 | return ccode; |
110 | } | 110 | } |
111 | 111 | ||
112 | static inline int ssch(struct subchannel_id schid, | ||
113 | volatile struct orb *addr) | ||
114 | { | ||
115 | register struct subchannel_id reg1 asm ("1") = schid; | ||
116 | int ccode; | ||
117 | |||
118 | asm volatile( | ||
119 | " ssch 0(%2)\n" | ||
120 | " ipm %0\n" | ||
121 | " srl %0,28" | ||
122 | : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); | ||
123 | return ccode; | ||
124 | } | ||
125 | |||
126 | static inline int rsch(struct subchannel_id schid) | ||
127 | { | ||
128 | register struct subchannel_id reg1 asm ("1") = schid; | ||
129 | int ccode; | ||
130 | |||
131 | asm volatile( | ||
132 | " rsch\n" | ||
133 | " ipm %0\n" | ||
134 | " srl %0,28" | ||
135 | : "=d" (ccode) : "d" (reg1) : "cc"); | ||
136 | return ccode; | ||
137 | } | ||
138 | |||
139 | static inline int csch(struct subchannel_id schid) | ||
140 | { | ||
141 | register struct subchannel_id reg1 asm ("1") = schid; | ||
142 | int ccode; | ||
143 | |||
144 | asm volatile( | ||
145 | " csch\n" | ||
146 | " ipm %0\n" | ||
147 | " srl %0,28" | ||
148 | : "=d" (ccode) : "d" (reg1) : "cc"); | ||
149 | return ccode; | ||
150 | } | ||
151 | |||
152 | static inline int hsch(struct subchannel_id schid) | ||
153 | { | ||
154 | register struct subchannel_id reg1 asm ("1") = schid; | ||
155 | int ccode; | ||
156 | |||
157 | asm volatile( | ||
158 | " hsch\n" | ||
159 | " ipm %0\n" | ||
160 | " srl %0,28" | ||
161 | : "=d" (ccode) : "d" (reg1) : "cc"); | ||
162 | return ccode; | ||
163 | } | ||
164 | |||
165 | static inline int xsch(struct subchannel_id schid) | ||
166 | { | ||
167 | register struct subchannel_id reg1 asm ("1") = schid; | ||
168 | int ccode; | ||
169 | |||
170 | asm volatile( | ||
171 | " .insn rre,0xb2760000,%1,0\n" | ||
172 | " ipm %0\n" | ||
173 | " srl %0,28" | ||
174 | : "=d" (ccode) : "d" (reg1) : "cc"); | ||
175 | return ccode; | ||
176 | } | ||
177 | |||
178 | static inline int chsc(void *chsc_area) | 112 | static inline int chsc(void *chsc_area) |
179 | { | 113 | { |
180 | typedef struct { char _[4096]; } addr_type; | 114 | typedef struct { char _[4096]; } addr_type; |
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c index 40a3208c7cf3..e2a781b6b21d 100644 --- a/drivers/s390/cio/qdio.c +++ b/drivers/s390/cio/qdio.c | |||
@@ -48,11 +48,11 @@ | |||
48 | #include <asm/debug.h> | 48 | #include <asm/debug.h> |
49 | #include <asm/s390_rdev.h> | 49 | #include <asm/s390_rdev.h> |
50 | #include <asm/qdio.h> | 50 | #include <asm/qdio.h> |
51 | #include <asm/airq.h> | ||
51 | 52 | ||
52 | #include "cio.h" | 53 | #include "cio.h" |
53 | #include "css.h" | 54 | #include "css.h" |
54 | #include "device.h" | 55 | #include "device.h" |
55 | #include "airq.h" | ||
56 | #include "qdio.h" | 56 | #include "qdio.h" |
57 | #include "ioasm.h" | 57 | #include "ioasm.h" |
58 | #include "chsc.h" | 58 | #include "chsc.h" |
@@ -96,7 +96,7 @@ static debug_info_t *qdio_dbf_slsb_in; | |||
96 | static volatile struct qdio_q *tiq_list=NULL; /* volatile as it could change | 96 | static volatile struct qdio_q *tiq_list=NULL; /* volatile as it could change |
97 | during a while loop */ | 97 | during a while loop */ |
98 | static DEFINE_SPINLOCK(ttiq_list_lock); | 98 | static DEFINE_SPINLOCK(ttiq_list_lock); |
99 | static int register_thinint_result; | 99 | static void *tiqdio_ind; |
100 | static void tiqdio_tl(unsigned long); | 100 | static void tiqdio_tl(unsigned long); |
101 | static DECLARE_TASKLET(tiqdio_tasklet,tiqdio_tl,0); | 101 | static DECLARE_TASKLET(tiqdio_tasklet,tiqdio_tl,0); |
102 | 102 | ||
@@ -399,7 +399,7 @@ qdio_get_indicator(void) | |||
399 | { | 399 | { |
400 | int i; | 400 | int i; |
401 | 401 | ||
402 | for (i=1;i<INDICATORS_PER_CACHELINE;i++) | 402 | for (i = 0; i < INDICATORS_PER_CACHELINE; i++) |
403 | if (!indicator_used[i]) { | 403 | if (!indicator_used[i]) { |
404 | indicator_used[i]=1; | 404 | indicator_used[i]=1; |
405 | return indicators+i; | 405 | return indicators+i; |
@@ -1408,8 +1408,7 @@ __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set) | |||
1408 | if (q->hydra_gives_outbound_pcis) { | 1408 | if (q->hydra_gives_outbound_pcis) { |
1409 | if (!q->siga_sync_done_on_thinints) { | 1409 | if (!q->siga_sync_done_on_thinints) { |
1410 | SYNC_MEMORY_ALL; | 1410 | SYNC_MEMORY_ALL; |
1411 | } else if ((!q->siga_sync_done_on_outb_tis)&& | 1411 | } else if (!q->siga_sync_done_on_outb_tis) { |
1412 | (q->hydra_gives_outbound_pcis)) { | ||
1413 | SYNC_MEMORY_ALL_OUTB; | 1412 | SYNC_MEMORY_ALL_OUTB; |
1414 | } | 1413 | } |
1415 | } else { | 1414 | } else { |
@@ -1911,8 +1910,7 @@ qdio_fill_thresholds(struct qdio_irq *irq_ptr, | |||
1911 | } | 1910 | } |
1912 | } | 1911 | } |
1913 | 1912 | ||
1914 | static int | 1913 | static void tiqdio_thinint_handler(void *ind, void *drv_data) |
1915 | tiqdio_thinint_handler(void) | ||
1916 | { | 1914 | { |
1917 | QDIO_DBF_TEXT4(0,trace,"thin_int"); | 1915 | QDIO_DBF_TEXT4(0,trace,"thin_int"); |
1918 | 1916 | ||
@@ -1925,7 +1923,6 @@ tiqdio_thinint_handler(void) | |||
1925 | tiqdio_clear_global_summary(); | 1923 | tiqdio_clear_global_summary(); |
1926 | 1924 | ||
1927 | tiqdio_inbound_checks(); | 1925 | tiqdio_inbound_checks(); |
1928 | return 0; | ||
1929 | } | 1926 | } |
1930 | 1927 | ||
1931 | static void | 1928 | static void |
@@ -2445,7 +2442,7 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero) | |||
2445 | real_addr_dev_st_chg_ind=0; | 2442 | real_addr_dev_st_chg_ind=0; |
2446 | } else { | 2443 | } else { |
2447 | real_addr_local_summary_bit= | 2444 | real_addr_local_summary_bit= |
2448 | virt_to_phys((volatile void *)indicators); | 2445 | virt_to_phys((volatile void *)tiqdio_ind); |
2449 | real_addr_dev_st_chg_ind= | 2446 | real_addr_dev_st_chg_ind= |
2450 | virt_to_phys((volatile void *)irq_ptr->dev_st_chg_ind); | 2447 | virt_to_phys((volatile void *)irq_ptr->dev_st_chg_ind); |
2451 | } | 2448 | } |
@@ -3740,23 +3737,25 @@ static void | |||
3740 | tiqdio_register_thinints(void) | 3737 | tiqdio_register_thinints(void) |
3741 | { | 3738 | { |
3742 | char dbf_text[20]; | 3739 | char dbf_text[20]; |
3743 | register_thinint_result= | 3740 | |
3744 | s390_register_adapter_interrupt(&tiqdio_thinint_handler); | 3741 | tiqdio_ind = |
3745 | if (register_thinint_result) { | 3742 | s390_register_adapter_interrupt(&tiqdio_thinint_handler, NULL); |
3746 | sprintf(dbf_text,"regthn%x",(register_thinint_result&0xff)); | 3743 | if (IS_ERR(tiqdio_ind)) { |
3744 | sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_ind)); | ||
3747 | QDIO_DBF_TEXT0(0,setup,dbf_text); | 3745 | QDIO_DBF_TEXT0(0,setup,dbf_text); |
3748 | QDIO_PRINT_ERR("failed to register adapter handler " \ | 3746 | QDIO_PRINT_ERR("failed to register adapter handler " \ |
3749 | "(rc=%i).\nAdapter interrupts might " \ | 3747 | "(rc=%li).\nAdapter interrupts might " \ |
3750 | "not work. Continuing.\n", | 3748 | "not work. Continuing.\n", |
3751 | register_thinint_result); | 3749 | PTR_ERR(tiqdio_ind)); |
3750 | tiqdio_ind = NULL; | ||
3752 | } | 3751 | } |
3753 | } | 3752 | } |
3754 | 3753 | ||
3755 | static void | 3754 | static void |
3756 | tiqdio_unregister_thinints(void) | 3755 | tiqdio_unregister_thinints(void) |
3757 | { | 3756 | { |
3758 | if (!register_thinint_result) | 3757 | if (tiqdio_ind) |
3759 | s390_unregister_adapter_interrupt(&tiqdio_thinint_handler); | 3758 | s390_unregister_adapter_interrupt(tiqdio_ind); |
3760 | } | 3759 | } |
3761 | 3760 | ||
3762 | static int | 3761 | static int |
@@ -3768,8 +3767,8 @@ qdio_get_qdio_memory(void) | |||
3768 | for (i=1;i<INDICATORS_PER_CACHELINE;i++) | 3767 | for (i=1;i<INDICATORS_PER_CACHELINE;i++) |
3769 | indicator_used[i]=0; | 3768 | indicator_used[i]=0; |
3770 | indicators = kzalloc(sizeof(__u32)*(INDICATORS_PER_CACHELINE), | 3769 | indicators = kzalloc(sizeof(__u32)*(INDICATORS_PER_CACHELINE), |
3771 | GFP_KERNEL); | 3770 | GFP_KERNEL); |
3772 | if (!indicators) | 3771 | if (!indicators) |
3773 | return -ENOMEM; | 3772 | return -ENOMEM; |
3774 | return 0; | 3773 | return 0; |
3775 | } | 3774 | } |
@@ -3780,7 +3779,6 @@ qdio_release_qdio_memory(void) | |||
3780 | kfree(indicators); | 3779 | kfree(indicators); |
3781 | } | 3780 | } |
3782 | 3781 | ||
3783 | |||
3784 | static void | 3782 | static void |
3785 | qdio_unregister_dbf_views(void) | 3783 | qdio_unregister_dbf_views(void) |
3786 | { | 3784 | { |
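Note: the registration hunk above switches from an int result to the ERR_PTR convention: s390_register_adapter_interrupt() now returns either the indicator address (later handed to tiqdio_set_subchannel_ind() through virt_to_phys()) or an encoded error checked with IS_ERR()/PTR_ERR(). A small sketch of that calling pattern; the wrapper name is hypothetical:

        /* Sketch of the ERR_PTR-style registration used above; register_thinint()
         * is a hypothetical wrapper, the handler signature matches the patch. */
        static void *register_thinint(void)
        {
                void *ind;

                ind = s390_register_adapter_interrupt(&tiqdio_thinint_handler, NULL);
                if (IS_ERR(ind))
                        return NULL;    /* treated as "adapter interrupts unavailable" */
                return ind;
        }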
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index 6d7aad18f6f0..37870e4e938e 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h | |||
@@ -57,7 +57,7 @@ | |||
57 | of the queue to 0 */ | 57 | of the queue to 0 */ |
58 | 58 | ||
59 | #define QDIO_ESTABLISH_TIMEOUT (1*HZ) | 59 | #define QDIO_ESTABLISH_TIMEOUT (1*HZ) |
60 | #define QDIO_ACTIVATE_TIMEOUT ((5*HZ)>>10) | 60 | #define QDIO_ACTIVATE_TIMEOUT (5*HZ) |
61 | #define QDIO_CLEANUP_CLEAR_TIMEOUT (20*HZ) | 61 | #define QDIO_CLEANUP_CLEAR_TIMEOUT (20*HZ) |
62 | #define QDIO_CLEANUP_HALT_TIMEOUT (10*HZ) | 62 | #define QDIO_CLEANUP_HALT_TIMEOUT (10*HZ) |
63 | #define QDIO_FORCE_CHECK_TIMEOUT (10*HZ) | 63 | #define QDIO_FORCE_CHECK_TIMEOUT (10*HZ) |
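Note: the timeout fix above replaces (5*HZ)>>10 with a plain 5*HZ. Since HZ is the number of timer ticks per second, n*HZ is n seconds in jiffies; the old expression divided that by 1024 and could truncate to zero. Illustrative arithmetic only:

        /* n * HZ == n seconds expressed in jiffies. */
        unsigned long activate_timeout = 5 * HZ;        /* 5 seconds, as in the fix */
        unsigned long old_timeout = (5 * HZ) >> 10;     /* ~4 jiffies at HZ=1000, 0 at HZ=100 */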
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index 3561982749e3..c3076217871e 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c | |||
@@ -2416,7 +2416,7 @@ init_ccw_bk(struct net_device *dev) | |||
2416 | privptr->p_buff_pages_perwrite); | 2416 | privptr->p_buff_pages_perwrite); |
2417 | #endif | 2417 | #endif |
2418 | if (p_buff==NULL) { | 2418 | if (p_buff==NULL) { |
2419 | printk(KERN_INFO "%s:%s __get_free_pages" | 2419 | printk(KERN_INFO "%s:%s __get_free_pages " |
2420 | "for writes buf failed : get is for %d pages\n", | 2420 | "for writes buf failed : get is for %d pages\n", |
2421 | dev->name, | 2421 | dev->name, |
2422 | __FUNCTION__, | 2422 | __FUNCTION__, |
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 0fd663b23d76..7bfe8d707a34 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c | |||
@@ -1115,7 +1115,7 @@ list_modified: | |||
1115 | rc = lcs_send_setipm(card, ipm); | 1115 | rc = lcs_send_setipm(card, ipm); |
1116 | spin_lock_irqsave(&card->ipm_lock, flags); | 1116 | spin_lock_irqsave(&card->ipm_lock, flags); |
1117 | if (rc) { | 1117 | if (rc) { |
1118 | PRINT_INFO("Adding multicast address failed." | 1118 | PRINT_INFO("Adding multicast address failed. " |
1119 | "Table possibly full!\n"); | 1119 | "Table possibly full!\n"); |
1120 | /* store ipm in failed list -> will be added | 1120 | /* store ipm in failed list -> will be added |
1121 | * to ipm_list again, so a retry will be done | 1121 | * to ipm_list again, so a retry will be done |
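Note: this hunk, the claw.c hunk above, and several zfcp hunks below all fix the same pitfall: adjacent C string literals are concatenated verbatim, so a format string split across lines needs an explicit trailing space. A one-line illustration:

        /* Without the trailing space the literals fuse into
         * "...failed.Table possibly full!". */
        printk(KERN_INFO "Adding multicast address failed. "
                         "Table possibly full!\n");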
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index d6e93f15440e..f3d893cfe61d 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c | |||
@@ -198,8 +198,7 @@ struct iucv_connection { | |||
198 | /** | 198 | /** |
199 | * Linked list of all connection structs. | 199 | * Linked list of all connection structs. |
200 | */ | 200 | */ |
201 | static struct list_head iucv_connection_list = | 201 | static LIST_HEAD(iucv_connection_list); |
202 | LIST_HEAD_INIT(iucv_connection_list); | ||
203 | static DEFINE_RWLOCK(iucv_connection_rwlock); | 202 | static DEFINE_RWLOCK(iucv_connection_rwlock); |
204 | 203 | ||
205 | /** | 204 | /** |
diff --git a/drivers/s390/net/qeth_proc.c b/drivers/s390/net/qeth_proc.c index f1ff165a5e05..46ecd03a597e 100644 --- a/drivers/s390/net/qeth_proc.c +++ b/drivers/s390/net/qeth_proc.c | |||
@@ -146,7 +146,7 @@ qeth_procfile_seq_show(struct seq_file *s, void *it) | |||
146 | return 0; | 146 | return 0; |
147 | } | 147 | } |
148 | 148 | ||
149 | static struct seq_operations qeth_procfile_seq_ops = { | 149 | static const struct seq_operations qeth_procfile_seq_ops = { |
150 | .start = qeth_procfile_seq_start, | 150 | .start = qeth_procfile_seq_start, |
151 | .stop = qeth_procfile_seq_stop, | 151 | .stop = qeth_procfile_seq_stop, |
152 | .next = qeth_procfile_seq_next, | 152 | .next = qeth_procfile_seq_next, |
@@ -264,7 +264,7 @@ qeth_perf_procfile_seq_show(struct seq_file *s, void *it) | |||
264 | return 0; | 264 | return 0; |
265 | } | 265 | } |
266 | 266 | ||
267 | static struct seq_operations qeth_perf_procfile_seq_ops = { | 267 | static const struct seq_operations qeth_perf_procfile_seq_ops = { |
268 | .start = qeth_procfile_seq_start, | 268 | .start = qeth_procfile_seq_start, |
269 | .stop = qeth_procfile_seq_stop, | 269 | .stop = qeth_procfile_seq_stop, |
270 | .next = qeth_procfile_seq_next, | 270 | .next = qeth_procfile_seq_next, |
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c index 47bb47b48581..8735a415a116 100644 --- a/drivers/s390/net/smsgiucv.c +++ b/drivers/s390/net/smsgiucv.c | |||
@@ -42,7 +42,7 @@ MODULE_DESCRIPTION ("Linux for S/390 IUCV special message driver"); | |||
42 | static struct iucv_path *smsg_path; | 42 | static struct iucv_path *smsg_path; |
43 | 43 | ||
44 | static DEFINE_SPINLOCK(smsg_list_lock); | 44 | static DEFINE_SPINLOCK(smsg_list_lock); |
45 | static struct list_head smsg_list = LIST_HEAD_INIT(smsg_list); | 45 | static LIST_HEAD(smsg_list); |
46 | 46 | ||
47 | static int smsg_path_pending(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]); | 47 | static int smsg_path_pending(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]); |
48 | static void smsg_message_pending(struct iucv_path *, struct iucv_message *); | 48 | static void smsg_message_pending(struct iucv_path *, struct iucv_message *); |
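Note: both the netiucv.c hunk above and this one replace the open-coded LIST_HEAD_INIT initializer with the LIST_HEAD() macro, which declares and initializes the list head in one step. A minimal sketch of the two equivalent forms:

        #include <linux/list.h>

        /* Old style: declare the list head, then initialize it explicitly. */
        static struct list_head old_style_list = LIST_HEAD_INIT(old_style_list);

        /* New style used by the patch: declaration and initialization in one macro. */
        static LIST_HEAD(new_style_list);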
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 4f86c0e12961..2dc8110ebf74 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c | |||
@@ -1286,7 +1286,7 @@ zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action) | |||
1286 | * note: no lock in subsequent strategy routines | 1286 | * note: no lock in subsequent strategy routines |
1287 | * (this allows these routine to call schedule, e.g. | 1287 | * (this allows these routine to call schedule, e.g. |
1288 | * kmalloc with such flags or qdio_initialize & friends) | 1288 | * kmalloc with such flags or qdio_initialize & friends) |
1289 | * Note: in case of timeout, the seperate strategies will fail | 1289 | * Note: in case of timeout, the separate strategies will fail |
1290 | * anyhow. No need for a special action. Even worse, a nameserver | 1290 | * anyhow. No need for a special action. Even worse, a nameserver |
1291 | * failure would not wake up waiting ports without the call. | 1291 | * failure would not wake up waiting ports without the call. |
1292 | */ | 1292 | */ |
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index fe57941ab55d..e45f85f7c7ed 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c | |||
@@ -502,7 +502,7 @@ zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *fsf_req) | |||
502 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 502 | fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
503 | break; | 503 | break; |
504 | case FSF_SQ_NO_RECOM: | 504 | case FSF_SQ_NO_RECOM: |
505 | ZFCP_LOG_NORMAL("bug: No recommendation could be given for a" | 505 | ZFCP_LOG_NORMAL("bug: No recommendation could be given for a " |
506 | "problem on the adapter %s " | 506 | "problem on the adapter %s " |
507 | "Stopping all operations on this adapter. ", | 507 | "Stopping all operations on this adapter. ", |
508 | zfcp_get_busid_by_adapter(fsf_req->adapter)); | 508 | zfcp_get_busid_by_adapter(fsf_req->adapter)); |
@@ -813,7 +813,7 @@ zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *fsf_req) | |||
813 | read_unlock_irqrestore(&zfcp_data.config_lock, flags); | 813 | read_unlock_irqrestore(&zfcp_data.config_lock, flags); |
814 | 814 | ||
815 | if (!port || (port->d_id != (status_buffer->d_id & ZFCP_DID_MASK))) { | 815 | if (!port || (port->d_id != (status_buffer->d_id & ZFCP_DID_MASK))) { |
816 | ZFCP_LOG_NORMAL("bug: Reopen port indication received for" | 816 | ZFCP_LOG_NORMAL("bug: Reopen port indication received for " |
817 | "nonexisting port with d_id 0x%06x on " | 817 | "nonexisting port with d_id 0x%06x on " |
818 | "adapter %s. Ignored.\n", | 818 | "adapter %s. Ignored.\n", |
819 | status_buffer->d_id & ZFCP_DID_MASK, | 819 | status_buffer->d_id & ZFCP_DID_MASK, |
@@ -2281,7 +2281,7 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) | |||
2281 | &lock_flags, &fsf_req); | 2281 | &lock_flags, &fsf_req); |
2282 | if (retval) { | 2282 | if (retval) { |
2283 | ZFCP_LOG_INFO("error: Out of resources. Could not create an " | 2283 | ZFCP_LOG_INFO("error: Out of resources. Could not create an " |
2284 | "exchange port data request for" | 2284 | "exchange port data request for " |
2285 | "the adapter %s.\n", | 2285 | "the adapter %s.\n", |
2286 | zfcp_get_busid_by_adapter(adapter)); | 2286 | zfcp_get_busid_by_adapter(adapter)); |
2287 | write_unlock_irqrestore(&adapter->request_queue.queue_lock, | 2287 | write_unlock_irqrestore(&adapter->request_queue.queue_lock, |
@@ -2340,7 +2340,7 @@ zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter, | |||
2340 | 0, NULL, &lock_flags, &fsf_req); | 2340 | 0, NULL, &lock_flags, &fsf_req); |
2341 | if (retval) { | 2341 | if (retval) { |
2342 | ZFCP_LOG_INFO("error: Out of resources. Could not create an " | 2342 | ZFCP_LOG_INFO("error: Out of resources. Could not create an " |
2343 | "exchange port data request for" | 2343 | "exchange port data request for " |
2344 | "the adapter %s.\n", | 2344 | "the adapter %s.\n", |
2345 | zfcp_get_busid_by_adapter(adapter)); | 2345 | zfcp_get_busid_by_adapter(adapter)); |
2346 | write_unlock_irqrestore(&adapter->request_queue.queue_lock, | 2346 | write_unlock_irqrestore(&adapter->request_queue.queue_lock, |
@@ -4725,7 +4725,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags, | |||
4725 | /* allocate new FSF request */ | 4725 | /* allocate new FSF request */ |
4726 | fsf_req = zfcp_fsf_req_alloc(pool, req_flags); | 4726 | fsf_req = zfcp_fsf_req_alloc(pool, req_flags); |
4727 | if (unlikely(NULL == fsf_req)) { | 4727 | if (unlikely(NULL == fsf_req)) { |
4728 | ZFCP_LOG_DEBUG("error: Could not put an FSF request into" | 4728 | ZFCP_LOG_DEBUG("error: Could not put an FSF request into " |
4729 | "the outbound (send) queue.\n"); | 4729 | "the outbound (send) queue.\n"); |
4730 | ret = -ENOMEM; | 4730 | ret = -ENOMEM; |
4731 | goto failed_fsf_req; | 4731 | goto failed_fsf_req; |
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 51d92b196ee7..22fdc17e0d0e 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c | |||
@@ -529,7 +529,7 @@ zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *fsf_req) | |||
529 | 529 | ||
530 | 530 | ||
531 | /** | 531 | /** |
532 | * zfcp_qdio_sbale_fill - set address and lenght in current SBALE | 532 | * zfcp_qdio_sbale_fill - set address and length in current SBALE |
533 | * on request_queue | 533 | * on request_queue |
534 | */ | 534 | */ |
535 | static void | 535 | static void |
diff --git a/drivers/serial/21285.c b/drivers/serial/21285.c index facb67855619..6a48dfa1efe8 100644 --- a/drivers/serial/21285.c +++ b/drivers/serial/21285.c | |||
@@ -277,6 +277,8 @@ serial21285_set_termios(struct uart_port *port, struct ktermios *termios, | |||
277 | if (termios->c_iflag & INPCK) | 277 | if (termios->c_iflag & INPCK) |
278 | port->read_status_mask |= RXSTAT_FRAME | RXSTAT_PARITY; | 278 | port->read_status_mask |= RXSTAT_FRAME | RXSTAT_PARITY; |
279 | 279 | ||
280 | tty_encode_baud_rate(tty, baud, baud); | ||
281 | |||
280 | /* | 282 | /* |
281 | * Which character status flags should we ignore? | 283 | * Which character status flags should we ignore? |
282 | */ | 284 | */ |
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c index 6f475b609864..ac2a3ef28d55 100644 --- a/drivers/serial/bfin_5xx.c +++ b/drivers/serial/bfin_5xx.c | |||
@@ -442,7 +442,8 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart) | |||
442 | set_bfin_dma_config(DIR_READ, DMA_FLOW_STOP, | 442 | set_bfin_dma_config(DIR_READ, DMA_FLOW_STOP, |
443 | INTR_ON_BUF, | 443 | INTR_ON_BUF, |
444 | DIMENSION_LINEAR, | 444 | DIMENSION_LINEAR, |
445 | DATA_SIZE_8)); | 445 | DATA_SIZE_8, |
446 | DMA_SYNC_RESTART)); | ||
446 | set_dma_start_addr(uart->tx_dma_channel, (unsigned long)(xmit->buf+xmit->tail)); | 447 | set_dma_start_addr(uart->tx_dma_channel, (unsigned long)(xmit->buf+xmit->tail)); |
447 | set_dma_x_count(uart->tx_dma_channel, uart->tx_count); | 448 | set_dma_x_count(uart->tx_dma_channel, uart->tx_count); |
448 | set_dma_x_modify(uart->tx_dma_channel, 1); | 449 | set_dma_x_modify(uart->tx_dma_channel, 1); |
@@ -689,7 +690,8 @@ static int bfin_serial_startup(struct uart_port *port) | |||
689 | set_dma_config(uart->rx_dma_channel, | 690 | set_dma_config(uart->rx_dma_channel, |
690 | set_bfin_dma_config(DIR_WRITE, DMA_FLOW_AUTO, | 691 | set_bfin_dma_config(DIR_WRITE, DMA_FLOW_AUTO, |
691 | INTR_ON_ROW, DIMENSION_2D, | 692 | INTR_ON_ROW, DIMENSION_2D, |
692 | DATA_SIZE_8)); | 693 | DATA_SIZE_8, |
694 | DMA_SYNC_RESTART)); | ||
693 | set_dma_x_count(uart->rx_dma_channel, DMA_RX_XCOUNT); | 695 | set_dma_x_count(uart->rx_dma_channel, DMA_RX_XCOUNT); |
694 | set_dma_x_modify(uart->rx_dma_channel, 1); | 696 | set_dma_x_modify(uart->rx_dma_channel, 1); |
695 | set_dma_y_count(uart->rx_dma_channel, DMA_RX_YCOUNT); | 697 | set_dma_y_count(uart->rx_dma_channel, DMA_RX_YCOUNT); |
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c index 5cfa3d1c4413..74e1f4be10bb 100644 --- a/drivers/usb/host/ohci-omap.c +++ b/drivers/usb/host/ohci-omap.c | |||
@@ -47,7 +47,7 @@ | |||
47 | #endif | 47 | #endif |
48 | 48 | ||
49 | #ifdef CONFIG_TPS65010 | 49 | #ifdef CONFIG_TPS65010 |
50 | #include <asm/arch/tps65010.h> | 50 | #include <linux/i2c/tps65010.h> |
51 | #else | 51 | #else |
52 | 52 | ||
53 | #define LOW 0 | 53 | #define LOW 0 |
diff --git a/drivers/usb/host/ohci-pnx4008.c b/drivers/usb/host/ohci-pnx4008.c index ca2a6abbc117..6c52c66b659f 100644 --- a/drivers/usb/host/ohci-pnx4008.c +++ b/drivers/usb/host/ohci-pnx4008.c | |||
@@ -112,9 +112,9 @@ static int isp1301_detach(struct i2c_client *client); | |||
112 | static int isp1301_command(struct i2c_client *client, unsigned int cmd, | 112 | static int isp1301_command(struct i2c_client *client, unsigned int cmd, |
113 | void *arg); | 113 | void *arg); |
114 | 114 | ||
115 | static unsigned short normal_i2c[] = | 115 | static const unsigned short normal_i2c[] = |
116 | { ISP1301_I2C_ADDR, ISP1301_I2C_ADDR + 1, I2C_CLIENT_END }; | 116 | { ISP1301_I2C_ADDR, ISP1301_I2C_ADDR + 1, I2C_CLIENT_END }; |
117 | static unsigned short dummy_i2c_addrlist[] = { I2C_CLIENT_END }; | 117 | static const unsigned short dummy_i2c_addrlist[] = { I2C_CLIENT_END }; |
118 | 118 | ||
119 | static struct i2c_client_address_data addr_data = { | 119 | static struct i2c_client_address_data addr_data = { |
120 | .normal_i2c = normal_i2c, | 120 | .normal_i2c = normal_i2c, |
@@ -123,7 +123,6 @@ static struct i2c_client_address_data addr_data = { | |||
123 | }; | 123 | }; |
124 | 124 | ||
125 | struct i2c_driver isp1301_driver = { | 125 | struct i2c_driver isp1301_driver = { |
126 | .id = I2C_DRIVERID_I2CDEV, /* Fake Id */ | ||
127 | .class = I2C_CLASS_HWMON, | 126 | .class = I2C_CLASS_HWMON, |
128 | .attach_adapter = isp1301_probe, | 127 | .attach_adapter = isp1301_probe, |
129 | .detach_client = isp1301_detach, | 128 | .detach_client = isp1301_detach, |
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c index 74d11c318987..c8e7427a0bc8 100644 --- a/drivers/video/bf54x-lq043fb.c +++ b/drivers/video/bf54x-lq043fb.c | |||
@@ -224,7 +224,8 @@ static int config_dma(struct bfin_bf54xfb_info *fbi) | |||
224 | set_dma_config(CH_EPPI0, | 224 | set_dma_config(CH_EPPI0, |
225 | set_bfin_dma_config(DIR_READ, DMA_FLOW_AUTO, | 225 | set_bfin_dma_config(DIR_READ, DMA_FLOW_AUTO, |
226 | INTR_DISABLE, DIMENSION_2D, | 226 | INTR_DISABLE, DIMENSION_2D, |
227 | DATA_SIZE_32)); | 227 | DATA_SIZE_32, |
228 | DMA_NOSYNC_KEEP_DMA_BUF)); | ||
228 | set_dma_x_count(CH_EPPI0, (LCD_X_RES * LCD_BPP) / DMA_BUS_SIZE); | 229 | set_dma_x_count(CH_EPPI0, (LCD_X_RES * LCD_BPP) / DMA_BUS_SIZE); |
229 | set_dma_x_modify(CH_EPPI0, DMA_BUS_SIZE / 8); | 230 | set_dma_x_modify(CH_EPPI0, DMA_BUS_SIZE / 8); |
230 | set_dma_y_count(CH_EPPI0, LCD_Y_RES); | 231 | set_dma_y_count(CH_EPPI0, LCD_Y_RES); |
@@ -263,8 +264,7 @@ static int request_ports(struct bfin_bf54xfb_info *fbi) | |||
263 | } | 264 | } |
264 | } | 265 | } |
265 | 266 | ||
266 | gpio_direction_output(disp); | 267 | gpio_direction_output(disp, 1); |
267 | gpio_set_value(disp, 1); | ||
268 | 268 | ||
269 | return 0; | 269 | return 0; |
270 | } | 270 | } |
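Note: two independent fixes in the hunk above: the DMA configuration gains the new sync-mode argument (DMA_NOSYNC_KEEP_DMA_BUF here), and the display GPIO setup folds the initial level into gpio_direction_output(), whose second argument is the value to drive, so the separate gpio_set_value() call goes away. A tiny sketch of the GPIO change with a placeholder pin number:

        /* 42 is a placeholder GPIO number.  Switch the pin to output mode and
         * drive it high in a single call. */
        gpio_direction_output(42, 1);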
diff --git a/drivers/video/matrox/matroxfb_maven.c b/drivers/video/matrox/matroxfb_maven.c index 49cd53e46c0a..0cd58f84fb46 100644 --- a/drivers/video/matrox/matroxfb_maven.c +++ b/drivers/video/matrox/matroxfb_maven.c | |||
@@ -1232,7 +1232,7 @@ static int maven_shutdown_client(struct i2c_client* clnt) { | |||
1232 | return 0; | 1232 | return 0; |
1233 | } | 1233 | } |
1234 | 1234 | ||
1235 | static unsigned short normal_i2c[] = { MAVEN_I2CID, I2C_CLIENT_END }; | 1235 | static const unsigned short normal_i2c[] = { MAVEN_I2CID, I2C_CLIENT_END }; |
1236 | I2C_CLIENT_INSMOD; | 1236 | I2C_CLIENT_INSMOD; |
1237 | 1237 | ||
1238 | static struct i2c_driver maven_driver; | 1238 | static struct i2c_driver maven_driver; |
diff --git a/drivers/video/omap/lcd_h3.c b/drivers/video/omap/lcd_h3.c index c604d935c188..31e978349a80 100644 --- a/drivers/video/omap/lcd_h3.c +++ b/drivers/video/omap/lcd_h3.c | |||
@@ -21,9 +21,9 @@ | |||
21 | 21 | ||
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | #include <linux/platform_device.h> | 23 | #include <linux/platform_device.h> |
24 | #include <linux/i2c/tps65010.h> | ||
24 | 25 | ||
25 | #include <asm/arch/gpio.h> | 26 | #include <asm/arch/gpio.h> |
26 | #include <asm/arch/tps65010.h> | ||
27 | #include <asm/arch/omapfb.h> | 27 | #include <asm/arch/omapfb.h> |
28 | 28 | ||
29 | #define MODULE_NAME "omapfb-lcd_h3" | 29 | #define MODULE_NAME "omapfb-lcd_h3" |
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c index d93eb626b2f0..0fd5820d5c61 100644 --- a/drivers/w1/masters/ds2482.c +++ b/drivers/w1/masters/ds2482.c | |||
@@ -29,7 +29,7 @@ | |||
29 | * However, the chip cannot be detected without doing an i2c write, | 29 | * However, the chip cannot be detected without doing an i2c write, |
30 | * so use the force module parameter. | 30 | * so use the force module parameter. |
31 | */ | 31 | */ |
32 | static unsigned short normal_i2c[] = {I2C_CLIENT_END}; | 32 | static const unsigned short normal_i2c[] = { I2C_CLIENT_END }; |
33 | 33 | ||
34 | /** | 34 | /** |
35 | * Insmod parameters | 35 | * Insmod parameters |
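Note: the ohci-pnx4008.c, matroxfb_maven.c and ds2482.c hunks all constify the legacy i2c probe address lists. The pattern is an array of addresses terminated by I2C_CLIENT_END; a minimal sketch with a placeholder address:

        #include <linux/i2c.h>

        /* 0x50 is a placeholder address; the list must end with I2C_CLIENT_END. */
        static const unsigned short normal_i2c[] = { 0x50, I2C_CLIENT_END };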