Diffstat (limited to 'drivers/ata/libata-sff.c')
-rw-r--r-- | drivers/ata/libata-sff.c | 641
1 file changed, 180 insertions(+), 461 deletions(-)
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 12c88c588039..16bc3e35bdd4 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -56,10 +56,7 @@ u8 ata_irq_on(struct ata_port *ap) | |||
56 | ap->ctl &= ~ATA_NIEN; | 56 | ap->ctl &= ~ATA_NIEN; |
57 | ap->last_ctl = ap->ctl; | 57 | ap->last_ctl = ap->ctl; |
58 | 58 | ||
59 | if (ap->flags & ATA_FLAG_MMIO) | 59 | iowrite8(ap->ctl, ioaddr->ctl_addr); |
60 | writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr); | ||
61 | else | ||
62 | outb(ap->ctl, ioaddr->ctl_addr); | ||
63 | tmp = ata_wait_idle(ap); | 60 | tmp = ata_wait_idle(ap); |
64 | 61 | ||
65 | ap->ops->irq_clear(ap); | 62 | ap->ops->irq_clear(ap); |
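The hunk above shows the central pattern of this conversion: each taskfile address in struct ata_ioports becomes a void __iomem * iomap cookie, so a single iowrite8() replaces the old ATA_FLAG_MMIO branch between writeb() and outb(). As a minimal illustration of why that works (a hypothetical driver, not libata code), the same accessor serves a register no matter whether the cookie came from a memory BAR or an I/O-port BAR:

#include <linux/io.h>
#include <linux/pci.h>

/* Hypothetical example: pci_iomap() returns a cookie for MEM and IO
 * BARs alike, and ioread8()/iowrite8() dispatch to readb()/writeb()
 * or inb()/outb() based on that cookie.
 */
static void toy_write_ctl(void __iomem *ctl, u8 val)
{
	iowrite8(val, ctl);
}

static void __iomem *toy_map_bar(struct pci_dev *pdev, int bar)
{
	return pci_iomap(pdev, bar, 0);	/* maxlen 0: map the whole BAR */
}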
@@ -67,92 +64,74 @@ u8 ata_irq_on(struct ata_port *ap) | |||
67 | return tmp; | 64 | return tmp; |
68 | } | 65 | } |
69 | 66 | ||
67 | u8 ata_dummy_irq_on (struct ata_port *ap) { return 0; } | ||
68 | |||
70 | /** | 69 | /** |
71 | * ata_tf_load_pio - send taskfile registers to host controller | 70 | * ata_irq_ack - Acknowledge a device interrupt. |
72 | * @ap: Port to which output is sent | 71 | * @ap: Port on which interrupts are enabled. |
73 | * @tf: ATA taskfile register set | ||
74 | * | 72 | * |
75 | * Outputs ATA taskfile to standard ATA host controller. | 73 | * Wait up to 10 ms for legacy IDE device to become idle (BUSY |
74 | * or BUSY+DRQ clear). Obtain dma status and port status from | ||
75 | * device. Clear the interrupt. Return port status. | ||
76 | * | 76 | * |
77 | * LOCKING: | 77 | * LOCKING: |
78 | * Inherited from caller. | ||
79 | */ | 78 | */ |
80 | 79 | ||
81 | static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf) | 80 | u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq) |
82 | { | 81 | { |
83 | struct ata_ioports *ioaddr = &ap->ioaddr; | 82 | unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY; |
84 | unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; | 83 | u8 host_stat, post_stat, status; |
85 | 84 | ||
86 | if (tf->ctl != ap->last_ctl) { | 85 | status = ata_busy_wait(ap, bits, 1000); |
87 | outb(tf->ctl, ioaddr->ctl_addr); | 86 | if (status & bits) |
88 | ap->last_ctl = tf->ctl; | 87 | if (ata_msg_err(ap)) |
89 | ata_wait_idle(ap); | 88 | printk(KERN_ERR "abnormal status 0x%X\n", status); |
90 | } | ||
91 | 89 | ||
92 | if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { | 90 | /* get controller status; clear intr, err bits */ |
93 | outb(tf->hob_feature, ioaddr->feature_addr); | 91 | host_stat = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); |
94 | outb(tf->hob_nsect, ioaddr->nsect_addr); | 92 | iowrite8(host_stat | ATA_DMA_INTR | ATA_DMA_ERR, |
95 | outb(tf->hob_lbal, ioaddr->lbal_addr); | 93 | ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); |
96 | outb(tf->hob_lbam, ioaddr->lbam_addr); | ||
97 | outb(tf->hob_lbah, ioaddr->lbah_addr); | ||
98 | VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", | ||
99 | tf->hob_feature, | ||
100 | tf->hob_nsect, | ||
101 | tf->hob_lbal, | ||
102 | tf->hob_lbam, | ||
103 | tf->hob_lbah); | ||
104 | } | ||
105 | 94 | ||
106 | if (is_addr) { | 95 | post_stat = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); |
107 | outb(tf->feature, ioaddr->feature_addr); | ||
108 | outb(tf->nsect, ioaddr->nsect_addr); | ||
109 | outb(tf->lbal, ioaddr->lbal_addr); | ||
110 | outb(tf->lbam, ioaddr->lbam_addr); | ||
111 | outb(tf->lbah, ioaddr->lbah_addr); | ||
112 | VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", | ||
113 | tf->feature, | ||
114 | tf->nsect, | ||
115 | tf->lbal, | ||
116 | tf->lbam, | ||
117 | tf->lbah); | ||
118 | } | ||
119 | 96 | ||
120 | if (tf->flags & ATA_TFLAG_DEVICE) { | 97 | if (ata_msg_intr(ap)) |
121 | outb(tf->device, ioaddr->device_addr); | 98 | printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n", |
122 | VPRINTK("device 0x%X\n", tf->device); | 99 | __FUNCTION__, |
123 | } | 100 | host_stat, post_stat, status); |
124 | 101 | ||
125 | ata_wait_idle(ap); | 102 | return status; |
126 | } | 103 | } |
127 | 104 | ||
105 | u8 ata_dummy_irq_ack(struct ata_port *ap, unsigned int chk_drq) { return 0; } | ||
106 | |||
128 | /** | 107 | /** |
129 | * ata_tf_load_mmio - send taskfile registers to host controller | 108 | * ata_tf_load - send taskfile registers to host controller |
130 | * @ap: Port to which output is sent | 109 | * @ap: Port to which output is sent |
131 | * @tf: ATA taskfile register set | 110 | * @tf: ATA taskfile register set |
132 | * | 111 | * |
133 | * Outputs ATA taskfile to standard ATA host controller using MMIO. | 112 | * Outputs ATA taskfile to standard ATA host controller. |
134 | * | 113 | * |
135 | * LOCKING: | 114 | * LOCKING: |
136 | * Inherited from caller. | 115 | * Inherited from caller. |
137 | */ | 116 | */ |
138 | 117 | ||
139 | static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf) | 118 | void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) |
140 | { | 119 | { |
141 | struct ata_ioports *ioaddr = &ap->ioaddr; | 120 | struct ata_ioports *ioaddr = &ap->ioaddr; |
142 | unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; | 121 | unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; |
143 | 122 | ||
144 | if (tf->ctl != ap->last_ctl) { | 123 | if (tf->ctl != ap->last_ctl) { |
145 | writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr); | 124 | iowrite8(tf->ctl, ioaddr->ctl_addr); |
146 | ap->last_ctl = tf->ctl; | 125 | ap->last_ctl = tf->ctl; |
147 | ata_wait_idle(ap); | 126 | ata_wait_idle(ap); |
148 | } | 127 | } |
149 | 128 | ||
150 | if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { | 129 | if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { |
151 | writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr); | 130 | iowrite8(tf->hob_feature, ioaddr->feature_addr); |
152 | writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr); | 131 | iowrite8(tf->hob_nsect, ioaddr->nsect_addr); |
153 | writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr); | 132 | iowrite8(tf->hob_lbal, ioaddr->lbal_addr); |
154 | writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr); | 133 | iowrite8(tf->hob_lbam, ioaddr->lbam_addr); |
155 | writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr); | 134 | iowrite8(tf->hob_lbah, ioaddr->lbah_addr); |
156 | VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", | 135 | VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", |
157 | tf->hob_feature, | 136 | tf->hob_feature, |
158 | tf->hob_nsect, | 137 | tf->hob_nsect, |
@@ -162,11 +141,11 @@ static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf) | |||
162 | } | 141 | } |
163 | 142 | ||
164 | if (is_addr) { | 143 | if (is_addr) { |
165 | writeb(tf->feature, (void __iomem *) ioaddr->feature_addr); | 144 | iowrite8(tf->feature, ioaddr->feature_addr); |
166 | writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr); | 145 | iowrite8(tf->nsect, ioaddr->nsect_addr); |
167 | writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr); | 146 | iowrite8(tf->lbal, ioaddr->lbal_addr); |
168 | writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr); | 147 | iowrite8(tf->lbam, ioaddr->lbam_addr); |
169 | writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr); | 148 | iowrite8(tf->lbah, ioaddr->lbah_addr); |
170 | VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", | 149 | VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", |
171 | tf->feature, | 150 | tf->feature, |
172 | tf->nsect, | 151 | tf->nsect, |
@@ -176,108 +155,34 @@ static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf) | |||
176 | } | 155 | } |
177 | 156 | ||
178 | if (tf->flags & ATA_TFLAG_DEVICE) { | 157 | if (tf->flags & ATA_TFLAG_DEVICE) { |
179 | writeb(tf->device, (void __iomem *) ioaddr->device_addr); | 158 | iowrite8(tf->device, ioaddr->device_addr); |
180 | VPRINTK("device 0x%X\n", tf->device); | 159 | VPRINTK("device 0x%X\n", tf->device); |
181 | } | 160 | } |
182 | 161 | ||
183 | ata_wait_idle(ap); | 162 | ata_wait_idle(ap); |
184 | } | 163 | } |
185 | 164 | ||
186 | |||
187 | /** | ||
188 | * ata_tf_load - send taskfile registers to host controller | ||
189 | * @ap: Port to which output is sent | ||
190 | * @tf: ATA taskfile register set | ||
191 | * | ||
192 | * Outputs ATA taskfile to standard ATA host controller using MMIO | ||
193 | * or PIO as indicated by the ATA_FLAG_MMIO flag. | ||
194 | * Writes the control, feature, nsect, lbal, lbam, and lbah registers. | ||
195 | * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect, | ||
196 | * hob_lbal, hob_lbam, and hob_lbah. | ||
197 | * | ||
198 | * This function waits for idle (!BUSY and !DRQ) after writing | ||
199 | * registers. If the control register has a new value, this | ||
200 | * function also waits for idle after writing control and before | ||
201 | * writing the remaining registers. | ||
202 | * | ||
203 | * May be used as the tf_load() entry in ata_port_operations. | ||
204 | * | ||
205 | * LOCKING: | ||
206 | * Inherited from caller. | ||
207 | */ | ||
208 | void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) | ||
209 | { | ||
210 | if (ap->flags & ATA_FLAG_MMIO) | ||
211 | ata_tf_load_mmio(ap, tf); | ||
212 | else | ||
213 | ata_tf_load_pio(ap, tf); | ||
214 | } | ||
215 | |||
216 | /** | 165 | /** |
217 | * ata_exec_command_pio - issue ATA command to host controller | 166 | * ata_exec_command - issue ATA command to host controller |
218 | * @ap: port to which command is being issued | ||
219 | * @tf: ATA taskfile register set | ||
220 | * | ||
221 | * Issues PIO write to ATA command register, with proper | ||
222 | * synchronization with interrupt handler / other threads. | ||
223 | * | ||
224 | * LOCKING: | ||
225 | * spin_lock_irqsave(host lock) | ||
226 | */ | ||
227 | |||
228 | static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf) | ||
229 | { | ||
230 | DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command); | ||
231 | |||
232 | outb(tf->command, ap->ioaddr.command_addr); | ||
233 | ata_pause(ap); | ||
234 | } | ||
235 | |||
236 | |||
237 | /** | ||
238 | * ata_exec_command_mmio - issue ATA command to host controller | ||
239 | * @ap: port to which command is being issued | 167 | * @ap: port to which command is being issued |
240 | * @tf: ATA taskfile register set | 168 | * @tf: ATA taskfile register set |
241 | * | 169 | * |
242 | * Issues MMIO write to ATA command register, with proper | 170 | * Issues ATA command, with proper synchronization with interrupt |
243 | * synchronization with interrupt handler / other threads. | 171 | * handler / other threads. |
244 | * | ||
245 | * FIXME: missing write posting for 400nS delay enforcement | ||
246 | * | 172 | * |
247 | * LOCKING: | 173 | * LOCKING: |
248 | * spin_lock_irqsave(host lock) | 174 | * spin_lock_irqsave(host lock) |
249 | */ | 175 | */ |
250 | 176 | void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf) | |
251 | static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf) | ||
252 | { | 177 | { |
253 | DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command); | 178 | DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command); |
254 | 179 | ||
255 | writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr); | 180 | iowrite8(tf->command, ap->ioaddr.command_addr); |
256 | ata_pause(ap); | 181 | ata_pause(ap); |
257 | } | 182 | } |
258 | 183 | ||
259 | |||
260 | /** | ||
261 | * ata_exec_command - issue ATA command to host controller | ||
262 | * @ap: port to which command is being issued | ||
263 | * @tf: ATA taskfile register set | ||
264 | * | ||
265 | * Issues PIO/MMIO write to ATA command register, with proper | ||
266 | * synchronization with interrupt handler / other threads. | ||
267 | * | ||
268 | * LOCKING: | ||
269 | * spin_lock_irqsave(host lock) | ||
270 | */ | ||
271 | void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf) | ||
272 | { | ||
273 | if (ap->flags & ATA_FLAG_MMIO) | ||
274 | ata_exec_command_mmio(ap, tf); | ||
275 | else | ||
276 | ata_exec_command_pio(ap, tf); | ||
277 | } | ||
278 | |||
279 | /** | 184 | /** |
280 | * ata_tf_read_pio - input device's ATA taskfile shadow registers | 185 | * ata_tf_read - input device's ATA taskfile shadow registers |
281 | * @ap: Port from which input is read | 186 | * @ap: Port from which input is read |
282 | * @tf: ATA taskfile register set for storing input | 187 | * @tf: ATA taskfile register set for storing input |
283 | * | 188 | * |
@@ -287,121 +192,28 @@ void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf) | |||
287 | * LOCKING: | 192 | * LOCKING: |
288 | * Inherited from caller. | 193 | * Inherited from caller. |
289 | */ | 194 | */ |
290 | 195 | void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) | |
291 | static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf) | ||
292 | { | ||
293 | struct ata_ioports *ioaddr = &ap->ioaddr; | ||
294 | |||
295 | tf->command = ata_check_status(ap); | ||
296 | tf->feature = inb(ioaddr->error_addr); | ||
297 | tf->nsect = inb(ioaddr->nsect_addr); | ||
298 | tf->lbal = inb(ioaddr->lbal_addr); | ||
299 | tf->lbam = inb(ioaddr->lbam_addr); | ||
300 | tf->lbah = inb(ioaddr->lbah_addr); | ||
301 | tf->device = inb(ioaddr->device_addr); | ||
302 | |||
303 | if (tf->flags & ATA_TFLAG_LBA48) { | ||
304 | outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr); | ||
305 | tf->hob_feature = inb(ioaddr->error_addr); | ||
306 | tf->hob_nsect = inb(ioaddr->nsect_addr); | ||
307 | tf->hob_lbal = inb(ioaddr->lbal_addr); | ||
308 | tf->hob_lbam = inb(ioaddr->lbam_addr); | ||
309 | tf->hob_lbah = inb(ioaddr->lbah_addr); | ||
310 | } | ||
311 | } | ||
312 | |||
313 | /** | ||
314 | * ata_tf_read_mmio - input device's ATA taskfile shadow registers | ||
315 | * @ap: Port from which input is read | ||
316 | * @tf: ATA taskfile register set for storing input | ||
317 | * | ||
318 | * Reads ATA taskfile registers for currently-selected device | ||
319 | * into @tf via MMIO. | ||
320 | * | ||
321 | * LOCKING: | ||
322 | * Inherited from caller. | ||
323 | */ | ||
324 | |||
325 | static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf) | ||
326 | { | 196 | { |
327 | struct ata_ioports *ioaddr = &ap->ioaddr; | 197 | struct ata_ioports *ioaddr = &ap->ioaddr; |
328 | 198 | ||
329 | tf->command = ata_check_status(ap); | 199 | tf->command = ata_check_status(ap); |
330 | tf->feature = readb((void __iomem *)ioaddr->error_addr); | 200 | tf->feature = ioread8(ioaddr->error_addr); |
331 | tf->nsect = readb((void __iomem *)ioaddr->nsect_addr); | 201 | tf->nsect = ioread8(ioaddr->nsect_addr); |
332 | tf->lbal = readb((void __iomem *)ioaddr->lbal_addr); | 202 | tf->lbal = ioread8(ioaddr->lbal_addr); |
333 | tf->lbam = readb((void __iomem *)ioaddr->lbam_addr); | 203 | tf->lbam = ioread8(ioaddr->lbam_addr); |
334 | tf->lbah = readb((void __iomem *)ioaddr->lbah_addr); | 204 | tf->lbah = ioread8(ioaddr->lbah_addr); |
335 | tf->device = readb((void __iomem *)ioaddr->device_addr); | 205 | tf->device = ioread8(ioaddr->device_addr); |
336 | 206 | ||
337 | if (tf->flags & ATA_TFLAG_LBA48) { | 207 | if (tf->flags & ATA_TFLAG_LBA48) { |
338 | writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr); | 208 | iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr); |
339 | tf->hob_feature = readb((void __iomem *)ioaddr->error_addr); | 209 | tf->hob_feature = ioread8(ioaddr->error_addr); |
340 | tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr); | 210 | tf->hob_nsect = ioread8(ioaddr->nsect_addr); |
341 | tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr); | 211 | tf->hob_lbal = ioread8(ioaddr->lbal_addr); |
342 | tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr); | 212 | tf->hob_lbam = ioread8(ioaddr->lbam_addr); |
343 | tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr); | 213 | tf->hob_lbah = ioread8(ioaddr->lbah_addr); |
344 | } | 214 | } |
345 | } | 215 | } |
346 | 216 | ||
347 | |||
348 | /** | ||
349 | * ata_tf_read - input device's ATA taskfile shadow registers | ||
350 | * @ap: Port from which input is read | ||
351 | * @tf: ATA taskfile register set for storing input | ||
352 | * | ||
353 | * Reads ATA taskfile registers for currently-selected device | ||
354 | * into @tf. | ||
355 | * | ||
356 | * Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48 | ||
357 | * is set, also reads the hob registers. | ||
358 | * | ||
359 | * May be used as the tf_read() entry in ata_port_operations. | ||
360 | * | ||
361 | * LOCKING: | ||
362 | * Inherited from caller. | ||
363 | */ | ||
364 | void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) | ||
365 | { | ||
366 | if (ap->flags & ATA_FLAG_MMIO) | ||
367 | ata_tf_read_mmio(ap, tf); | ||
368 | else | ||
369 | ata_tf_read_pio(ap, tf); | ||
370 | } | ||
371 | |||
372 | /** | ||
373 | * ata_check_status_pio - Read device status reg & clear interrupt | ||
374 | * @ap: port where the device is | ||
375 | * | ||
376 | * Reads ATA taskfile status register for currently-selected device | ||
377 | * and return its value. This also clears pending interrupts | ||
378 | * from this device | ||
379 | * | ||
380 | * LOCKING: | ||
381 | * Inherited from caller. | ||
382 | */ | ||
383 | static u8 ata_check_status_pio(struct ata_port *ap) | ||
384 | { | ||
385 | return inb(ap->ioaddr.status_addr); | ||
386 | } | ||
387 | |||
388 | /** | ||
389 | * ata_check_status_mmio - Read device status reg & clear interrupt | ||
390 | * @ap: port where the device is | ||
391 | * | ||
392 | * Reads ATA taskfile status register for currently-selected device | ||
393 | * via MMIO and return its value. This also clears pending interrupts | ||
394 | * from this device | ||
395 | * | ||
396 | * LOCKING: | ||
397 | * Inherited from caller. | ||
398 | */ | ||
399 | static u8 ata_check_status_mmio(struct ata_port *ap) | ||
400 | { | ||
401 | return readb((void __iomem *) ap->ioaddr.status_addr); | ||
402 | } | ||
403 | |||
404 | |||
405 | /** | 217 | /** |
406 | * ata_check_status - Read device status reg & clear interrupt | 218 | * ata_check_status - Read device status reg & clear interrupt |
407 | * @ap: port where the device is | 219 | * @ap: port where the device is |
@@ -410,19 +222,14 @@ static u8 ata_check_status_mmio(struct ata_port *ap) | |||
410 | * and return its value. This also clears pending interrupts | 222 | * and return its value. This also clears pending interrupts |
411 | * from this device | 223 | * from this device |
412 | * | 224 | * |
413 | * May be used as the check_status() entry in ata_port_operations. | ||
414 | * | ||
415 | * LOCKING: | 225 | * LOCKING: |
416 | * Inherited from caller. | 226 | * Inherited from caller. |
417 | */ | 227 | */ |
418 | u8 ata_check_status(struct ata_port *ap) | 228 | u8 ata_check_status(struct ata_port *ap) |
419 | { | 229 | { |
420 | if (ap->flags & ATA_FLAG_MMIO) | 230 | return ioread8(ap->ioaddr.status_addr); |
421 | return ata_check_status_mmio(ap); | ||
422 | return ata_check_status_pio(ap); | ||
423 | } | 231 | } |
424 | 232 | ||
425 | |||
426 | /** | 233 | /** |
427 | * ata_altstatus - Read device alternate status reg | 234 | * ata_altstatus - Read device alternate status reg |
428 | * @ap: port where the device is | 235 | * @ap: port where the device is |
@@ -441,58 +248,52 @@ u8 ata_altstatus(struct ata_port *ap) | |||
441 | if (ap->ops->check_altstatus) | 248 | if (ap->ops->check_altstatus) |
442 | return ap->ops->check_altstatus(ap); | 249 | return ap->ops->check_altstatus(ap); |
443 | 250 | ||
444 | if (ap->flags & ATA_FLAG_MMIO) | 251 | return ioread8(ap->ioaddr.altstatus_addr); |
445 | return readb((void __iomem *)ap->ioaddr.altstatus_addr); | ||
446 | return inb(ap->ioaddr.altstatus_addr); | ||
447 | } | 252 | } |
448 | 253 | ||
449 | /** | 254 | /** |
450 | * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction | 255 | * ata_bmdma_setup - Set up PCI IDE BMDMA transaction |
451 | * @qc: Info associated with this ATA transaction. | 256 | * @qc: Info associated with this ATA transaction. |
452 | * | 257 | * |
453 | * LOCKING: | 258 | * LOCKING: |
454 | * spin_lock_irqsave(host lock) | 259 | * spin_lock_irqsave(host lock) |
455 | */ | 260 | */ |
456 | 261 | void ata_bmdma_setup(struct ata_queued_cmd *qc) | |
457 | static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc) | ||
458 | { | 262 | { |
459 | struct ata_port *ap = qc->ap; | 263 | struct ata_port *ap = qc->ap; |
460 | unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); | 264 | unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); |
461 | u8 dmactl; | 265 | u8 dmactl; |
462 | void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; | ||
463 | 266 | ||
464 | /* load PRD table addr. */ | 267 | /* load PRD table addr. */ |
465 | mb(); /* make sure PRD table writes are visible to controller */ | 268 | mb(); /* make sure PRD table writes are visible to controller */ |
466 | writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS); | 269 | iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); |
467 | 270 | ||
468 | /* specify data direction, triple-check start bit is clear */ | 271 | /* specify data direction, triple-check start bit is clear */ |
469 | dmactl = readb(mmio + ATA_DMA_CMD); | 272 | dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); |
470 | dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); | 273 | dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); |
471 | if (!rw) | 274 | if (!rw) |
472 | dmactl |= ATA_DMA_WR; | 275 | dmactl |= ATA_DMA_WR; |
473 | writeb(dmactl, mmio + ATA_DMA_CMD); | 276 | iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); |
474 | 277 | ||
475 | /* issue r/w command */ | 278 | /* issue r/w command */ |
476 | ap->ops->exec_command(ap, &qc->tf); | 279 | ap->ops->exec_command(ap, &qc->tf); |
477 | } | 280 | } |
478 | 281 | ||
479 | /** | 282 | /** |
480 | * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction | 283 | * ata_bmdma_start - Start a PCI IDE BMDMA transaction |
481 | * @qc: Info associated with this ATA transaction. | 284 | * @qc: Info associated with this ATA transaction. |
482 | * | 285 | * |
483 | * LOCKING: | 286 | * LOCKING: |
484 | * spin_lock_irqsave(host lock) | 287 | * spin_lock_irqsave(host lock) |
485 | */ | 288 | */ |
486 | 289 | void ata_bmdma_start (struct ata_queued_cmd *qc) | |
487 | static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc) | ||
488 | { | 290 | { |
489 | struct ata_port *ap = qc->ap; | 291 | struct ata_port *ap = qc->ap; |
490 | void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; | ||
491 | u8 dmactl; | 292 | u8 dmactl; |
492 | 293 | ||
493 | /* start host DMA transaction */ | 294 | /* start host DMA transaction */ |
494 | dmactl = readb(mmio + ATA_DMA_CMD); | 295 | dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); |
495 | writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD); | 296 | iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); |
496 | 297 | ||
497 | /* Strictly, one may wish to issue a readb() here, to | 298 | /* Strictly, one may wish to issue a readb() here, to |
498 | * flush the mmio write. However, control also passes | 299 | * flush the mmio write. However, control also passes |
@@ -508,96 +309,6 @@ static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc) | |||
508 | } | 309 | } |
509 | 310 | ||
510 | /** | 311 | /** |
511 | * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO) | ||
512 | * @qc: Info associated with this ATA transaction. | ||
513 | * | ||
514 | * LOCKING: | ||
515 | * spin_lock_irqsave(host lock) | ||
516 | */ | ||
517 | |||
518 | static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc) | ||
519 | { | ||
520 | struct ata_port *ap = qc->ap; | ||
521 | unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); | ||
522 | u8 dmactl; | ||
523 | |||
524 | /* load PRD table addr. */ | ||
525 | outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); | ||
526 | |||
527 | /* specify data direction, triple-check start bit is clear */ | ||
528 | dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | ||
529 | dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); | ||
530 | if (!rw) | ||
531 | dmactl |= ATA_DMA_WR; | ||
532 | outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | ||
533 | |||
534 | /* issue r/w command */ | ||
535 | ap->ops->exec_command(ap, &qc->tf); | ||
536 | } | ||
537 | |||
538 | /** | ||
539 | * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO) | ||
540 | * @qc: Info associated with this ATA transaction. | ||
541 | * | ||
542 | * LOCKING: | ||
543 | * spin_lock_irqsave(host lock) | ||
544 | */ | ||
545 | |||
546 | static void ata_bmdma_start_pio (struct ata_queued_cmd *qc) | ||
547 | { | ||
548 | struct ata_port *ap = qc->ap; | ||
549 | u8 dmactl; | ||
550 | |||
551 | /* start host DMA transaction */ | ||
552 | dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | ||
553 | outb(dmactl | ATA_DMA_START, | ||
554 | ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | ||
555 | } | ||
556 | |||
557 | |||
558 | /** | ||
559 | * ata_bmdma_start - Start a PCI IDE BMDMA transaction | ||
560 | * @qc: Info associated with this ATA transaction. | ||
561 | * | ||
562 | * Writes the ATA_DMA_START flag to the DMA command register. | ||
563 | * | ||
564 | * May be used as the bmdma_start() entry in ata_port_operations. | ||
565 | * | ||
566 | * LOCKING: | ||
567 | * spin_lock_irqsave(host lock) | ||
568 | */ | ||
569 | void ata_bmdma_start(struct ata_queued_cmd *qc) | ||
570 | { | ||
571 | if (qc->ap->flags & ATA_FLAG_MMIO) | ||
572 | ata_bmdma_start_mmio(qc); | ||
573 | else | ||
574 | ata_bmdma_start_pio(qc); | ||
575 | } | ||
576 | |||
577 | |||
578 | /** | ||
579 | * ata_bmdma_setup - Set up PCI IDE BMDMA transaction | ||
580 | * @qc: Info associated with this ATA transaction. | ||
581 | * | ||
582 | * Writes address of PRD table to device's PRD Table Address | ||
583 | * register, sets the DMA control register, and calls | ||
584 | * ops->exec_command() to start the transfer. | ||
585 | * | ||
586 | * May be used as the bmdma_setup() entry in ata_port_operations. | ||
587 | * | ||
588 | * LOCKING: | ||
589 | * spin_lock_irqsave(host lock) | ||
590 | */ | ||
591 | void ata_bmdma_setup(struct ata_queued_cmd *qc) | ||
592 | { | ||
593 | if (qc->ap->flags & ATA_FLAG_MMIO) | ||
594 | ata_bmdma_setup_mmio(qc); | ||
595 | else | ||
596 | ata_bmdma_setup_pio(qc); | ||
597 | } | ||
598 | |||
599 | |||
600 | /** | ||
601 | * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt. | 312 | * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt. |
602 | * @ap: Port associated with this ATA transaction. | 313 | * @ap: Port associated with this ATA transaction. |
603 | * | 314 | * |
@@ -608,23 +319,16 @@ void ata_bmdma_setup(struct ata_queued_cmd *qc) | |||
608 | * LOCKING: | 319 | * LOCKING: |
609 | * spin_lock_irqsave(host lock) | 320 | * spin_lock_irqsave(host lock) |
610 | */ | 321 | */ |
611 | |||
612 | void ata_bmdma_irq_clear(struct ata_port *ap) | 322 | void ata_bmdma_irq_clear(struct ata_port *ap) |
613 | { | 323 | { |
614 | if (!ap->ioaddr.bmdma_addr) | 324 | void __iomem *mmio = ap->ioaddr.bmdma_addr; |
325 | |||
326 | if (!mmio) | ||
615 | return; | 327 | return; |
616 | 328 | ||
617 | if (ap->flags & ATA_FLAG_MMIO) { | 329 | iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS); |
618 | void __iomem *mmio = | ||
619 | ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS; | ||
620 | writeb(readb(mmio), mmio); | ||
621 | } else { | ||
622 | unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS; | ||
623 | outb(inb(addr), addr); | ||
624 | } | ||
625 | } | 330 | } |
626 | 331 | ||
627 | |||
628 | /** | 332 | /** |
629 | * ata_bmdma_status - Read PCI IDE BMDMA status | 333 | * ata_bmdma_status - Read PCI IDE BMDMA status |
630 | * @ap: Port associated with this ATA transaction. | 334 | * @ap: Port associated with this ATA transaction. |
@@ -636,19 +340,11 @@ void ata_bmdma_irq_clear(struct ata_port *ap) | |||
636 | * LOCKING: | 340 | * LOCKING: |
637 | * spin_lock_irqsave(host lock) | 341 | * spin_lock_irqsave(host lock) |
638 | */ | 342 | */ |
639 | |||
640 | u8 ata_bmdma_status(struct ata_port *ap) | 343 | u8 ata_bmdma_status(struct ata_port *ap) |
641 | { | 344 | { |
642 | u8 host_stat; | 345 | return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); |
643 | if (ap->flags & ATA_FLAG_MMIO) { | ||
644 | void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; | ||
645 | host_stat = readb(mmio + ATA_DMA_STATUS); | ||
646 | } else | ||
647 | host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); | ||
648 | return host_stat; | ||
649 | } | 346 | } |
650 | 347 | ||
651 | |||
652 | /** | 348 | /** |
653 | * ata_bmdma_stop - Stop PCI IDE BMDMA transfer | 349 | * ata_bmdma_stop - Stop PCI IDE BMDMA transfer |
654 | * @qc: Command we are ending DMA for | 350 | * @qc: Command we are ending DMA for |
@@ -660,21 +356,14 @@ u8 ata_bmdma_status(struct ata_port *ap) | |||
660 | * LOCKING: | 356 | * LOCKING: |
661 | * spin_lock_irqsave(host lock) | 357 | * spin_lock_irqsave(host lock) |
662 | */ | 358 | */ |
663 | |||
664 | void ata_bmdma_stop(struct ata_queued_cmd *qc) | 359 | void ata_bmdma_stop(struct ata_queued_cmd *qc) |
665 | { | 360 | { |
666 | struct ata_port *ap = qc->ap; | 361 | struct ata_port *ap = qc->ap; |
667 | if (ap->flags & ATA_FLAG_MMIO) { | 362 | void __iomem *mmio = ap->ioaddr.bmdma_addr; |
668 | void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; | ||
669 | 363 | ||
670 | /* clear start/stop bit */ | 364 | /* clear start/stop bit */ |
671 | writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START, | 365 | iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START, |
672 | mmio + ATA_DMA_CMD); | 366 | mmio + ATA_DMA_CMD); |
673 | } else { | ||
674 | /* clear start/stop bit */ | ||
675 | outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START, | ||
676 | ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | ||
677 | } | ||
678 | 367 | ||
679 | /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ | 368 | /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ |
680 | ata_altstatus(ap); /* dummy read */ | 369 | ata_altstatus(ap); /* dummy read */ |
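ata_bmdma_stop() above ends with a dummy ata_altstatus() read: it provides the one-PIO-cycle HDMA1:0 wait required by the spec, and a read on the same path also flushes any posted write to the DMA command register. A hedged, generic sketch of that read-back idiom with hypothetical register offsets (not libata code):

#include <linux/io.h>

#define TOY_REG_CMD	0x0	/* hypothetical command register */
#define TOY_REG_STAT	0x2	/* hypothetical status register */
#define TOY_START	0x1	/* hypothetical start/stop bit */

/* Clear the start bit, then read a register in the same region so the
 * posted write is known to have reached the device before returning.
 */
static void toy_stop_engine(void __iomem *base)
{
	u8 cmd = ioread8(base + TOY_REG_CMD);

	iowrite8(cmd & ~TOY_START, base + TOY_REG_CMD);
	(void)ioread8(base + TOY_REG_STAT);	/* posted-write flush */
}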
@@ -696,10 +385,7 @@ void ata_bmdma_freeze(struct ata_port *ap) | |||
696 | ap->ctl |= ATA_NIEN; | 385 | ap->ctl |= ATA_NIEN; |
697 | ap->last_ctl = ap->ctl; | 386 | ap->last_ctl = ap->ctl; |
698 | 387 | ||
699 | if (ap->flags & ATA_FLAG_MMIO) | 388 | iowrite8(ap->ctl, ioaddr->ctl_addr); |
700 | writeb(ap->ctl, (void __iomem *)ioaddr->ctl_addr); | ||
701 | else | ||
702 | outb(ap->ctl, ioaddr->ctl_addr); | ||
703 | 389 | ||
704 | /* Under certain circumstances, some controllers raise IRQ on | 390 | /* Under certain circumstances, some controllers raise IRQ on |
705 | * ATA_NIEN manipulation. Also, many controllers fail to mask | 391 | * ATA_NIEN manipulation. Also, many controllers fail to mask |
@@ -724,8 +410,7 @@ void ata_bmdma_thaw(struct ata_port *ap) | |||
724 | /* clear & re-enable interrupts */ | 410 | /* clear & re-enable interrupts */ |
725 | ata_chk_status(ap); | 411 | ata_chk_status(ap); |
726 | ap->ops->irq_clear(ap); | 412 | ap->ops->irq_clear(ap); |
727 | if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */ | 413 | ap->ops->irq_on(ap); |
728 | ata_irq_on(ap); | ||
729 | } | 414 | } |
730 | 415 | ||
731 | /** | 416 | /** |
@@ -775,7 +460,7 @@ void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset, | |||
775 | * really a timeout event, adjust error mask and | 460 | * really a timeout event, adjust error mask and |
776 | * cancel frozen state. | 461 | * cancel frozen state. |
777 | */ | 462 | */ |
778 | if (qc->err_mask == AC_ERR_TIMEOUT && host_stat & ATA_DMA_ERR) { | 463 | if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) { |
779 | qc->err_mask = AC_ERR_HOST_BUS; | 464 | qc->err_mask = AC_ERR_HOST_BUS; |
780 | thaw = 1; | 465 | thaw = 1; |
781 | } | 466 | } |
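The test above consults the BMDMA status register: when a command timed out but the controller reports ATA_DMA_ERR, the failure is reclassified as a host-bus error and the port can be thawed. For reference, a small hedged sketch of decoding that byte with the bit names from <linux/ata.h> (hypothetical helper, not part of the commit):

#include <linux/ata.h>
#include <linux/io.h>

/* Hypothetical helper: classify a BMDMA status byte. */
static const char *toy_bmdma_state(void __iomem *bmdma)
{
	u8 st = ioread8(bmdma + ATA_DMA_STATUS);

	if (st & ATA_DMA_ERR)
		return "bus error";
	if (st & ATA_DMA_INTR)
		return "interrupt pending";
	if (st & ATA_DMA_ACTIVE)
		return "transfer active";
	return "idle";
}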
@@ -832,6 +517,21 @@ void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc) | |||
832 | } | 517 | } |
833 | 518 | ||
834 | #ifdef CONFIG_PCI | 519 | #ifdef CONFIG_PCI |
520 | |||
521 | static int ata_resources_present(struct pci_dev *pdev, int port) | ||
522 | { | ||
523 | int i; | ||
524 | |||
525 | /* Check the PCI resources for this channel are enabled */ | ||
526 | port = port * 2; | ||
527 | for (i = 0; i < 2; i ++) { | ||
528 | if (pci_resource_start(pdev, port + i) == 0 || | ||
529 | pci_resource_len(pdev, port + i) == 0) | ||
530 | return 0; | ||
531 | } | ||
532 | return 1; | ||
533 | } | ||
534 | |||
835 | /** | 535 | /** |
836 | * ata_pci_init_native_mode - Initialize native-mode driver | 536 | * ata_pci_init_native_mode - Initialize native-mode driver |
837 | * @pdev: pci device to be initialized | 537 | * @pdev: pci device to be initialized |
@@ -853,45 +553,62 @@ void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc) | |||
853 | struct ata_probe_ent * | 553 | struct ata_probe_ent * |
854 | ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports) | 554 | ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports) |
855 | { | 555 | { |
856 | struct ata_probe_ent *probe_ent = | 556 | struct ata_probe_ent *probe_ent; |
857 | ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]); | 557 | int i, p = 0; |
858 | int p = 0; | 558 | void __iomem * const *iomap; |
859 | unsigned long bmdma; | 559 | |
560 | /* iomap BARs */ | ||
561 | for (i = 0; i < 4; i++) { | ||
562 | if (pcim_iomap(pdev, i, 0) == NULL) { | ||
563 | dev_printk(KERN_ERR, &pdev->dev, | ||
564 | "failed to iomap PCI BAR %d\n", i); | ||
565 | return NULL; | ||
566 | } | ||
567 | } | ||
860 | 568 | ||
569 | pcim_iomap(pdev, 4, 0); /* may fail */ | ||
570 | iomap = pcim_iomap_table(pdev); | ||
571 | |||
572 | /* alloc and init probe_ent */ | ||
573 | probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]); | ||
861 | if (!probe_ent) | 574 | if (!probe_ent) |
862 | return NULL; | 575 | return NULL; |
863 | 576 | ||
864 | probe_ent->irq = pdev->irq; | 577 | probe_ent->irq = pdev->irq; |
865 | probe_ent->irq_flags = IRQF_SHARED; | 578 | probe_ent->irq_flags = IRQF_SHARED; |
579 | |||
580 | /* Discard disabled ports. Some controllers show their | ||
581 | unused channels this way */ | ||
582 | if (ata_resources_present(pdev, 0) == 0) | ||
583 | ports &= ~ATA_PORT_PRIMARY; | ||
584 | if (ata_resources_present(pdev, 1) == 0) | ||
585 | ports &= ~ATA_PORT_SECONDARY; | ||
866 | 586 | ||
867 | if (ports & ATA_PORT_PRIMARY) { | 587 | if (ports & ATA_PORT_PRIMARY) { |
868 | probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0); | 588 | probe_ent->port[p].cmd_addr = iomap[0]; |
869 | probe_ent->port[p].altstatus_addr = | 589 | probe_ent->port[p].altstatus_addr = |
870 | probe_ent->port[p].ctl_addr = | 590 | probe_ent->port[p].ctl_addr = (void __iomem *) |
871 | pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS; | 591 | ((unsigned long)iomap[1] | ATA_PCI_CTL_OFS); |
872 | bmdma = pci_resource_start(pdev, 4); | 592 | if (iomap[4]) { |
873 | if (bmdma) { | ||
874 | if ((!(port[p]->flags & ATA_FLAG_IGN_SIMPLEX)) && | 593 | if ((!(port[p]->flags & ATA_FLAG_IGN_SIMPLEX)) && |
875 | (inb(bmdma + 2) & 0x80)) | 594 | (ioread8(iomap[4] + 2) & 0x80)) |
876 | probe_ent->_host_flags |= ATA_HOST_SIMPLEX; | 595 | probe_ent->_host_flags |= ATA_HOST_SIMPLEX; |
877 | probe_ent->port[p].bmdma_addr = bmdma; | 596 | probe_ent->port[p].bmdma_addr = iomap[4]; |
878 | } | 597 | } |
879 | ata_std_ports(&probe_ent->port[p]); | 598 | ata_std_ports(&probe_ent->port[p]); |
880 | p++; | 599 | p++; |
881 | } | 600 | } |
882 | 601 | ||
883 | if (ports & ATA_PORT_SECONDARY) { | 602 | if (ports & ATA_PORT_SECONDARY) { |
884 | probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2); | 603 | probe_ent->port[p].cmd_addr = iomap[2]; |
885 | probe_ent->port[p].altstatus_addr = | 604 | probe_ent->port[p].altstatus_addr = |
886 | probe_ent->port[p].ctl_addr = | 605 | probe_ent->port[p].ctl_addr = (void __iomem *) |
887 | pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS; | 606 | ((unsigned long)iomap[3] | ATA_PCI_CTL_OFS); |
888 | bmdma = pci_resource_start(pdev, 4); | 607 | if (iomap[4]) { |
889 | if (bmdma) { | ||
890 | bmdma += 8; | ||
891 | if ((!(port[p]->flags & ATA_FLAG_IGN_SIMPLEX)) && | 608 | if ((!(port[p]->flags & ATA_FLAG_IGN_SIMPLEX)) && |
892 | (inb(bmdma + 2) & 0x80)) | 609 | (ioread8(iomap[4] + 10) & 0x80)) |
893 | probe_ent->_host_flags |= ATA_HOST_SIMPLEX; | 610 | probe_ent->_host_flags |= ATA_HOST_SIMPLEX; |
894 | probe_ent->port[p].bmdma_addr = bmdma; | 611 | probe_ent->port[p].bmdma_addr = iomap[4] + 8; |
895 | } | 612 | } |
896 | ata_std_ports(&probe_ent->port[p]); | 613 | ata_std_ports(&probe_ent->port[p]); |
897 | probe_ent->pinfo2 = port[1]; | 614 | probe_ent->pinfo2 = port[1]; |
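In the native-mode path above, BARs 0-3 are mapped with pcim_iomap() and the cookies fetched through pcim_iomap_table(); being managed ("pcim_") allocations they are torn down automatically with the device, which is why the explicit unwind code could be dropped. A sketch of the same pattern in a hypothetical PCI probe routine (names are illustrative only):

#include <linux/pci.h>

/* Hypothetical probe: BAR 0 is mandatory, BAR 4 is optional. */
static int toy_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem * const *iomap;
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	if (pcim_iomap(pdev, 0, 0) == NULL)
		return -ENOMEM;		/* devres undoes the enable */
	pcim_iomap(pdev, 4, 0);		/* may fail; treated as absent */

	iomap = pcim_iomap_table(pdev);
	if (!iomap[0])			/* paranoia; mapped above */
		return -ENOMEM;
	return 0;			/* iomap[4] may still be NULL */
}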
@@ -902,13 +619,29 @@ ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int | |||
902 | return probe_ent; | 619 | return probe_ent; |
903 | } | 620 | } |
904 | 621 | ||
905 | |||
906 | static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, | 622 | static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, |
907 | struct ata_port_info **port, int port_mask) | 623 | struct ata_port_info **port, int port_mask) |
908 | { | 624 | { |
909 | struct ata_probe_ent *probe_ent; | 625 | struct ata_probe_ent *probe_ent; |
910 | unsigned long bmdma = pci_resource_start(pdev, 4); | 626 | void __iomem *iomap[5] = { }, *bmdma; |
627 | |||
628 | if (port_mask & ATA_PORT_PRIMARY) { | ||
629 | iomap[0] = devm_ioport_map(&pdev->dev, ATA_PRIMARY_CMD, 8); | ||
630 | iomap[1] = devm_ioport_map(&pdev->dev, ATA_PRIMARY_CTL, 1); | ||
631 | if (!iomap[0] || !iomap[1]) | ||
632 | return NULL; | ||
633 | } | ||
634 | |||
635 | if (port_mask & ATA_PORT_SECONDARY) { | ||
636 | iomap[2] = devm_ioport_map(&pdev->dev, ATA_SECONDARY_CMD, 8); | ||
637 | iomap[3] = devm_ioport_map(&pdev->dev, ATA_SECONDARY_CTL, 1); | ||
638 | if (!iomap[2] || !iomap[3]) | ||
639 | return NULL; | ||
640 | } | ||
911 | 641 | ||
642 | bmdma = pcim_iomap(pdev, 4, 16); /* may fail */ | ||
643 | |||
644 | /* alloc and init probe_ent */ | ||
912 | probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]); | 645 | probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]); |
913 | if (!probe_ent) | 646 | if (!probe_ent) |
914 | return NULL; | 647 | return NULL; |
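In legacy mode there is no BAR to map, so the fixed port blocks (ATA_PRIMARY_CMD/CTL, ATA_SECONDARY_CMD/CTL) are wrapped with devm_ioport_map(), which yields the same kind of void __iomem * cookie as pcim_iomap(). A hedged sketch of that wrapping for the classic primary channel (hypothetical helper using the standard 0x1f0/0x3f6 ports):

#include <linux/device.h>
#include <linux/io.h>

/* Hypothetical helper: map the legacy primary IDE ports and read the
 * alternate status register through the iomap cookie.  Both mappings
 * are device-managed, so no explicit unmap is needed.
 */
static int toy_map_legacy_primary(struct device *dev, u8 *altstatus)
{
	void __iomem *cmd = devm_ioport_map(dev, 0x1f0, 8);
	void __iomem *ctl = devm_ioport_map(dev, 0x3f6, 1);

	if (!cmd || !ctl)
		return -ENOMEM;

	*altstatus = ioread8(ctl);	/* same accessor as for MMIO */
	return 0;
}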
@@ -918,13 +651,13 @@ static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, | |||
918 | 651 | ||
919 | if (port_mask & ATA_PORT_PRIMARY) { | 652 | if (port_mask & ATA_PORT_PRIMARY) { |
920 | probe_ent->irq = ATA_PRIMARY_IRQ(pdev); | 653 | probe_ent->irq = ATA_PRIMARY_IRQ(pdev); |
921 | probe_ent->port[0].cmd_addr = ATA_PRIMARY_CMD; | 654 | probe_ent->port[0].cmd_addr = iomap[0]; |
922 | probe_ent->port[0].altstatus_addr = | 655 | probe_ent->port[0].altstatus_addr = |
923 | probe_ent->port[0].ctl_addr = ATA_PRIMARY_CTL; | 656 | probe_ent->port[0].ctl_addr = iomap[1]; |
924 | if (bmdma) { | 657 | if (bmdma) { |
925 | probe_ent->port[0].bmdma_addr = bmdma; | 658 | probe_ent->port[0].bmdma_addr = bmdma; |
926 | if ((!(port[0]->flags & ATA_FLAG_IGN_SIMPLEX)) && | 659 | if ((!(port[0]->flags & ATA_FLAG_IGN_SIMPLEX)) && |
927 | (inb(bmdma + 2) & 0x80)) | 660 | (ioread8(bmdma + 2) & 0x80)) |
928 | probe_ent->_host_flags |= ATA_HOST_SIMPLEX; | 661 | probe_ent->_host_flags |= ATA_HOST_SIMPLEX; |
929 | } | 662 | } |
930 | ata_std_ports(&probe_ent->port[0]); | 663 | ata_std_ports(&probe_ent->port[0]); |
@@ -936,13 +669,13 @@ static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, | |||
936 | probe_ent->irq2 = ATA_SECONDARY_IRQ(pdev); | 669 | probe_ent->irq2 = ATA_SECONDARY_IRQ(pdev); |
937 | else | 670 | else |
938 | probe_ent->irq = ATA_SECONDARY_IRQ(pdev); | 671 | probe_ent->irq = ATA_SECONDARY_IRQ(pdev); |
939 | probe_ent->port[1].cmd_addr = ATA_SECONDARY_CMD; | 672 | probe_ent->port[1].cmd_addr = iomap[2]; |
940 | probe_ent->port[1].altstatus_addr = | 673 | probe_ent->port[1].altstatus_addr = |
941 | probe_ent->port[1].ctl_addr = ATA_SECONDARY_CTL; | 674 | probe_ent->port[1].ctl_addr = iomap[3]; |
942 | if (bmdma) { | 675 | if (bmdma) { |
943 | probe_ent->port[1].bmdma_addr = bmdma + 8; | 676 | probe_ent->port[1].bmdma_addr = bmdma + 8; |
944 | if ((!(port[1]->flags & ATA_FLAG_IGN_SIMPLEX)) && | 677 | if ((!(port[1]->flags & ATA_FLAG_IGN_SIMPLEX)) && |
945 | (inb(bmdma + 10) & 0x80)) | 678 | (ioread8(bmdma + 10) & 0x80)) |
946 | probe_ent->_host_flags |= ATA_HOST_SIMPLEX; | 679 | probe_ent->_host_flags |= ATA_HOST_SIMPLEX; |
947 | } | 680 | } |
948 | ata_std_ports(&probe_ent->port[1]); | 681 | ata_std_ports(&probe_ent->port[1]); |
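ata_std_ports(), called in both branches above, derives the remaining taskfile register addresses from cmd_addr; after the iomap conversion that is plain pointer arithmetic on the cookie. Roughly as follows (a paraphrase from memory of the libata headers of this era, shown only to make the port setup above concrete, not taken from this diff):

#include <linux/libata.h>

static inline void std_ports_sketch(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr    = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr   = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr   = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr    = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr    = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr    = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr  = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr  = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}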
@@ -984,15 +717,18 @@ static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, | |||
984 | int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, | 717 | int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, |
985 | unsigned int n_ports) | 718 | unsigned int n_ports) |
986 | { | 719 | { |
720 | struct device *dev = &pdev->dev; | ||
987 | struct ata_probe_ent *probe_ent = NULL; | 721 | struct ata_probe_ent *probe_ent = NULL; |
988 | struct ata_port_info *port[2]; | 722 | struct ata_port_info *port[2]; |
989 | u8 mask; | 723 | u8 mask; |
990 | unsigned int legacy_mode = 0; | 724 | unsigned int legacy_mode = 0; |
991 | int disable_dev_on_err = 1; | ||
992 | int rc; | 725 | int rc; |
993 | 726 | ||
994 | DPRINTK("ENTER\n"); | 727 | DPRINTK("ENTER\n"); |
995 | 728 | ||
729 | if (!devres_open_group(dev, NULL, GFP_KERNEL)) | ||
730 | return -ENOMEM; | ||
731 | |||
996 | BUG_ON(n_ports < 1 || n_ports > 2); | 732 | BUG_ON(n_ports < 1 || n_ports > 2); |
997 | 733 | ||
998 | port[0] = port_info[0]; | 734 | port[0] = port_info[0]; |
@@ -1009,9 +745,9 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, | |||
1009 | boot for the primary video which is BIOS enabled | 745 | boot for the primary video which is BIOS enabled |
1010 | */ | 746 | */ |
1011 | 747 | ||
1012 | rc = pci_enable_device(pdev); | 748 | rc = pcim_enable_device(pdev); |
1013 | if (rc) | 749 | if (rc) |
1014 | return rc; | 750 | goto err_out; |
1015 | 751 | ||
1016 | if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) { | 752 | if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) { |
1017 | u8 tmp8; | 753 | u8 tmp8; |
@@ -1027,7 +763,8 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, | |||
1027 | left a device in compatibility mode */ | 763 | left a device in compatibility mode */ |
1028 | if (legacy_mode) { | 764 | if (legacy_mode) { |
1029 | printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n"); | 765 | printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n"); |
1030 | return -EOPNOTSUPP; | 766 | rc = -EOPNOTSUPP; |
767 | goto err_out; | ||
1031 | } | 768 | } |
1032 | #endif | 769 | #endif |
1033 | } | 770 | } |
@@ -1035,13 +772,13 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, | |||
1035 | if (!legacy_mode) { | 772 | if (!legacy_mode) { |
1036 | rc = pci_request_regions(pdev, DRV_NAME); | 773 | rc = pci_request_regions(pdev, DRV_NAME); |
1037 | if (rc) { | 774 | if (rc) { |
1038 | disable_dev_on_err = 0; | 775 | pcim_pin_device(pdev); |
1039 | goto err_out; | 776 | goto err_out; |
1040 | } | 777 | } |
1041 | } else { | 778 | } else { |
1042 | /* Deal with combined mode hack. This side of the logic all | 779 | /* Deal with combined mode hack. This side of the logic all |
1043 | goes away once the combined mode hack is killed in 2.6.21 */ | 780 | goes away once the combined mode hack is killed in 2.6.21 */ |
1044 | if (!request_region(ATA_PRIMARY_CMD, 8, "libata")) { | 781 | if (!devm_request_region(dev, ATA_PRIMARY_CMD, 8, "libata")) { |
1045 | struct resource *conflict, res; | 782 | struct resource *conflict, res; |
1046 | res.start = ATA_PRIMARY_CMD; | 783 | res.start = ATA_PRIMARY_CMD; |
1047 | res.end = ATA_PRIMARY_CMD + 8 - 1; | 784 | res.end = ATA_PRIMARY_CMD + 8 - 1; |
@@ -1051,7 +788,7 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, | |||
1051 | if (!strcmp(conflict->name, "libata")) | 788 | if (!strcmp(conflict->name, "libata")) |
1052 | legacy_mode |= ATA_PORT_PRIMARY; | 789 | legacy_mode |= ATA_PORT_PRIMARY; |
1053 | else { | 790 | else { |
1054 | disable_dev_on_err = 0; | 791 | pcim_pin_device(pdev); |
1055 | printk(KERN_WARNING "ata: 0x%0X IDE port busy\n" \ | 792 | printk(KERN_WARNING "ata: 0x%0X IDE port busy\n" \ |
1056 | "ata: conflict with %s\n", | 793 | "ata: conflict with %s\n", |
1057 | ATA_PRIMARY_CMD, | 794 | ATA_PRIMARY_CMD, |
@@ -1060,7 +797,7 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, | |||
1060 | } else | 797 | } else |
1061 | legacy_mode |= ATA_PORT_PRIMARY; | 798 | legacy_mode |= ATA_PORT_PRIMARY; |
1062 | 799 | ||
1063 | if (!request_region(ATA_SECONDARY_CMD, 8, "libata")) { | 800 | if (!devm_request_region(dev, ATA_SECONDARY_CMD, 8, "libata")) { |
1064 | struct resource *conflict, res; | 801 | struct resource *conflict, res; |
1065 | res.start = ATA_SECONDARY_CMD; | 802 | res.start = ATA_SECONDARY_CMD; |
1066 | res.end = ATA_SECONDARY_CMD + 8 - 1; | 803 | res.end = ATA_SECONDARY_CMD + 8 - 1; |
@@ -1070,7 +807,7 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, | |||
1070 | if (!strcmp(conflict->name, "libata")) | 807 | if (!strcmp(conflict->name, "libata")) |
1071 | legacy_mode |= ATA_PORT_SECONDARY; | 808 | legacy_mode |= ATA_PORT_SECONDARY; |
1072 | else { | 809 | else { |
1073 | disable_dev_on_err = 0; | 810 | pcim_pin_device(pdev); |
1074 | printk(KERN_WARNING "ata: 0x%X IDE port busy\n" \ | 811 | printk(KERN_WARNING "ata: 0x%X IDE port busy\n" \ |
1075 | "ata: conflict with %s\n", | 812 | "ata: conflict with %s\n", |
1076 | ATA_SECONDARY_CMD, | 813 | ATA_SECONDARY_CMD, |
@@ -1090,16 +827,16 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, | |||
1090 | /* we have legacy mode, but all ports are unavailable */ | 827 | /* we have legacy mode, but all ports are unavailable */ |
1091 | if (legacy_mode == (1 << 3)) { | 828 | if (legacy_mode == (1 << 3)) { |
1092 | rc = -EBUSY; | 829 | rc = -EBUSY; |
1093 | goto err_out_regions; | 830 | goto err_out; |
1094 | } | 831 | } |
1095 | 832 | ||
1096 | /* TODO: If we get no DMA mask we should fall back to PIO */ | 833 | /* TODO: If we get no DMA mask we should fall back to PIO */ |
1097 | rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); | 834 | rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); |
1098 | if (rc) | 835 | if (rc) |
1099 | goto err_out_regions; | 836 | goto err_out; |
1100 | rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); | 837 | rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); |
1101 | if (rc) | 838 | if (rc) |
1102 | goto err_out_regions; | 839 | goto err_out; |
1103 | 840 | ||
1104 | if (legacy_mode) { | 841 | if (legacy_mode) { |
1105 | probe_ent = ata_pci_init_legacy_port(pdev, port, legacy_mode); | 842 | probe_ent = ata_pci_init_legacy_port(pdev, port, legacy_mode); |
@@ -1111,40 +848,22 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, | |||
1111 | } | 848 | } |
1112 | if (!probe_ent) { | 849 | if (!probe_ent) { |
1113 | rc = -ENOMEM; | 850 | rc = -ENOMEM; |
1114 | goto err_out_regions; | 851 | goto err_out; |
1115 | } | 852 | } |
1116 | 853 | ||
1117 | pci_set_master(pdev); | 854 | pci_set_master(pdev); |
1118 | 855 | ||
1119 | if (!ata_device_add(probe_ent)) { | 856 | if (!ata_device_add(probe_ent)) { |
1120 | rc = -ENODEV; | 857 | rc = -ENODEV; |
1121 | goto err_out_ent; | 858 | goto err_out; |
1122 | } | 859 | } |
1123 | 860 | ||
1124 | kfree(probe_ent); | 861 | devm_kfree(dev, probe_ent); |
1125 | 862 | devres_remove_group(dev, NULL); | |
1126 | return 0; | 863 | return 0; |
1127 | 864 | ||
1128 | err_out_ent: | ||
1129 | kfree(probe_ent); | ||
1130 | err_out_regions: | ||
1131 | /* All this conditional stuff is needed for the combined mode hack | ||
1132 | until 2.6.21 when it can go */ | ||
1133 | if (legacy_mode) { | ||
1134 | pci_release_region(pdev, 4); | ||
1135 | if (legacy_mode & ATA_PORT_PRIMARY) { | ||
1136 | release_region(ATA_PRIMARY_CMD, 8); | ||
1137 | pci_release_region(pdev, 1); | ||
1138 | } | ||
1139 | if (legacy_mode & ATA_PORT_SECONDARY) { | ||
1140 | release_region(ATA_SECONDARY_CMD, 8); | ||
1141 | pci_release_region(pdev, 3); | ||
1142 | } | ||
1143 | } else | ||
1144 | pci_release_regions(pdev); | ||
1145 | err_out: | 865 | err_out: |
1146 | if (disable_dev_on_err) | 866 | devres_release_group(dev, NULL); |
1147 | pci_disable_device(pdev); | ||
1148 | return rc; | 867 | return rc; |
1149 | } | 868 | } |
1150 | 869 | ||
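The error handling in ata_pci_init_one() collapses because everything acquired after devres_open_group() is tracked by devres: on success devres_remove_group() keeps the resources but discards the group marker, and on failure a single devres_release_group() frees them all. The same skeleton, sketched for a hypothetical init function (not the libata code):

#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/pci.h>

/* Hypothetical init: all pcim_*/devm_* allocations made between
 * devres_open_group() and the end of the function belong to one
 * group, so the error path is a single release call.
 */
static int toy_init_one(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	int rc;

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	rc = pcim_enable_device(pdev);
	if (rc)
		goto err_out;

	rc = pci_request_regions(pdev, "toy");
	if (rc) {
		pcim_pin_device(pdev);	/* keep the device enabled on unwind */
		goto err_out;
	}

	devres_remove_group(dev, NULL);	/* success: keep the resources */
	return 0;

err_out:
	devres_release_group(dev, NULL);	/* failure: free everything */
	return rc;
}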