Diffstat (limited to 'drivers/ata/libata-sff.c')
-rw-r--r--  drivers/ata/libata-sff.c  1805
1 file changed, 1008 insertions(+), 797 deletions(-)
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index e3877b6843c9..e30c537cce32 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -40,10 +40,12 @@
 
 #include "libata.h"
 
+static struct workqueue_struct *ata_sff_wq;
+
 const struct ata_port_operations ata_sff_port_ops = {
 	.inherits		= &ata_base_port_ops,
 
-	.qc_prep		= ata_sff_qc_prep,
+	.qc_prep		= ata_noop_qc_prep,
 	.qc_issue		= ata_sff_qc_issue,
 	.qc_fill_rtf		= ata_sff_qc_fill_rtf,
 
@@ -53,9 +55,7 @@ const struct ata_port_operations ata_sff_port_ops = {
 	.softreset		= ata_sff_softreset,
 	.hardreset		= sata_sff_hardreset,
 	.postreset		= ata_sff_postreset,
-	.drain_fifo		= ata_sff_drain_fifo,
 	.error_handler		= ata_sff_error_handler,
-	.post_internal_cmd	= ata_sff_post_internal_cmd,
 
 	.sff_dev_select		= ata_sff_dev_select,
 	.sff_check_status	= ata_sff_check_status,
@@ -63,178 +63,12 @@ const struct ata_port_operations ata_sff_port_ops = {
 	.sff_tf_read		= ata_sff_tf_read,
 	.sff_exec_command	= ata_sff_exec_command,
 	.sff_data_xfer		= ata_sff_data_xfer,
-	.sff_irq_on		= ata_sff_irq_on,
-	.sff_irq_clear		= ata_sff_irq_clear,
+	.sff_drain_fifo		= ata_sff_drain_fifo,
 
 	.lost_interrupt		= ata_sff_lost_interrupt,
-
-	.port_start		= ata_sff_port_start,
 };
 EXPORT_SYMBOL_GPL(ata_sff_port_ops);
 
-const struct ata_port_operations ata_bmdma_port_ops = {
-	.inherits		= &ata_sff_port_ops,
-
-	.mode_filter		= ata_bmdma_mode_filter,
-
-	.bmdma_setup		= ata_bmdma_setup,
-	.bmdma_start		= ata_bmdma_start,
-	.bmdma_stop		= ata_bmdma_stop,
-	.bmdma_status		= ata_bmdma_status,
-};
-EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
-
-const struct ata_port_operations ata_bmdma32_port_ops = {
-	.inherits		= &ata_bmdma_port_ops,
-
-	.sff_data_xfer		= ata_sff_data_xfer32,
-	.port_start		= ata_sff_port_start32,
-};
-EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
-
-/**
- * ata_fill_sg - Fill PCI IDE PRD table
- * @qc: Metadata associated with taskfile to be transferred
- *
- * Fill PCI IDE PRD (scatter-gather) table with segments
- * associated with the current disk command.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- *
- */
-static void ata_fill_sg(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	struct scatterlist *sg;
-	unsigned int si, pi;
-
-	pi = 0;
-	for_each_sg(qc->sg, sg, qc->n_elem, si) {
-		u32 addr, offset;
-		u32 sg_len, len;
-
-		/* determine if physical DMA addr spans 64K boundary.
-		 * Note h/w doesn't support 64-bit, so we unconditionally
-		 * truncate dma_addr_t to u32.
-		 */
-		addr = (u32) sg_dma_address(sg);
-		sg_len = sg_dma_len(sg);
-
-		while (sg_len) {
-			offset = addr & 0xffff;
-			len = sg_len;
-			if ((offset + sg_len) > 0x10000)
-				len = 0x10000 - offset;
-
-			ap->prd[pi].addr = cpu_to_le32(addr);
-			ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
-			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
-
-			pi++;
-			sg_len -= len;
-			addr += len;
-		}
-	}
-
-	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
-}
-
-/**
- * ata_fill_sg_dumb - Fill PCI IDE PRD table
- * @qc: Metadata associated with taskfile to be transferred
- *
- * Fill PCI IDE PRD (scatter-gather) table with segments
- * associated with the current disk command. Perform the fill
- * so that we avoid writing any length 64K records for
- * controllers that don't follow the spec.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- *
- */
-static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	struct scatterlist *sg;
-	unsigned int si, pi;
-
-	pi = 0;
-	for_each_sg(qc->sg, sg, qc->n_elem, si) {
-		u32 addr, offset;
-		u32 sg_len, len, blen;
-
-		/* determine if physical DMA addr spans 64K boundary.
-		 * Note h/w doesn't support 64-bit, so we unconditionally
-		 * truncate dma_addr_t to u32.
-		 */
-		addr = (u32) sg_dma_address(sg);
-		sg_len = sg_dma_len(sg);
-
-		while (sg_len) {
-			offset = addr & 0xffff;
-			len = sg_len;
-			if ((offset + sg_len) > 0x10000)
-				len = 0x10000 - offset;
-
-			blen = len & 0xffff;
-			ap->prd[pi].addr = cpu_to_le32(addr);
-			if (blen == 0) {
-				/* Some PATA chipsets like the CS5530 can't
-				   cope with 0x0000 meaning 64K as the spec
-				   says */
-				ap->prd[pi].flags_len = cpu_to_le32(0x8000);
-				blen = 0x8000;
-				ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
-			}
-			ap->prd[pi].flags_len = cpu_to_le32(blen);
-			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
-
-			pi++;
-			sg_len -= len;
-			addr += len;
-		}
-	}
-
-	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
-}
-
-/**
- * ata_sff_qc_prep - Prepare taskfile for submission
- * @qc: Metadata associated with taskfile to be prepared
- *
- * Prepare ATA taskfile for submission.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_sff_qc_prep(struct ata_queued_cmd *qc)
-{
-	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
-
-	ata_fill_sg(qc);
-}
-EXPORT_SYMBOL_GPL(ata_sff_qc_prep);
-
-/**
- * ata_sff_dumb_qc_prep - Prepare taskfile for submission
- * @qc: Metadata associated with taskfile to be prepared
- *
- * Prepare ATA taskfile for submission.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc)
-{
-	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
-
-	ata_fill_sg_dumb(qc);
-}
-EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep);
-
 /**
  * ata_sff_check_status - Read device status reg & clear interrupt
  * @ap: port where the device is
@@ -446,6 +280,27 @@ int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
 EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
 
 /**
+ * ata_sff_set_devctl - Write device control reg
+ * @ap: port where the device is
+ * @ctl: value to write
+ *
+ * Writes ATA taskfile device control register.
+ *
+ * Note: may NOT be used as the sff_set_devctl() entry in
+ * ata_port_operations.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static void ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
+{
+	if (ap->ops->sff_set_devctl)
+		ap->ops->sff_set_devctl(ap, ctl);
+	else
+		iowrite8(ctl, ap->ioaddr.ctl_addr);
+}
+
+/**
  * ata_sff_dev_select - Select device 0/1 on ATA bus
  * @ap: ATA channel to manipulate
  * @device: ATA device (numbered from zero) to select
@@ -491,7 +346,7 @@ EXPORT_SYMBOL_GPL(ata_sff_dev_select); | |||
491 | * LOCKING: | 346 | * LOCKING: |
492 | * caller. | 347 | * caller. |
493 | */ | 348 | */ |
494 | void ata_dev_select(struct ata_port *ap, unsigned int device, | 349 | static void ata_dev_select(struct ata_port *ap, unsigned int device, |
495 | unsigned int wait, unsigned int can_sleep) | 350 | unsigned int wait, unsigned int can_sleep) |
496 | { | 351 | { |
497 | if (ata_msg_probe(ap)) | 352 | if (ata_msg_probe(ap)) |
@@ -517,50 +372,34 @@ void ata_dev_select(struct ata_port *ap, unsigned int device,
  * Enable interrupts on a legacy IDE device using MMIO or PIO,
  * wait for idle, clear any pending interrupts.
  *
+ * Note: may NOT be used as the sff_irq_on() entry in
+ * ata_port_operations.
+ *
  * LOCKING:
  * Inherited from caller.
  */
-u8 ata_sff_irq_on(struct ata_port *ap)
+void ata_sff_irq_on(struct ata_port *ap)
 {
 	struct ata_ioports *ioaddr = &ap->ioaddr;
-	u8 tmp;
+
+	if (ap->ops->sff_irq_on) {
+		ap->ops->sff_irq_on(ap);
+		return;
+	}
 
 	ap->ctl &= ~ATA_NIEN;
 	ap->last_ctl = ap->ctl;
 
-	if (ioaddr->ctl_addr)
-		iowrite8(ap->ctl, ioaddr->ctl_addr);
-	tmp = ata_wait_idle(ap);
-
-	ap->ops->sff_irq_clear(ap);
+	if (ap->ops->sff_set_devctl || ioaddr->ctl_addr)
+		ata_sff_set_devctl(ap, ap->ctl);
+	ata_wait_idle(ap);
 
-	return tmp;
+	if (ap->ops->sff_irq_clear)
+		ap->ops->sff_irq_clear(ap);
 }
 EXPORT_SYMBOL_GPL(ata_sff_irq_on);
 
 /**
- * ata_sff_irq_clear - Clear PCI IDE BMDMA interrupt.
- * @ap: Port associated with this ATA transaction.
- *
- * Clear interrupt and error flags in DMA status register.
- *
- * May be used as the irq_clear() entry in ata_port_operations.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_sff_irq_clear(struct ata_port *ap)
-{
-	void __iomem *mmio = ap->ioaddr.bmdma_addr;
-
-	if (!mmio)
-		return;
-
-	iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
-}
-EXPORT_SYMBOL_GPL(ata_sff_irq_clear);
-
-/**
  * ata_sff_tf_load - send taskfile registers to host controller
  * @ap: Port to which output is sent
  * @tf: ATA taskfile register set
@@ -894,7 +733,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 				       do_write);
 	}
 
-	if (!do_write)
+	if (!do_write && !PageSlab(page))
 		flush_dcache_page(page);
 
 	qc->curbytes += qc->sect_size;
@@ -962,11 +801,15 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
 	case ATAPI_PROT_NODATA:
 		ap->hsm_task_state = HSM_ST_LAST;
 		break;
+#ifdef CONFIG_ATA_BMDMA
 	case ATAPI_PROT_DMA:
 		ap->hsm_task_state = HSM_ST_LAST;
 		/* initiate bmdma */
 		ap->ops->bmdma_start(qc);
 		break;
+#endif /* CONFIG_ATA_BMDMA */
+	default:
+		BUG();
 	}
 }
 
@@ -1165,7 +1008,7 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
 			qc = ata_qc_from_tag(ap, qc->tag);
 			if (qc) {
 				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
-					ap->ops->sff_irq_on(ap);
+					ata_sff_irq_on(ap);
 					ata_qc_complete(qc);
 				} else
 					ata_port_freeze(ap);
@@ -1181,7 +1024,7 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
 	} else {
 		if (in_wq) {
 			spin_lock_irqsave(ap->lock, flags);
-			ap->ops->sff_irq_on(ap);
+			ata_sff_irq_on(ap);
 			ata_qc_complete(qc);
 			spin_unlock_irqrestore(ap->lock, flags);
 		} else
@@ -1202,7 +1045,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
 int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
 		     u8 status, int in_wq)
 {
-	struct ata_eh_info *ehi = &ap->link.eh_info;
+	struct ata_link *link = qc->dev->link;
+	struct ata_eh_info *ehi = &link->eh_info;
 	unsigned long flags = 0;
 	int poll_next;
 
@@ -1293,7 +1137,7 @@ fsm_start:
 		if (in_wq)
 			spin_unlock_irqrestore(ap->lock, flags);
 
-		/* if polling, ata_pio_task() handles the rest.
+		/* if polling, ata_sff_pio_task() handles the rest.
 		 * otherwise, interrupt handler takes over from here.
 		 */
 		break;
@@ -1458,14 +1302,48 @@ fsm_start:
 }
 EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
 
-void ata_pio_task(struct work_struct *work)
+void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
+{
+	struct ata_port *ap = link->ap;
+
+	WARN_ON((ap->sff_pio_task_link != NULL) &&
+		(ap->sff_pio_task_link != link));
+	ap->sff_pio_task_link = link;
+
+	/* may fail if ata_sff_flush_pio_task() in progress */
+	queue_delayed_work(ata_sff_wq, &ap->sff_pio_task,
+			   msecs_to_jiffies(delay));
+}
+EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);
+
+void ata_sff_flush_pio_task(struct ata_port *ap)
+{
+	DPRINTK("ENTER\n");
+
+	cancel_rearming_delayed_work(&ap->sff_pio_task);
+	ap->hsm_task_state = HSM_ST_IDLE;
+
+	if (ata_msg_ctl(ap))
+		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
+}
+
+static void ata_sff_pio_task(struct work_struct *work)
 {
 	struct ata_port *ap =
-		container_of(work, struct ata_port, port_task.work);
-	struct ata_queued_cmd *qc = ap->port_task_data;
+		container_of(work, struct ata_port, sff_pio_task.work);
+	struct ata_link *link = ap->sff_pio_task_link;
+	struct ata_queued_cmd *qc;
 	u8 status;
 	int poll_next;
 
+	BUG_ON(ap->sff_pio_task_link == NULL);
+	/* qc can be NULL if timeout occurred */
+	qc = ata_qc_from_tag(ap, link->active_tag);
+	if (!qc) {
+		ap->sff_pio_task_link = NULL;
+		return;
+	}
+
 fsm_start:
 	WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
 
@@ -1481,11 +1359,16 @@ fsm_start:
 		msleep(2);
 		status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
 		if (status & ATA_BUSY) {
-			ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
+			ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
 			return;
 		}
 	}
 
+	/*
+	 * hsm_move() may trigger another command to be processed.
+	 * clean the link beforehand.
+	 */
+	ap->sff_pio_task_link = NULL;
 	/* move the HSM */
 	poll_next = ata_sff_hsm_move(ap, qc, status, 1);
 
@@ -1497,15 +1380,11 @@ fsm_start:
 }
 
 /**
- * ata_sff_qc_issue - issue taskfile to device in proto-dependent manner
+ * ata_sff_qc_issue - issue taskfile to a SFF controller
  * @qc: command to issue to device
  *
- * Using various libata functions and hooks, this function
- * starts an ATA command.  ATA commands are grouped into
- * classes called "protocols", and issuing each type of protocol
- * is slightly different.
- *
- * May be used as the qc_issue() entry in ata_port_operations.
+ * This function issues a PIO or NODATA command to a SFF
+ * controller.
  *
  * LOCKING:
  * spin_lock_irqsave(host lock)
@@ -1516,27 +1395,13 @@ fsm_start:
 unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
+	struct ata_link *link = qc->dev->link;
 
 	/* Use polling pio if the LLD doesn't handle
 	 * interrupt driven pio and atapi CDB interrupt.
 	 */
-	if (ap->flags & ATA_FLAG_PIO_POLLING) {
-		switch (qc->tf.protocol) {
-		case ATA_PROT_PIO:
-		case ATA_PROT_NODATA:
-		case ATAPI_PROT_PIO:
-		case ATAPI_PROT_NODATA:
-			qc->tf.flags |= ATA_TFLAG_POLLING;
-			break;
-		case ATAPI_PROT_DMA:
-			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
-				/* see ata_dma_blacklisted() */
-				BUG();
-			break;
-		default:
-			break;
-		}
-	}
+	if (ap->flags & ATA_FLAG_PIO_POLLING)
+		qc->tf.flags |= ATA_TFLAG_POLLING;
 
 	/* select the device */
 	ata_dev_select(ap, qc->dev->devno, 1, 0);
@@ -1551,17 +1416,8 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
 		ap->hsm_task_state = HSM_ST_LAST;
 
 		if (qc->tf.flags & ATA_TFLAG_POLLING)
-			ata_pio_queue_task(ap, qc, 0);
-
-		break;
-
-	case ATA_PROT_DMA:
-		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
+			ata_sff_queue_pio_task(link, 0);
 
-		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
-		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
-		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
-		ap->hsm_task_state = HSM_ST_LAST;
 		break;
 
 	case ATA_PROT_PIO:
@@ -1573,20 +1429,21 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
 			/* PIO data out protocol */
 			ap->hsm_task_state = HSM_ST_FIRST;
-			ata_pio_queue_task(ap, qc, 0);
+			ata_sff_queue_pio_task(link, 0);
 
-			/* always send first data block using
-			 * the ata_pio_task() codepath.
+			/* always send first data block using the
+			 * ata_sff_pio_task() codepath.
 			 */
 		} else {
 			/* PIO data in protocol */
 			ap->hsm_task_state = HSM_ST;
 
 			if (qc->tf.flags & ATA_TFLAG_POLLING)
-				ata_pio_queue_task(ap, qc, 0);
+				ata_sff_queue_pio_task(link, 0);
 
-			/* if polling, ata_pio_task() handles the rest.
-			 * otherwise, interrupt handler takes over from here.
+			/* if polling, ata_sff_pio_task() handles the
+			 * rest.  otherwise, interrupt handler takes
+			 * over from here.
 			 */
 		}
 
@@ -1604,19 +1461,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
 		/* send cdb by polling if no cdb interrupt */
 		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
 		    (qc->tf.flags & ATA_TFLAG_POLLING))
-			ata_pio_queue_task(ap, qc, 0);
-		break;
-
-	case ATAPI_PROT_DMA:
-		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
-
-		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
-		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
-		ap->hsm_task_state = HSM_ST_FIRST;
-
-		/* send cdb by polling if no cdb interrupt */
-		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
-			ata_pio_queue_task(ap, qc, 0);
+			ata_sff_queue_pio_task(link, 0);
 		break;
 
 	default:
@@ -1648,27 +1493,27 @@ bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
 }
 EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);
 
-/**
- * ata_sff_host_intr - Handle host interrupt for given (port, task)
- * @ap: Port on which interrupt arrived (possibly...)
- * @qc: Taskfile currently active in engine
- *
- * Handle host interrupt for given queued command.  Currently,
- * only DMA interrupts are handled.  All other commands are
- * handled via polling with interrupts disabled (nIEN bit).
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- *
- * RETURNS:
- * One if interrupt was handled, zero if not (shared irq).
- */
-unsigned int ata_sff_host_intr(struct ata_port *ap,
-			       struct ata_queued_cmd *qc)
+static unsigned int ata_sff_idle_irq(struct ata_port *ap)
 {
-	struct ata_eh_info *ehi = &ap->link.eh_info;
-	u8 status, host_stat = 0;
-	bool bmdma_stopped = false;
+	ap->stats.idle_irq++;
+
+#ifdef ATA_IRQ_TRAP
+	if ((ap->stats.idle_irq % 1000) == 0) {
+		ap->ops->sff_check_status(ap);
+		if (ap->ops->sff_irq_clear)
+			ap->ops->sff_irq_clear(ap);
+		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
+		return 1;
+	}
+#endif
+	return 0;	/* irq not handled */
+}
+
+static unsigned int __ata_sff_port_intr(struct ata_port *ap,
+					struct ata_queued_cmd *qc,
+					bool hsmv_on_idle)
+{
+	u8 status;
 
 	VPRINTK("ata%u: protocol %d task_state %d\n",
 		ap->print_id, qc->tf.protocol, ap->hsm_task_state);
@@ -1685,90 +1530,56 @@ unsigned int ata_sff_host_intr(struct ata_port *ap,
 	 * need to check ata_is_atapi(qc->tf.protocol) again.
 	 */
 	if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
-		goto idle_irq;
-		break;
-	case HSM_ST_LAST:
-		if (qc->tf.protocol == ATA_PROT_DMA ||
-		    qc->tf.protocol == ATAPI_PROT_DMA) {
-			/* check status of DMA engine */
-			host_stat = ap->ops->bmdma_status(ap);
-			VPRINTK("ata%u: host_stat 0x%X\n",
-				ap->print_id, host_stat);
-
-			/* if it's not our irq... */
-			if (!(host_stat & ATA_DMA_INTR))
-				goto idle_irq;
-
-			/* before we do anything else, clear DMA-Start bit */
-			ap->ops->bmdma_stop(qc);
-			bmdma_stopped = true;
-
-			if (unlikely(host_stat & ATA_DMA_ERR)) {
-				/* error when transfering data to/from memory */
-				qc->err_mask |= AC_ERR_HOST_BUS;
-				ap->hsm_task_state = HSM_ST_ERR;
-			}
-		}
+		return ata_sff_idle_irq(ap);
 		break;
 	case HSM_ST:
+	case HSM_ST_LAST:
 		break;
 	default:
-		goto idle_irq;
+		return ata_sff_idle_irq(ap);
 	}
 
-
 	/* check main status, clearing INTRQ if needed */
 	status = ata_sff_irq_status(ap);
 	if (status & ATA_BUSY) {
-		if (bmdma_stopped) {
+		if (hsmv_on_idle) {
 			/* BMDMA engine is already stopped, we're screwed */
 			qc->err_mask |= AC_ERR_HSM;
 			ap->hsm_task_state = HSM_ST_ERR;
 		} else
-			goto idle_irq;
+			return ata_sff_idle_irq(ap);
 	}
 
-	/* ack bmdma irq events */
-	ap->ops->sff_irq_clear(ap);
+	/* clear irq events */
+	if (ap->ops->sff_irq_clear)
+		ap->ops->sff_irq_clear(ap);
 
 	ata_sff_hsm_move(ap, qc, status, 0);
 
-	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
-				       qc->tf.protocol == ATAPI_PROT_DMA))
-		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
-
 	return 1;	/* irq handled */
-
-idle_irq:
-	ap->stats.idle_irq++;
-
-#ifdef ATA_IRQ_TRAP
-	if ((ap->stats.idle_irq % 1000) == 0) {
-		ap->ops->sff_check_status(ap);
-		ap->ops->sff_irq_clear(ap);
-		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
-		return 1;
-	}
-#endif
-	return 0;	/* irq not handled */
 }
-EXPORT_SYMBOL_GPL(ata_sff_host_intr);
 
 /**
- * ata_sff_interrupt - Default ATA host interrupt handler
- * @irq: irq line (unused)
- * @dev_instance: pointer to our ata_host information structure
+ * ata_sff_port_intr - Handle SFF port interrupt
+ * @ap: Port on which interrupt arrived (possibly...)
+ * @qc: Taskfile currently active in engine
  *
- * Default interrupt handler for PCI IDE devices.  Calls
- * ata_sff_host_intr() for each port that is not disabled.
+ * Handle port interrupt for given queued command.
 *
  * LOCKING:
- * Obtains host lock during operation.
+ * spin_lock_irqsave(host lock)
  *
  * RETURNS:
- * IRQ_NONE or IRQ_HANDLED.
+ * One if interrupt was handled, zero if not (shared irq).
  */
-irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
+unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
+{
+	return __ata_sff_port_intr(ap, qc, false);
+}
+EXPORT_SYMBOL_GPL(ata_sff_port_intr);
+
+static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance,
+	unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *))
 {
 	struct ata_host *host = dev_instance;
 	bool retried = false;
@@ -1785,13 +1596,10 @@ retry:
 		struct ata_port *ap = host->ports[i];
 		struct ata_queued_cmd *qc;
 
-		if (unlikely(ap->flags & ATA_FLAG_DISABLED))
-			continue;
-
 		qc = ata_qc_from_tag(ap, ap->link.active_tag);
 		if (qc) {
 			if (!(qc->tf.flags & ATA_TFLAG_POLLING))
-				handled |= ata_sff_host_intr(ap, qc);
+				handled |= port_intr(ap, qc);
 			else
 				polling |= 1 << i;
 		} else
@@ -1818,7 +1626,8 @@ retry:
 
 			if (idle & (1 << i)) {
 				ap->ops->sff_check_status(ap);
-				ap->ops->sff_irq_clear(ap);
+				if (ap->ops->sff_irq_clear)
+					ap->ops->sff_irq_clear(ap);
 			} else {
 				/* clear INTRQ and check if BUSY cleared */
 				if (!(ap->ops->sff_check_status(ap) & ATA_BUSY))
@@ -1840,6 +1649,25 @@ retry:
 
 	return IRQ_RETVAL(handled);
 }
+
+/**
+ * ata_sff_interrupt - Default SFF ATA host interrupt handler
+ * @irq: irq line (unused)
+ * @dev_instance: pointer to our ata_host information structure
+ *
+ * Default interrupt handler for PCI IDE devices.  Calls
+ * ata_sff_port_intr() for each port that is not disabled.
+ *
+ * LOCKING:
+ * Obtains host lock during operation.
+ *
+ * RETURNS:
+ * IRQ_NONE or IRQ_HANDLED.
+ */
+irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
+{
+	return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr);
+}
 EXPORT_SYMBOL_GPL(ata_sff_interrupt);
 
 /**
@@ -1862,11 +1690,8 @@ void ata_sff_lost_interrupt(struct ata_port *ap)
 
 	/* Only one outstanding command per SFF channel */
 	qc = ata_qc_from_tag(ap, ap->link.active_tag);
-	/* Check we have a live one.. */
-	if (qc == NULL || !(qc->flags & ATA_QCFLAG_ACTIVE))
-		return;
-	/* We cannot lose an interrupt on a polled command */
-	if (qc->tf.flags & ATA_TFLAG_POLLING)
+	/* We cannot lose an interrupt on a non-existent or polled command */
+	if (!qc || qc->tf.flags & ATA_TFLAG_POLLING)
 		return;
 	/* See if the controller thinks it is still busy - if so the command
 	   isn't a lost IRQ but is still in progress */
@@ -1880,7 +1705,7 @@ void ata_sff_lost_interrupt(struct ata_port *ap)
 								status);
 	/* Run the host interrupt logic as if the interrupt had not been
 	   lost */
-	ata_sff_host_intr(ap, qc);
+	ata_sff_port_intr(ap, qc);
 }
 EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);
 
@@ -1888,20 +1713,18 @@ EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);
  * ata_sff_freeze - Freeze SFF controller port
  * @ap: port to freeze
  *
- * Freeze BMDMA controller port.
+ * Freeze SFF controller port.
  *
  * LOCKING:
  * Inherited from caller.
  */
 void ata_sff_freeze(struct ata_port *ap)
 {
-	struct ata_ioports *ioaddr = &ap->ioaddr;
-
 	ap->ctl |= ATA_NIEN;
 	ap->last_ctl = ap->ctl;
 
-	if (ioaddr->ctl_addr)
-		iowrite8(ap->ctl, ioaddr->ctl_addr);
+	if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr)
+		ata_sff_set_devctl(ap, ap->ctl);
 
 	/* Under certain circumstances, some controllers raise IRQ on
 	 * ATA_NIEN manipulation.  Also, many controllers fail to mask
@@ -1909,7 +1732,8 @@ void ata_sff_freeze(struct ata_port *ap)
 	 */
 	ap->ops->sff_check_status(ap);
 
-	ap->ops->sff_irq_clear(ap);
+	if (ap->ops->sff_irq_clear)
+		ap->ops->sff_irq_clear(ap);
 }
 EXPORT_SYMBOL_GPL(ata_sff_freeze);
 
@@ -1926,8 +1750,9 @@ void ata_sff_thaw(struct ata_port *ap)
 {
 	/* clear & re-enable interrupts */
 	ap->ops->sff_check_status(ap);
-	ap->ops->sff_irq_clear(ap);
-	ap->ops->sff_irq_on(ap);
+	if (ap->ops->sff_irq_clear)
+		ap->ops->sff_irq_clear(ap);
+	ata_sff_irq_on(ap);
 }
 EXPORT_SYMBOL_GPL(ata_sff_thaw);
 
@@ -2301,8 +2126,8 @@ void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
 	}
 
 	/* set up device control */
-	if (ap->ioaddr.ctl_addr) {
-		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
+	if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) {
+		ata_sff_set_devctl(ap, ap->ctl);
 		ap->last_ctl = ap->ctl;
 	}
 }
@@ -2342,7 +2167,7 @@ void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
 EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);
 
 /**
- * ata_sff_error_handler - Stock error handler for BMDMA controller
+ * ata_sff_error_handler - Stock error handler for SFF controller
  * @ap: port to handle error for
  *
  * Stock error handler for SFF controller.  It can handle both
@@ -2359,62 +2184,32 @@ void ata_sff_error_handler(struct ata_port *ap)
 	ata_reset_fn_t hardreset = ap->ops->hardreset;
 	struct ata_queued_cmd *qc;
 	unsigned long flags;
-	int thaw = 0;
 
 	qc = __ata_qc_from_tag(ap, ap->link.active_tag);
 	if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
 		qc = NULL;
 
-	/* reset PIO HSM and stop DMA engine */
 	spin_lock_irqsave(ap->lock, flags);
 
-	ap->hsm_task_state = HSM_ST_IDLE;
-
-	if (ap->ioaddr.bmdma_addr &&
-	    qc && (qc->tf.protocol == ATA_PROT_DMA ||
-		   qc->tf.protocol == ATAPI_PROT_DMA)) {
-		u8 host_stat;
-
-		host_stat = ap->ops->bmdma_status(ap);
-
-		/* BMDMA controllers indicate host bus error by
-		 * setting DMA_ERR bit and timing out.  As it wasn't
-		 * really a timeout event, adjust error mask and
-		 * cancel frozen state.
-		 */
-		if (qc->err_mask == AC_ERR_TIMEOUT
-		    && (host_stat & ATA_DMA_ERR)) {
-			qc->err_mask = AC_ERR_HOST_BUS;
-			thaw = 1;
-		}
-
-		ap->ops->bmdma_stop(qc);
-	}
-
-	ata_sff_sync(ap);		/* FIXME: We don't need this */
-	ap->ops->sff_check_status(ap);
-	ap->ops->sff_irq_clear(ap);
-	/* We *MUST* do FIFO draining before we issue a reset as several
-	 * devices helpfully clear their internal state and will lock solid
-	 * if we touch the data port post reset. Pass qc in case anyone wants
-	 * to do different PIO/DMA recovery or has per command fixups
+	/*
+	 * We *MUST* do FIFO draining before we issue a reset as
+	 * several devices helpfully clear their internal state and
+	 * will lock solid if we touch the data port post reset. Pass
+	 * qc in case anyone wants to do different PIO/DMA recovery or
+	 * has per command fixups
 	 */
-	if (ap->ops->drain_fifo)
-		ap->ops->drain_fifo(qc);
+	if (ap->ops->sff_drain_fifo)
+		ap->ops->sff_drain_fifo(qc);
 
 	spin_unlock_irqrestore(ap->lock, flags);
 
-	if (thaw)
-		ata_eh_thaw_port(ap);
-
-	/* PIO and DMA engines have been stopped, perform recovery */
-
-	/* Ignore ata_sff_softreset if ctl isn't accessible and
-	 * built-in hardresets if SCR access isn't available.
-	 */
+	/* ignore ata_sff_softreset if ctl isn't accessible */
 	if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr)
 		softreset = NULL;
-	if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link))
+
+	/* ignore built-in hardresets if SCR access is not available */
+	if ((hardreset == sata_std_hardreset ||
+	     hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
 		hardreset = NULL;
 
 	ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
@@ -2423,73 +2218,6 @@ void ata_sff_error_handler(struct ata_port *ap)
 EXPORT_SYMBOL_GPL(ata_sff_error_handler);
 
 /**
- * ata_sff_post_internal_cmd - Stock post_internal_cmd for SFF controller
- * @qc: internal command to clean up
- *
- * LOCKING:
- * Kernel thread context (may sleep)
- */
-void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	unsigned long flags;
-
-	spin_lock_irqsave(ap->lock, flags);
-
-	ap->hsm_task_state = HSM_ST_IDLE;
-
-	if (ap->ioaddr.bmdma_addr)
-		ap->ops->bmdma_stop(qc);
-
-	spin_unlock_irqrestore(ap->lock, flags);
-}
-EXPORT_SYMBOL_GPL(ata_sff_post_internal_cmd);
-
-/**
- * ata_sff_port_start - Set port up for dma.
- * @ap: Port to initialize
- *
- * Called just after data structures for each port are
- * initialized.  Allocates space for PRD table if the device
- * is DMA capable SFF.
- *
- * May be used as the port_start() entry in ata_port_operations.
- *
- * LOCKING:
- * Inherited from caller.
- */
-int ata_sff_port_start(struct ata_port *ap)
-{
-	if (ap->ioaddr.bmdma_addr)
-		return ata_port_start(ap);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(ata_sff_port_start);
-
-/**
- * ata_sff_port_start32 - Set port up for dma.
- * @ap: Port to initialize
- *
- * Called just after data structures for each port are
- * initialized.  Allocates space for PRD table if the device
- * is DMA capable SFF.
- *
- * May be used as the port_start() entry in ata_port_operations for
- * devices that are capable of 32bit PIO.
- *
- * LOCKING:
- * Inherited from caller.
- */
-int ata_sff_port_start32(struct ata_port *ap)
-{
-	ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
-	if (ap->ioaddr.bmdma_addr)
-		return ata_port_start(ap);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(ata_sff_port_start32);
-
-/**
  * ata_sff_std_ports - initialize ioaddr with standard port offsets.
  * @ioaddr: IO address structure to be initialized
  *
@@ -2515,302 +2243,8 @@ void ata_sff_std_ports(struct ata_ioports *ioaddr)
 }
 EXPORT_SYMBOL_GPL(ata_sff_std_ports);
 
-unsigned long ata_bmdma_mode_filter(struct ata_device *adev,
-				    unsigned long xfer_mask)
-{
-	/* Filter out DMA modes if the device has been configured by
-	   the BIOS as PIO only */
-
-	if (adev->link->ap->ioaddr.bmdma_addr == NULL)
-		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
-	return xfer_mask;
-}
-EXPORT_SYMBOL_GPL(ata_bmdma_mode_filter);
-
-/**
- * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
- * @qc: Info associated with this ATA transaction.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_bmdma_setup(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
-	u8 dmactl;
-
-	/* load PRD table addr. */
-	mb();	/* make sure PRD table writes are visible to controller */
-	iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
-
-	/* specify data direction, triple-check start bit is clear */
-	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
-	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
-	if (!rw)
-		dmactl |= ATA_DMA_WR;
-	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
-
-	/* issue r/w command */
-	ap->ops->sff_exec_command(ap, &qc->tf);
-}
-EXPORT_SYMBOL_GPL(ata_bmdma_setup);
-
-/**
- * ata_bmdma_start - Start a PCI IDE BMDMA transaction
- * @qc: Info associated with this ATA transaction.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_bmdma_start(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	u8 dmactl;
-
-	/* start host DMA transaction */
-	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
-	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
-
-	/* Strictly, one may wish to issue an ioread8() here, to
-	 * flush the mmio write.  However, control also passes
-	 * to the hardware at this point, and it will interrupt
-	 * us when we are to resume control.  So, in effect,
-	 * we don't care when the mmio write flushes.
-	 * Further, a read of the DMA status register _immediately_
-	 * following the write may not be what certain flaky hardware
-	 * is expected, so I think it is best to not add a readb()
-	 * without first all the MMIO ATA cards/mobos.
-	 * Or maybe I'm just being paranoid.
-	 *
-	 * FIXME: The posting of this write means I/O starts are
-	 * unneccessarily delayed for MMIO
-	 */
-}
-EXPORT_SYMBOL_GPL(ata_bmdma_start);
-
-/**
- * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
- * @qc: Command we are ending DMA for
- *
- * Clears the ATA_DMA_START flag in the dma control register
- *
- * May be used as the bmdma_stop() entry in ata_port_operations.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_bmdma_stop(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	void __iomem *mmio = ap->ioaddr.bmdma_addr;
-
-	/* clear start/stop bit */
-	iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
-		 mmio + ATA_DMA_CMD);
-
-	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
-	ata_sff_dma_pause(ap);
-}
-EXPORT_SYMBOL_GPL(ata_bmdma_stop);
-
-/**
- * ata_bmdma_status - Read PCI IDE BMDMA status
- * @ap: Port associated with this ATA transaction.
- *
- * Read and return BMDMA status register.
- *
- * May be used as the bmdma_status() entry in ata_port_operations.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-u8 ata_bmdma_status(struct ata_port *ap)
-{
-	return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
-}
-EXPORT_SYMBOL_GPL(ata_bmdma_status);
-
-/**
- * ata_bus_reset - reset host port and associated ATA channel
- * @ap: port to reset
- *
- * This is typically the first time we actually start issuing
- * commands to the ATA channel.  We wait for BSY to clear, then
- * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
- * result.  Determine what devices, if any, are on the channel
- * by looking at the device 0/1 error register.  Look at the signature
- * stored in each device's taskfile registers, to determine if
- * the device is ATA or ATAPI.
- *
- * LOCKING:
- * PCI/etc. bus probe sem.
- * Obtains host lock.
- *
- * SIDE EFFECTS:
- * Sets ATA_FLAG_DISABLED if bus reset fails.
- *
- * DEPRECATED:
- * This function is only for drivers which still use old EH and
- * will be removed soon.
- */
-void ata_bus_reset(struct ata_port *ap)
-{
-	struct ata_device *device = ap->link.device;
-	struct ata_ioports *ioaddr = &ap->ioaddr;
-	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
-	u8 err;
-	unsigned int dev0, dev1 = 0, devmask = 0;
-	int rc;
-
-	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
-
-	/* determine if device 0/1 are present */
-	if (ap->flags & ATA_FLAG_SATA_RESET)
-		dev0 = 1;
-	else {
-		dev0 = ata_devchk(ap, 0);
-		if (slave_possible)
-			dev1 = ata_devchk(ap, 1);
-	}
-
-	if (dev0)
-		devmask |= (1 << 0);
-	if (dev1)
-		devmask |= (1 << 1);
-
-	/* select device 0 again */
-	ap->ops->sff_dev_select(ap, 0);
-
-	/* issue bus reset */
-	if (ap->flags & ATA_FLAG_SRST) {
-		rc = ata_bus_softreset(ap, devmask,
-				       ata_deadline(jiffies, 40000));
-		if (rc && rc != -ENODEV)
-			goto err_out;
-	}
-
-	/*
-	 * determine by signature whether we have ATA or ATAPI devices
-	 */
-	device[0].class = ata_sff_dev_classify(&device[0], dev0, &err);
-	if ((slave_possible) && (err != 0x81))
-		device[1].class = ata_sff_dev_classify(&device[1], dev1, &err);
-
-	/* is double-select really necessary? */
-	if (device[1].class != ATA_DEV_NONE)
-		ap->ops->sff_dev_select(ap, 1);
-	if (device[0].class != ATA_DEV_NONE)
-		ap->ops->sff_dev_select(ap, 0);
-
-	/* if no devices were detected, disable this port */
-	if ((device[0].class == ATA_DEV_NONE) &&
-	    (device[1].class == ATA_DEV_NONE))
-		goto err_out;
-
-	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
-		/* set up device control for ATA_FLAG_SATA_RESET */
-		iowrite8(ap->ctl, ioaddr->ctl_addr);
-		ap->last_ctl = ap->ctl;
-	}
-
-	DPRINTK("EXIT\n");
-	return;
-
-err_out:
-	ata_port_printk(ap, KERN_ERR, "disabling port\n");
-	ata_port_disable(ap);
-
-	DPRINTK("EXIT\n");
-}
-EXPORT_SYMBOL_GPL(ata_bus_reset);
-
 #ifdef CONFIG_PCI
 
-/**
- * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex
- * @pdev: PCI device
- *
- * Some PCI ATA devices report simplex mode but in fact can be told to
- * enter non simplex mode.  This implements the necessary logic to
- * perform the task on such devices.  Calling it on other devices will
- * have -undefined- behaviour.
- */
-int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
-{
-	unsigned long bmdma = pci_resource_start(pdev, 4);
-	u8 simplex;
-
-	if (bmdma == 0)
-		return -ENOENT;
-
-	simplex = inb(bmdma + 0x02);
-	outb(simplex & 0x60, bmdma + 0x02);
-	simplex = inb(bmdma + 0x02);
-	if (simplex & 0x80)
-		return -EOPNOTSUPP;
-	return 0;
-}
-EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
-
-/**
- * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
- * @host: target ATA host
- *
- * Acquire PCI BMDMA resources and initialize @host accordingly.
- *
- * LOCKING:
- * Inherited from calling layer (may sleep).
- *
- * RETURNS:
- * 0 on success, -errno otherwise.
- */
-int ata_pci_bmdma_init(struct ata_host *host)
-{
-	struct device *gdev = host->dev;
-	struct pci_dev *pdev = to_pci_dev(gdev);
-	int i, rc;
-
-	/* No BAR4 allocation: No DMA */
-	if (pci_resource_start(pdev, 4) == 0)
-		return 0;
-
-	/* TODO: If we get no DMA mask we should fall back to PIO */
-	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
-	if (rc)
-		return rc;
-	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
-	if (rc)
-		return rc;
-
-	/* request and iomap DMA region */
-	rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
-	if (rc) {
-		dev_printk(KERN_ERR, gdev, "failed to request/iomap BAR4\n");
-		return -ENOMEM;
-	}
-	host->iomap = pcim_iomap_table(pdev);
-
-	for (i = 0; i < 2; i++) {
-		struct ata_port *ap = host->ports[i];
-		void __iomem *bmdma = host->iomap[4] + 8 * i;
-
-		if (ata_port_is_dummy(ap))
-			continue;
-
-		ap->ioaddr.bmdma_addr = bmdma;
-		if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
-		    (ioread8(bmdma + 2) & 0x80))
-			host->flags |= ATA_HOST_SIMPLEX;
-
-		ata_port_desc(ap, "bmdma 0x%llx",
-		    (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
-
 static int ata_resources_present(struct pci_dev *pdev, int port)
 {
 	int i;
@@ -2905,13 +2339,13 @@ int ata_pci_sff_init_host(struct ata_host *host)
 EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);
 
 /**
- * ata_pci_sff_prepare_host - helper to prepare native PCI ATA host
+ * ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host
  * @pdev: target PCI device
  * @ppi: array of port_info, must be enough for two ports
  * @r_host: out argument for the initialized ATA host
  *
- * Helper to allocate ATA host for @pdev, acquire all native PCI
- * resources and initialize it accordingly in one go.
+ * Helper to allocate PIO-only SFF ATA host for @pdev, acquire
+ * all PCI resources and initialize it accordingly in one go.
  *
  * LOCKING:
  * Inherited from calling layer (may sleep).
@@ -2941,22 +2375,10 @@ int ata_pci_sff_prepare_host(struct pci_dev *pdev, | |||
2941 | if (rc) | 2375 | if (rc) |
2942 | goto err_out; | 2376 | goto err_out; |
2943 | 2377 | ||
2944 | /* init DMA related stuff */ | ||
2945 | rc = ata_pci_bmdma_init(host); | ||
2946 | if (rc) | ||
2947 | goto err_bmdma; | ||
2948 | |||
2949 | devres_remove_group(&pdev->dev, NULL); | 2378 | devres_remove_group(&pdev->dev, NULL); |
2950 | *r_host = host; | 2379 | *r_host = host; |
2951 | return 0; | 2380 | return 0; |
2952 | 2381 | ||
2953 | err_bmdma: | ||
2954 | /* This is necessary because PCI and iomap resources are | ||
2955 | * merged and releasing the top group won't release the | ||
2956 | * acquired resources if some of those have been acquired | ||
2957 | * before entering this function. | ||
2958 | */ | ||
2959 | pcim_iounmap_regions(pdev, 0xf); | ||
2960 | err_out: | 2382 | err_out: |
2961 | devres_release_group(&pdev->dev, NULL); | 2383 | devres_release_group(&pdev->dev, NULL); |
2962 | return rc; | 2384 | return rc; |
@@ -3057,8 +2479,21 @@ out: | |||
3057 | } | 2479 | } |
3058 | EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host); | 2480 | EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host); |
3059 | 2481 | ||
2482 | static const struct ata_port_info *ata_sff_find_valid_pi( | ||
2483 | const struct ata_port_info * const *ppi) | ||
2484 | { | ||
2485 | int i; | ||
2486 | |||
2487 | /* look up the first valid port_info */ | ||
2488 | for (i = 0; i < 2 && ppi[i]; i++) | ||
2489 | if (ppi[i]->port_ops != &ata_dummy_port_ops) | ||
2490 | return ppi[i]; | ||
2491 | |||
2492 | return NULL; | ||
2493 | } | ||
2494 | |||
3060 | /** | 2495 | /** |
3061 | * ata_pci_sff_init_one - Initialize/register PCI IDE host controller | 2496 | * ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller |
3062 | * @pdev: Controller to be initialized | 2497 | * @pdev: Controller to be initialized |
3063 | * @ppi: array of port_info, must be enough for two ports | 2498 | * @ppi: array of port_info, must be enough for two ports |
3064 | * @sht: scsi_host_template to use when registering the host | 2499 | * @sht: scsi_host_template to use when registering the host |
@@ -3067,11 +2502,7 @@ EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host); | |||
3067 | * | 2502 | * |
3068 | * This is a helper function which can be called from a driver's | 2503 | * This is a helper function which can be called from a driver's |
3069 | * xxx_init_one() probe function if the hardware uses traditional | 2504 | * xxx_init_one() probe function if the hardware uses traditional |
3070 | * IDE taskfile registers. | 2505 | * IDE taskfile registers and is PIO only. |
3071 | * | ||
3072 | * This function calls pci_enable_device(), reserves its register | ||
3073 | * regions, sets the dma mask, enables bus master mode, and calls | ||
3074 | * ata_device_add() | ||
3075 | * | 2506 | * |
3076 | * ASSUMPTION: | 2507 | * ASSUMPTION: |
3077 | * Nobody makes a single channel controller that appears solely as | 2508 | * Nobody makes a single channel controller that appears solely as |
@@ -3088,20 +2519,13 @@ int ata_pci_sff_init_one(struct pci_dev *pdev, | |||
3088 | struct scsi_host_template *sht, void *host_priv, int hflag) | 2519 | struct scsi_host_template *sht, void *host_priv, int hflag) |
3089 | { | 2520 | { |
3090 | struct device *dev = &pdev->dev; | 2521 | struct device *dev = &pdev->dev; |
3091 | const struct ata_port_info *pi = NULL; | 2522 | const struct ata_port_info *pi; |
3092 | struct ata_host *host = NULL; | 2523 | struct ata_host *host = NULL; |
3093 | int i, rc; | 2524 | int rc; |
3094 | 2525 | ||
3095 | DPRINTK("ENTER\n"); | 2526 | DPRINTK("ENTER\n"); |
3096 | 2527 | ||
3097 | /* look up the first valid port_info */ | 2528 | pi = ata_sff_find_valid_pi(ppi); |
3098 | for (i = 0; i < 2 && ppi[i]; i++) { | ||
3099 | if (ppi[i]->port_ops != &ata_dummy_port_ops) { | ||
3100 | pi = ppi[i]; | ||
3101 | break; | ||
3102 | } | ||
3103 | } | ||
3104 | |||
3105 | if (!pi) { | 2529 | if (!pi) { |
3106 | dev_printk(KERN_ERR, &pdev->dev, | 2530 | dev_printk(KERN_ERR, &pdev->dev, |
3107 | "no valid port_info specified\n"); | 2531 | "no valid port_info specified\n"); |
@@ -3122,7 +2546,6 @@ int ata_pci_sff_init_one(struct pci_dev *pdev, | |||
3122 | host->private_data = host_priv; | 2546 | host->private_data = host_priv; |
3123 | host->flags |= hflag; | 2547 | host->flags |= hflag; |
3124 | 2548 | ||
3125 | pci_set_master(pdev); | ||
3126 | rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht); | 2549 | rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht); |
3127 | out: | 2550 | out: |
3128 | if (rc == 0) | 2551 | if (rc == 0) |
@@ -3135,3 +2558,791 @@ out: | |||
3135 | EXPORT_SYMBOL_GPL(ata_pci_sff_init_one); | 2558 | EXPORT_SYMBOL_GPL(ata_pci_sff_init_one); |
3136 | 2559 | ||
3137 | #endif /* CONFIG_PCI */ | 2560 | #endif /* CONFIG_PCI */ |
2561 | |||
2562 | /* | ||
2563 | * BMDMA support | ||
2564 | */ | ||
2565 | |||
2566 | #ifdef CONFIG_ATA_BMDMA | ||
2567 | |||
2568 | const struct ata_port_operations ata_bmdma_port_ops = { | ||
2569 | .inherits = &ata_sff_port_ops, | ||
2570 | |||
2571 | .error_handler = ata_bmdma_error_handler, | ||
2572 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | ||
2573 | |||
2574 | .qc_prep = ata_bmdma_qc_prep, | ||
2575 | .qc_issue = ata_bmdma_qc_issue, | ||
2576 | |||
2577 | .sff_irq_clear = ata_bmdma_irq_clear, | ||
2578 | .bmdma_setup = ata_bmdma_setup, | ||
2579 | .bmdma_start = ata_bmdma_start, | ||
2580 | .bmdma_stop = ata_bmdma_stop, | ||
2581 | .bmdma_status = ata_bmdma_status, | ||
2582 | |||
2583 | .port_start = ata_bmdma_port_start, | ||
2584 | }; | ||
2585 | EXPORT_SYMBOL_GPL(ata_bmdma_port_ops); | ||
2586 | |||
2587 | const struct ata_port_operations ata_bmdma32_port_ops = { | ||
2588 | .inherits = &ata_bmdma_port_ops, | ||
2589 | |||
2590 | .sff_data_xfer = ata_sff_data_xfer32, | ||
2591 | .port_start = ata_bmdma_port_start32, | ||
2592 | }; | ||
2593 | EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops); | ||
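These ops tables are meant to be inherited rather than used verbatim: a driver supplies its own table, points .inherits at ata_bmdma_port_ops (or the 32-bit PIO variant), and overrides only the hooks its hardware needs. A minimal sketch, assuming a hypothetical pata_foo driver with its own foo_set_piomode timing routine:

	static struct ata_port_operations foo_port_ops = {
		.inherits	= &ata_bmdma_port_ops,
		.cable_detect	= ata_cable_40wire,
		.set_piomode	= foo_set_piomode,	/* hypothetical, driver-specific */
	};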
2594 | |||
2595 | /** | ||
2596 | * ata_bmdma_fill_sg - Fill PCI IDE PRD table | ||
2597 | * @qc: Metadata associated with taskfile to be transferred | ||
2598 | * | ||
2599 | * Fill PCI IDE PRD (scatter-gather) table with segments | ||
2600 | * associated with the current disk command. | ||
2601 | * | ||
2602 | * LOCKING: | ||
2603 | * spin_lock_irqsave(host lock) | ||
2604 | * | ||
2605 | */ | ||
2606 | static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc) | ||
2607 | { | ||
2608 | struct ata_port *ap = qc->ap; | ||
2609 | struct ata_bmdma_prd *prd = ap->bmdma_prd; | ||
2610 | struct scatterlist *sg; | ||
2611 | unsigned int si, pi; | ||
2612 | |||
2613 | pi = 0; | ||
2614 | for_each_sg(qc->sg, sg, qc->n_elem, si) { | ||
2615 | u32 addr, offset; | ||
2616 | u32 sg_len, len; | ||
2617 | |||
2618 | /* determine if physical DMA addr spans 64K boundary. | ||
2619 | * Note h/w doesn't support 64-bit, so we unconditionally | ||
2620 | * truncate dma_addr_t to u32. | ||
2621 | */ | ||
2622 | addr = (u32) sg_dma_address(sg); | ||
2623 | sg_len = sg_dma_len(sg); | ||
2624 | |||
2625 | while (sg_len) { | ||
2626 | offset = addr & 0xffff; | ||
2627 | len = sg_len; | ||
2628 | if ((offset + sg_len) > 0x10000) | ||
2629 | len = 0x10000 - offset; | ||
2630 | |||
2631 | prd[pi].addr = cpu_to_le32(addr); | ||
2632 | prd[pi].flags_len = cpu_to_le32(len & 0xffff); | ||
2633 | VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len); | ||
2634 | |||
2635 | pi++; | ||
2636 | sg_len -= len; | ||
2637 | addr += len; | ||
2638 | } | ||
2639 | } | ||
2640 | |||
2641 | prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); | ||
2642 | } | ||
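To make the boundary split concrete: a 12 KiB segment at bus address 0x1FFFE000 sits at offset 0xE000 within its 64K page, so the inner loop above emits two PRDs (addresses illustrative only):

	PRD[n]   = { .addr = 0x1FFFE000, .flags_len = 0x2000 }	/* clamped at the 64K line */
	PRD[n+1] = { .addr = 0x20000000, .flags_len = 0x1000 }	/* remainder */

The last PRD written for the command is then tagged with ATA_PRD_EOT.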
2643 | |||
2644 | /** | ||
2645 | * ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table | ||
2646 | * @qc: Metadata associated with taskfile to be transferred | ||
2647 | * | ||
2648 | * Fill PCI IDE PRD (scatter-gather) table with segments | ||
2649 | * associated with the current disk command. Perform the fill | ||
2650 | * so that we avoid writing any 64K-length records for | ||
2651 | * controllers that don't follow the spec. | ||
2652 | * | ||
2653 | * LOCKING: | ||
2654 | * spin_lock_irqsave(host lock) | ||
2655 | * | ||
2656 | */ | ||
2657 | static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc) | ||
2658 | { | ||
2659 | struct ata_port *ap = qc->ap; | ||
2660 | struct ata_bmdma_prd *prd = ap->bmdma_prd; | ||
2661 | struct scatterlist *sg; | ||
2662 | unsigned int si, pi; | ||
2663 | |||
2664 | pi = 0; | ||
2665 | for_each_sg(qc->sg, sg, qc->n_elem, si) { | ||
2666 | u32 addr, offset; | ||
2667 | u32 sg_len, len, blen; | ||
2668 | |||
2669 | /* determine if physical DMA addr spans 64K boundary. | ||
2670 | * Note h/w doesn't support 64-bit, so we unconditionally | ||
2671 | * truncate dma_addr_t to u32. | ||
2672 | */ | ||
2673 | addr = (u32) sg_dma_address(sg); | ||
2674 | sg_len = sg_dma_len(sg); | ||
2675 | |||
2676 | while (sg_len) { | ||
2677 | offset = addr & 0xffff; | ||
2678 | len = sg_len; | ||
2679 | if ((offset + sg_len) > 0x10000) | ||
2680 | len = 0x10000 - offset; | ||
2681 | |||
2682 | blen = len & 0xffff; | ||
2683 | prd[pi].addr = cpu_to_le32(addr); | ||
2684 | if (blen == 0) { | ||
2685 | /* Some PATA chipsets like the CS5530 can't | ||
2686 | cope with 0x0000 meaning 64K as the spec | ||
2687 | says */ | ||
2688 | prd[pi].flags_len = cpu_to_le32(0x8000); | ||
2689 | blen = 0x8000; | ||
2690 | prd[++pi].addr = cpu_to_le32(addr + 0x8000); | ||
2691 | } | ||
2692 | prd[pi].flags_len = cpu_to_le32(blen); | ||
2693 | VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len); | ||
2694 | |||
2695 | pi++; | ||
2696 | sg_len -= len; | ||
2697 | addr += len; | ||
2698 | } | ||
2699 | } | ||
2700 | |||
2701 | prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); | ||
2702 | } | ||
2703 | |||
2704 | /** | ||
2705 | * ata_bmdma_qc_prep - Prepare taskfile for submission | ||
2706 | * @qc: Metadata associated with taskfile to be prepared | ||
2707 | * | ||
2708 | * Prepare ATA taskfile for submission. | ||
2709 | * | ||
2710 | * LOCKING: | ||
2711 | * spin_lock_irqsave(host lock) | ||
2712 | */ | ||
2713 | void ata_bmdma_qc_prep(struct ata_queued_cmd *qc) | ||
2714 | { | ||
2715 | if (!(qc->flags & ATA_QCFLAG_DMAMAP)) | ||
2716 | return; | ||
2717 | |||
2718 | ata_bmdma_fill_sg(qc); | ||
2719 | } | ||
2720 | EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep); | ||
2721 | |||
2722 | /** | ||
2723 | * ata_bmdma_dumb_qc_prep - Prepare taskfile for submission | ||
2724 | * @qc: Metadata associated with taskfile to be prepared | ||
2725 | * | ||
2726 | * Prepare ATA taskfile for submission. | ||
2727 | * | ||
2728 | * LOCKING: | ||
2729 | * spin_lock_irqsave(host lock) | ||
2730 | */ | ||
2731 | void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc) | ||
2732 | { | ||
2733 | if (!(qc->flags & ATA_QCFLAG_DMAMAP)) | ||
2734 | return; | ||
2735 | |||
2736 | ata_bmdma_fill_sg_dumb(qc); | ||
2737 | } | ||
2738 | EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep); | ||
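Controllers with the 0x0000-means-64K erratum simply select the dumb variant in their ops table rather than overriding the fill logic; a sketch (names hypothetical):

	static struct ata_port_operations bar_port_ops = {
		.inherits	= &ata_bmdma_port_ops,
		.qc_prep	= ata_bmdma_dumb_qc_prep,	/* never emit a full 64K PRD */
	};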
2739 | |||
2740 | /** | ||
2741 | * ata_bmdma_qc_issue - issue taskfile to a BMDMA controller | ||
2742 | * @qc: command to issue to device | ||
2743 | * | ||
2744 | * This function issues a PIO, NODATA or DMA command to an | ||
2745 | * SFF/BMDMA controller. PIO and NODATA are handled by | ||
2746 | * ata_sff_qc_issue(). | ||
2747 | * | ||
2748 | * LOCKING: | ||
2749 | * spin_lock_irqsave(host lock) | ||
2750 | * | ||
2751 | * RETURNS: | ||
2752 | * Zero on success, AC_ERR_* mask on failure | ||
2753 | */ | ||
2754 | unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) | ||
2755 | { | ||
2756 | struct ata_port *ap = qc->ap; | ||
2757 | struct ata_link *link = qc->dev->link; | ||
2758 | |||
2759 | /* defer PIO handling to sff_qc_issue */ | ||
2760 | if (!ata_is_dma(qc->tf.protocol)) | ||
2761 | return ata_sff_qc_issue(qc); | ||
2762 | |||
2763 | /* select the device */ | ||
2764 | ata_dev_select(ap, qc->dev->devno, 1, 0); | ||
2765 | |||
2766 | /* start the command */ | ||
2767 | switch (qc->tf.protocol) { | ||
2768 | case ATA_PROT_DMA: | ||
2769 | WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); | ||
2770 | |||
2771 | ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ | ||
2772 | ap->ops->bmdma_setup(qc); /* set up bmdma */ | ||
2773 | ap->ops->bmdma_start(qc); /* initiate bmdma */ | ||
2774 | ap->hsm_task_state = HSM_ST_LAST; | ||
2775 | break; | ||
2776 | |||
2777 | case ATAPI_PROT_DMA: | ||
2778 | WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); | ||
2779 | |||
2780 | ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ | ||
2781 | ap->ops->bmdma_setup(qc); /* set up bmdma */ | ||
2782 | ap->hsm_task_state = HSM_ST_FIRST; | ||
2783 | |||
2784 | /* send cdb by polling if no cdb interrupt */ | ||
2785 | if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) | ||
2786 | ata_sff_queue_pio_task(link, 0); | ||
2787 | break; | ||
2788 | |||
2789 | default: | ||
2790 | WARN_ON(1); | ||
2791 | return AC_ERR_SYSTEM; | ||
2792 | } | ||
2793 | |||
2794 | return 0; | ||
2795 | } | ||
2796 | EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue); | ||
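A driver whose DMA engine needs per-command setup can wrap this hook rather than reimplement the protocol dispatch; a sketch, where foo_program_timings() is a hypothetical controller-specific helper:

	static unsigned int foo_qc_issue(struct ata_queued_cmd *qc)
	{
		if (ata_is_dma(qc->tf.protocol))
			foo_program_timings(qc->ap, qc->dev);

		return ata_bmdma_qc_issue(qc);
	}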
2797 | |||
2798 | /** | ||
2799 | * ata_bmdma_port_intr - Handle BMDMA port interrupt | ||
2800 | * @ap: Port on which interrupt arrived (possibly...) | ||
2801 | * @qc: Taskfile currently active in engine | ||
2802 | * | ||
2803 | * Handle port interrupt for given queued command. | ||
2804 | * | ||
2805 | * LOCKING: | ||
2806 | * spin_lock_irqsave(host lock) | ||
2807 | * | ||
2808 | * RETURNS: | ||
2809 | * One if interrupt was handled, zero if not (shared irq). | ||
2810 | */ | ||
2811 | unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc) | ||
2812 | { | ||
2813 | struct ata_eh_info *ehi = &ap->link.eh_info; | ||
2814 | u8 host_stat = 0; | ||
2815 | bool bmdma_stopped = false; | ||
2816 | unsigned int handled; | ||
2817 | |||
2818 | if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) { | ||
2819 | /* check status of DMA engine */ | ||
2820 | host_stat = ap->ops->bmdma_status(ap); | ||
2821 | VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat); | ||
2822 | |||
2823 | /* if it's not our irq... */ | ||
2824 | if (!(host_stat & ATA_DMA_INTR)) | ||
2825 | return ata_sff_idle_irq(ap); | ||
2826 | |||
2827 | /* before we do anything else, clear DMA-Start bit */ | ||
2828 | ap->ops->bmdma_stop(qc); | ||
2829 | bmdma_stopped = true; | ||
2830 | |||
2831 | if (unlikely(host_stat & ATA_DMA_ERR)) { | ||
2832 | /* error when transferring data to/from memory */ | ||
2833 | qc->err_mask |= AC_ERR_HOST_BUS; | ||
2834 | ap->hsm_task_state = HSM_ST_ERR; | ||
2835 | } | ||
2836 | } | ||
2837 | |||
2838 | handled = __ata_sff_port_intr(ap, qc, bmdma_stopped); | ||
2839 | |||
2840 | if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol)) | ||
2841 | ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat); | ||
2842 | |||
2843 | return handled; | ||
2844 | } | ||
2845 | EXPORT_SYMBOL_GPL(ata_bmdma_port_intr); | ||
2846 | |||
2847 | /** | ||
2848 | * ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler | ||
2849 | * @irq: irq line (unused) | ||
2850 | * @dev_instance: pointer to our ata_host information structure | ||
2851 | * | ||
2852 | * Default interrupt handler for PCI IDE devices. Calls | ||
2853 | * ata_bmdma_port_intr() for each port that is not disabled. | ||
2854 | * | ||
2855 | * LOCKING: | ||
2856 | * Obtains host lock during operation. | ||
2857 | * | ||
2858 | * RETURNS: | ||
2859 | * IRQ_NONE or IRQ_HANDLED. | ||
2860 | */ | ||
2861 | irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance) | ||
2862 | { | ||
2863 | return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr); | ||
2864 | } | ||
2865 | EXPORT_SYMBOL_GPL(ata_bmdma_interrupt); | ||
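Drivers that register their host by hand (rather than through ata_pci_bmdma_init_one() below) pass this handler to ata_host_activate(); a sketch, assuming host and a hypothetical foo_sht were set up earlier in probe:

	rc = ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
			       IRQF_SHARED, &foo_sht);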
2866 | |||
2867 | /** | ||
2868 | * ata_bmdma_error_handler - Stock error handler for BMDMA controller | ||
2869 | * @ap: port to handle error for | ||
2870 | * | ||
2871 | * Stock error handler for BMDMA controller. It can handle both | ||
2872 | * PATA and SATA controllers. Most BMDMA controllers should be | ||
2873 | * able to use this EH as-is or with some added handling before | ||
2874 | * and after. | ||
2875 | * | ||
2876 | * LOCKING: | ||
2877 | * Kernel thread context (may sleep) | ||
2878 | */ | ||
2879 | void ata_bmdma_error_handler(struct ata_port *ap) | ||
2880 | { | ||
2881 | struct ata_queued_cmd *qc; | ||
2882 | unsigned long flags; | ||
2883 | bool thaw = false; | ||
2884 | |||
2885 | qc = __ata_qc_from_tag(ap, ap->link.active_tag); | ||
2886 | if (qc && !(qc->flags & ATA_QCFLAG_FAILED)) | ||
2887 | qc = NULL; | ||
2888 | |||
2889 | /* reset PIO HSM and stop DMA engine */ | ||
2890 | spin_lock_irqsave(ap->lock, flags); | ||
2891 | |||
2892 | if (qc && ata_is_dma(qc->tf.protocol)) { | ||
2893 | u8 host_stat; | ||
2894 | |||
2895 | host_stat = ap->ops->bmdma_status(ap); | ||
2896 | |||
2897 | /* BMDMA controllers indicate host bus error by | ||
2898 | * setting DMA_ERR bit and timing out. As it wasn't | ||
2899 | * really a timeout event, adjust error mask and | ||
2900 | * cancel frozen state. | ||
2901 | */ | ||
2902 | if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) { | ||
2903 | qc->err_mask = AC_ERR_HOST_BUS; | ||
2904 | thaw = true; | ||
2905 | } | ||
2906 | |||
2907 | ap->ops->bmdma_stop(qc); | ||
2908 | |||
2909 | /* if we're gonna thaw, make sure IRQ is clear */ | ||
2910 | if (thaw) { | ||
2911 | ap->ops->sff_check_status(ap); | ||
2912 | if (ap->ops->sff_irq_clear) | ||
2913 | ap->ops->sff_irq_clear(ap); | ||
2914 | } | ||
2915 | } | ||
2916 | |||
2917 | spin_unlock_irqrestore(ap->lock, flags); | ||
2918 | |||
2919 | if (thaw) | ||
2920 | ata_eh_thaw_port(ap); | ||
2921 | |||
2922 | ata_sff_error_handler(ap); | ||
2923 | } | ||
2924 | EXPORT_SYMBOL_GPL(ata_bmdma_error_handler); | ||
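Per the comment above, most drivers use this EH as-is via ata_bmdma_port_ops; hardware that needs extra quiescing wraps it instead. A sketch, with foo_reset_dma_engine() a hypothetical controller-specific helper:

	static void foo_error_handler(struct ata_port *ap)
	{
		foo_reset_dma_engine(ap);	/* quiesce controller state first */
		ata_bmdma_error_handler(ap);
	}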
2925 | |||
2926 | /** | ||
2927 | * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA | ||
2928 | * @qc: internal command to clean up | ||
2929 | * | ||
2930 | * LOCKING: | ||
2931 | * Kernel thread context (may sleep) | ||
2932 | */ | ||
2933 | void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc) | ||
2934 | { | ||
2935 | struct ata_port *ap = qc->ap; | ||
2936 | unsigned long flags; | ||
2937 | |||
2938 | if (ata_is_dma(qc->tf.protocol)) { | ||
2939 | spin_lock_irqsave(ap->lock, flags); | ||
2940 | ap->ops->bmdma_stop(qc); | ||
2941 | spin_unlock_irqrestore(ap->lock, flags); | ||
2942 | } | ||
2943 | } | ||
2944 | EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd); | ||
2945 | |||
2946 | /** | ||
2947 | * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt. | ||
2948 | * @ap: Port associated with this ATA transaction. | ||
2949 | * | ||
2950 | * Clear interrupt and error flags in DMA status register. | ||
2951 | * | ||
2952 | * May be used as the irq_clear() entry in ata_port_operations. | ||
2953 | * | ||
2954 | * LOCKING: | ||
2955 | * spin_lock_irqsave(host lock) | ||
2956 | */ | ||
2957 | void ata_bmdma_irq_clear(struct ata_port *ap) | ||
2958 | { | ||
2959 | void __iomem *mmio = ap->ioaddr.bmdma_addr; | ||
2960 | |||
2961 | if (!mmio) | ||
2962 | return; | ||
2963 | |||
2964 | iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS); | ||
2965 | } | ||
2966 | EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear); | ||
2967 | |||
2968 | /** | ||
2969 | * ata_bmdma_setup - Set up PCI IDE BMDMA transaction | ||
2970 | * @qc: Info associated with this ATA transaction. | ||
2971 | * | ||
2972 | * LOCKING: | ||
2973 | * spin_lock_irqsave(host lock) | ||
2974 | */ | ||
2975 | void ata_bmdma_setup(struct ata_queued_cmd *qc) | ||
2976 | { | ||
2977 | struct ata_port *ap = qc->ap; | ||
2978 | unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); | ||
2979 | u8 dmactl; | ||
2980 | |||
2981 | /* load PRD table addr. */ | ||
2982 | mb(); /* make sure PRD table writes are visible to controller */ | ||
2983 | iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); | ||
2984 | |||
2985 | /* specify data direction, triple-check start bit is clear */ | ||
2986 | dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | ||
2987 | dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); | ||
2988 | if (!rw) | ||
2989 | dmactl |= ATA_DMA_WR; | ||
2990 | iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | ||
2991 | |||
2992 | /* issue r/w command */ | ||
2993 | ap->ops->sff_exec_command(ap, &qc->tf); | ||
2994 | } | ||
2995 | EXPORT_SYMBOL_GPL(ata_bmdma_setup); | ||
2996 | |||
2997 | /** | ||
2998 | * ata_bmdma_start - Start a PCI IDE BMDMA transaction | ||
2999 | * @qc: Info associated with this ATA transaction. | ||
3000 | * | ||
3001 | * LOCKING: | ||
3002 | * spin_lock_irqsave(host lock) | ||
3003 | */ | ||
3004 | void ata_bmdma_start(struct ata_queued_cmd *qc) | ||
3005 | { | ||
3006 | struct ata_port *ap = qc->ap; | ||
3007 | u8 dmactl; | ||
3008 | |||
3009 | /* start host DMA transaction */ | ||
3010 | dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | ||
3011 | iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | ||
3012 | |||
3013 | /* Strictly, one may wish to issue an ioread8() here, to | ||
3014 | * flush the mmio write. However, control also passes | ||
3015 | * to the hardware at this point, and it will interrupt | ||
3016 | * us when we are to resume control. So, in effect, | ||
3017 | * we don't care when the mmio write flushes. | ||
3018 | * Further, a read of the DMA status register _immediately_ | ||
3019 | * following the write may not be what certain flaky hardware | ||
3020 | * expects, so I think it is best not to add a readb() | ||
3021 | * without first auditing all the MMIO ATA cards/mobos. | ||
3022 | * Or maybe I'm just being paranoid. | ||
3023 | * | ||
3024 | * FIXME: The posting of this write means I/O starts are | ||
3025 | * unnecessarily delayed for MMIO | ||
3026 | */ | ||
3027 | } | ||
3028 | EXPORT_SYMBOL_GPL(ata_bmdma_start); | ||
3029 | |||
3030 | /** | ||
3031 | * ata_bmdma_stop - Stop PCI IDE BMDMA transfer | ||
3032 | * @qc: Command we are ending DMA for | ||
3033 | * | ||
3034 | * Clears the ATA_DMA_START flag in the dma control register | ||
3035 | * | ||
3036 | * May be used as the bmdma_stop() entry in ata_port_operations. | ||
3037 | * | ||
3038 | * LOCKING: | ||
3039 | * spin_lock_irqsave(host lock) | ||
3040 | */ | ||
3041 | void ata_bmdma_stop(struct ata_queued_cmd *qc) | ||
3042 | { | ||
3043 | struct ata_port *ap = qc->ap; | ||
3044 | void __iomem *mmio = ap->ioaddr.bmdma_addr; | ||
3045 | |||
3046 | /* clear start/stop bit */ | ||
3047 | iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START, | ||
3048 | mmio + ATA_DMA_CMD); | ||
3049 | |||
3050 | /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ | ||
3051 | ata_sff_dma_pause(ap); | ||
3052 | } | ||
3053 | EXPORT_SYMBOL_GPL(ata_bmdma_stop); | ||
3054 | |||
3055 | /** | ||
3056 | * ata_bmdma_status - Read PCI IDE BMDMA status | ||
3057 | * @ap: Port associated with this ATA transaction. | ||
3058 | * | ||
3059 | * Read and return BMDMA status register. | ||
3060 | * | ||
3061 | * May be used as the bmdma_status() entry in ata_port_operations. | ||
3062 | * | ||
3063 | * LOCKING: | ||
3064 | * spin_lock_irqsave(host lock) | ||
3065 | */ | ||
3066 | u8 ata_bmdma_status(struct ata_port *ap) | ||
3067 | { | ||
3068 | return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); | ||
3069 | } | ||
3070 | EXPORT_SYMBOL_GPL(ata_bmdma_status); | ||
3071 | |||
3072 | |||
3073 | /** | ||
3074 | * ata_bmdma_port_start - Set port up for bmdma. | ||
3075 | * @ap: Port to initialize | ||
3076 | * | ||
3077 | * Called just after data structures for each port are | ||
3078 | * initialized. Allocates space for PRD table. | ||
3079 | * | ||
3080 | * May be used as the port_start() entry in ata_port_operations. | ||
3081 | * | ||
3082 | * LOCKING: | ||
3083 | * Inherited from caller. | ||
3084 | */ | ||
3085 | int ata_bmdma_port_start(struct ata_port *ap) | ||
3086 | { | ||
3087 | if (ap->mwdma_mask || ap->udma_mask) { | ||
3088 | ap->bmdma_prd = | ||
3089 | dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ, | ||
3090 | &ap->bmdma_prd_dma, GFP_KERNEL); | ||
3091 | if (!ap->bmdma_prd) | ||
3092 | return -ENOMEM; | ||
3093 | } | ||
3094 | |||
3095 | return 0; | ||
3096 | } | ||
3097 | EXPORT_SYMBOL_GPL(ata_bmdma_port_start); | ||
3098 | |||
3099 | /** | ||
3100 | * ata_bmdma_port_start32 - Set port up for dma. | ||
3101 | * @ap: Port to initialize | ||
3102 | * | ||
3103 | * Called just after data structures for each port are | ||
3104 | * initialized. Enables 32bit PIO and allocates space for PRD | ||
3105 | * table. | ||
3106 | * | ||
3107 | * May be used as the port_start() entry in ata_port_operations for | ||
3108 | * devices that are capable of 32bit PIO. | ||
3109 | * | ||
3110 | * LOCKING: | ||
3111 | * Inherited from caller. | ||
3112 | */ | ||
3113 | int ata_bmdma_port_start32(struct ata_port *ap) | ||
3114 | { | ||
3115 | ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE; | ||
3116 | return ata_bmdma_port_start(ap); | ||
3117 | } | ||
3118 | EXPORT_SYMBOL_GPL(ata_bmdma_port_start32); | ||
3119 | |||
3120 | #ifdef CONFIG_PCI | ||
3121 | |||
3122 | /** | ||
3123 | * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex | ||
3124 | * @pdev: PCI device | ||
3125 | * | ||
3126 | * Some PCI ATA devices report simplex mode but in fact can be told to | ||
3127 | * enter non-simplex mode. This implements the necessary logic to | ||
3128 | * perform the task on such devices. Calling it on other devices will | ||
3129 | * have -undefined- behaviour. | ||
3130 | */ | ||
3131 | int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev) | ||
3132 | { | ||
3133 | unsigned long bmdma = pci_resource_start(pdev, 4); | ||
3134 | u8 simplex; | ||
3135 | |||
3136 | if (bmdma == 0) | ||
3137 | return -ENOENT; | ||
3138 | |||
3139 | simplex = inb(bmdma + 0x02); | ||
3140 | outb(simplex & 0x60, bmdma + 0x02); | ||
3141 | simplex = inb(bmdma + 0x02); | ||
3142 | if (simplex & 0x80) | ||
3143 | return -EOPNOTSUPP; | ||
3144 | return 0; | ||
3145 | } | ||
3146 | EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex); | ||
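A driver that knows its controller can be freed from simplex mode would call this from probe and fall back to shared-channel DMA only if it fails; a sketch (error handling abbreviated):

	if (ata_pci_bmdma_clear_simplex(pdev))
		dev_printk(KERN_WARNING, &pdev->dev,
			   "simplex mode not cleared, channels will share DMA\n");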
3147 | |||
3148 | static void ata_bmdma_nodma(struct ata_host *host, const char *reason) | ||
3149 | { | ||
3150 | int i; | ||
3151 | |||
3152 | dev_printk(KERN_ERR, host->dev, "BMDMA: %s, falling back to PIO\n", | ||
3153 | reason); | ||
3154 | |||
3155 | for (i = 0; i < 2; i++) { | ||
3156 | host->ports[i]->mwdma_mask = 0; | ||
3157 | host->ports[i]->udma_mask = 0; | ||
3158 | } | ||
3159 | } | ||
3160 | |||
3161 | /** | ||
3162 | * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host | ||
3163 | * @host: target ATA host | ||
3164 | * | ||
3165 | * Acquire PCI BMDMA resources and initialize @host accordingly. | ||
3166 | * | ||
3167 | * LOCKING: | ||
3168 | * Inherited from calling layer (may sleep). | ||
3169 | */ | ||
3170 | void ata_pci_bmdma_init(struct ata_host *host) | ||
3171 | { | ||
3172 | struct device *gdev = host->dev; | ||
3173 | struct pci_dev *pdev = to_pci_dev(gdev); | ||
3174 | int i, rc; | ||
3175 | |||
3176 | /* No BAR4 allocation: No DMA */ | ||
3177 | if (pci_resource_start(pdev, 4) == 0) { | ||
3178 | ata_bmdma_nodma(host, "BAR4 is zero"); | ||
3179 | return; | ||
3180 | } | ||
3181 | |||
3182 | /* | ||
3183 | * Some controllers require BMDMA region to be initialized | ||
3184 | * even if DMA is not in use to clear IRQ status via | ||
3185 | * ->sff_irq_clear method. Try to initialize bmdma_addr | ||
3186 | * regardless of dma masks. | ||
3187 | */ | ||
3188 | rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); | ||
3189 | if (rc) | ||
3190 | ata_bmdma_nodma(host, "failed to set dma mask"); | ||
3191 | if (!rc) { | ||
3192 | rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); | ||
3193 | if (rc) | ||
3194 | ata_bmdma_nodma(host, | ||
3195 | "failed to set consistent dma mask"); | ||
3196 | } | ||
3197 | |||
3198 | /* request and iomap DMA region */ | ||
3199 | rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev)); | ||
3200 | if (rc) { | ||
3201 | ata_bmdma_nodma(host, "failed to request/iomap BAR4"); | ||
3202 | return; | ||
3203 | } | ||
3204 | host->iomap = pcim_iomap_table(pdev); | ||
3205 | |||
3206 | for (i = 0; i < 2; i++) { | ||
3207 | struct ata_port *ap = host->ports[i]; | ||
3208 | void __iomem *bmdma = host->iomap[4] + 8 * i; | ||
3209 | |||
3210 | if (ata_port_is_dummy(ap)) | ||
3211 | continue; | ||
3212 | |||
3213 | ap->ioaddr.bmdma_addr = bmdma; | ||
3214 | if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) && | ||
3215 | (ioread8(bmdma + 2) & 0x80)) | ||
3216 | host->flags |= ATA_HOST_SIMPLEX; | ||
3217 | |||
3218 | ata_port_desc(ap, "bmdma 0x%llx", | ||
3219 | (unsigned long long)pci_resource_start(pdev, 4) + 8 * i); | ||
3220 | } | ||
3221 | } | ||
3222 | EXPORT_SYMBOL_GPL(ata_pci_bmdma_init); | ||
3223 | |||
3224 | /** | ||
3225 | * ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host | ||
3226 | * @pdev: target PCI device | ||
3227 | * @ppi: array of port_info, must be enough for two ports | ||
3228 | * @r_host: out argument for the initialized ATA host | ||
3229 | * | ||
3230 | * Helper to allocate BMDMA ATA host for @pdev, acquire all PCI | ||
3231 | * resources and initialize it accordingly in one go. | ||
3232 | * | ||
3233 | * LOCKING: | ||
3234 | * Inherited from calling layer (may sleep). | ||
3235 | * | ||
3236 | * RETURNS: | ||
3237 | * 0 on success, -errno otherwise. | ||
3238 | */ | ||
3239 | int ata_pci_bmdma_prepare_host(struct pci_dev *pdev, | ||
3240 | const struct ata_port_info * const * ppi, | ||
3241 | struct ata_host **r_host) | ||
3242 | { | ||
3243 | int rc; | ||
3244 | |||
3245 | rc = ata_pci_sff_prepare_host(pdev, ppi, r_host); | ||
3246 | if (rc) | ||
3247 | return rc; | ||
3248 | |||
3249 | ata_pci_bmdma_init(*r_host); | ||
3250 | return 0; | ||
3251 | } | ||
3252 | EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host); | ||
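This helper suits drivers that must fix up the host between preparation and activation; a sketch of such a probe tail (foo names hypothetical):

	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;

	/* driver-specific fixups on host->ports[] would go here */

	pci_set_master(pdev);
	return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &foo_sht);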
3253 | |||
3254 | /** | ||
3255 | * ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller | ||
3256 | * @pdev: Controller to be initialized | ||
3257 | * @ppi: array of port_info, must be enough for two ports | ||
3258 | * @sht: scsi_host_template to use when registering the host | ||
3259 | * @host_priv: host private_data | ||
3260 | * @hflags: host flags | ||
3261 | * | ||
3262 | * This function is similar to ata_pci_sff_init_one() but also | ||
3263 | * takes care of BMDMA initialization. | ||
3264 | * | ||
3265 | * LOCKING: | ||
3266 | * Inherited from PCI layer (may sleep). | ||
3267 | * | ||
3268 | * RETURNS: | ||
3269 | * Zero on success, negative errno value on error. | ||
3270 | */ | ||
3271 | int ata_pci_bmdma_init_one(struct pci_dev *pdev, | ||
3272 | const struct ata_port_info * const * ppi, | ||
3273 | struct scsi_host_template *sht, void *host_priv, | ||
3274 | int hflags) | ||
3275 | { | ||
3276 | struct device *dev = &pdev->dev; | ||
3277 | const struct ata_port_info *pi; | ||
3278 | struct ata_host *host = NULL; | ||
3279 | int rc; | ||
3280 | |||
3281 | DPRINTK("ENTER\n"); | ||
3282 | |||
3283 | pi = ata_sff_find_valid_pi(ppi); | ||
3284 | if (!pi) { | ||
3285 | dev_printk(KERN_ERR, &pdev->dev, | ||
3286 | "no valid port_info specified\n"); | ||
3287 | return -EINVAL; | ||
3288 | } | ||
3289 | |||
3290 | if (!devres_open_group(dev, NULL, GFP_KERNEL)) | ||
3291 | return -ENOMEM; | ||
3292 | |||
3293 | rc = pcim_enable_device(pdev); | ||
3294 | if (rc) | ||
3295 | goto out; | ||
3296 | |||
3297 | /* prepare and activate BMDMA host */ | ||
3298 | rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); | ||
3299 | if (rc) | ||
3300 | goto out; | ||
3301 | host->private_data = host_priv; | ||
3302 | host->flags |= hflags; | ||
3303 | |||
3304 | pci_set_master(pdev); | ||
3305 | rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht); | ||
3306 | out: | ||
3307 | if (rc == 0) | ||
3308 | devres_remove_group(&pdev->dev, NULL); | ||
3309 | else | ||
3310 | devres_release_group(&pdev->dev, NULL); | ||
3311 | |||
3312 | return rc; | ||
3313 | } | ||
3314 | EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one); | ||
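With this helper, a typical PCI BMDMA driver's probe reduces to a port_info table plus one call; a minimal sketch for a hypothetical pata_foo (foo_port_ops and foo_sht are assumed to exist):

	static int foo_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		static const struct ata_port_info info = {
			.flags		= ATA_FLAG_SLAVE_POSS,
			.pio_mask	= ATA_PIO4,
			.mwdma_mask	= ATA_MWDMA2,
			.udma_mask	= ATA_UDMA5,
			.port_ops	= &foo_port_ops,	/* hypothetical */
		};
		const struct ata_port_info *ppi[] = { &info, NULL };

		return ata_pci_bmdma_init_one(pdev, ppi, &foo_sht, NULL, 0);
	}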
3315 | |||
3316 | #endif /* CONFIG_PCI */ | ||
3317 | #endif /* CONFIG_ATA_BMDMA */ | ||
3318 | |||
3319 | /** | ||
3320 | * ata_sff_port_init - Initialize SFF/BMDMA ATA port | ||
3321 | * @ap: Port to initialize | ||
3322 | * | ||
3323 | * Called on port allocation to initialize SFF/BMDMA specific | ||
3324 | * fields. | ||
3325 | * | ||
3326 | * LOCKING: | ||
3327 | * None. | ||
3328 | */ | ||
3329 | void ata_sff_port_init(struct ata_port *ap) | ||
3330 | { | ||
3331 | INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task); | ||
3332 | ap->ctl = ATA_DEVCTL_OBS; | ||
3333 | ap->last_ctl = 0xFF; | ||
3334 | } | ||
3335 | |||
3336 | int __init ata_sff_init(void) | ||
3337 | { | ||
3338 | ata_sff_wq = alloc_workqueue("ata_sff", WQ_RESCUER, WQ_MAX_ACTIVE); | ||
3339 | if (!ata_sff_wq) | ||
3340 | return -ENOMEM; | ||
3341 | |||
3342 | return 0; | ||
3343 | } | ||
3344 | |||
3345 | void __exit ata_sff_exit(void) | ||
3346 | { | ||
3347 | destroy_workqueue(ata_sff_wq); | ||
3348 | } | ||