-rw-r--r--  drivers/ata/libata-sff.c    | 290
-rw-r--r--  drivers/ata/pata_atiixp.c   |   2
-rw-r--r--  drivers/ata/pata_cs5520.c   |   2
-rw-r--r--  drivers/ata/pata_cs5530.c   |   2
-rw-r--r--  drivers/ata/pata_sc1200.c   |   2
-rw-r--r--  drivers/ata/pdc_adma.c      |   4
-rw-r--r--  drivers/ata/sata_nv.c       |   4
-rw-r--r--  drivers/ata/sata_qstor.c    |   4
-rw-r--r--  include/linux/libata.h      |   4
9 files changed, 156 insertions(+), 158 deletions(-)
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index aa378c04ed87..a58693bdde9d 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -45,7 +45,7 @@ static struct workqueue_struct *ata_sff_wq;
 const struct ata_port_operations ata_sff_port_ops = {
 	.inherits = &ata_base_port_ops,
 
-	.qc_prep = ata_sff_qc_prep,
+	.qc_prep = ata_noop_qc_prep,
 	.qc_issue = ata_sff_qc_issue,
 	.qc_fill_rtf = ata_sff_qc_fill_rtf,
 
@@ -71,149 +71,6 @@ const struct ata_port_operations ata_sff_port_ops = {
 EXPORT_SYMBOL_GPL(ata_sff_port_ops);
 
 /**
- * ata_fill_sg - Fill PCI IDE PRD table
- * @qc: Metadata associated with taskfile to be transferred
- *
- * Fill PCI IDE PRD (scatter-gather) table with segments
- * associated with the current disk command.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- *
- */
-static void ata_fill_sg(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	struct scatterlist *sg;
-	unsigned int si, pi;
-
-	pi = 0;
-	for_each_sg(qc->sg, sg, qc->n_elem, si) {
-		u32 addr, offset;
-		u32 sg_len, len;
-
-		/* determine if physical DMA addr spans 64K boundary.
-		 * Note h/w doesn't support 64-bit, so we unconditionally
-		 * truncate dma_addr_t to u32.
-		 */
-		addr = (u32) sg_dma_address(sg);
-		sg_len = sg_dma_len(sg);
-
-		while (sg_len) {
-			offset = addr & 0xffff;
-			len = sg_len;
-			if ((offset + sg_len) > 0x10000)
-				len = 0x10000 - offset;
-
-			ap->prd[pi].addr = cpu_to_le32(addr);
-			ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
-			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
-
-			pi++;
-			sg_len -= len;
-			addr += len;
-		}
-	}
-
-	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
-}
-
-/**
- * ata_fill_sg_dumb - Fill PCI IDE PRD table
- * @qc: Metadata associated with taskfile to be transferred
- *
- * Fill PCI IDE PRD (scatter-gather) table with segments
- * associated with the current disk command. Perform the fill
- * so that we avoid writing any length 64K records for
- * controllers that don't follow the spec.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- *
- */
-static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	struct scatterlist *sg;
-	unsigned int si, pi;
-
-	pi = 0;
-	for_each_sg(qc->sg, sg, qc->n_elem, si) {
-		u32 addr, offset;
-		u32 sg_len, len, blen;
-
-		/* determine if physical DMA addr spans 64K boundary.
-		 * Note h/w doesn't support 64-bit, so we unconditionally
-		 * truncate dma_addr_t to u32.
-		 */
-		addr = (u32) sg_dma_address(sg);
-		sg_len = sg_dma_len(sg);
-
-		while (sg_len) {
-			offset = addr & 0xffff;
-			len = sg_len;
-			if ((offset + sg_len) > 0x10000)
-				len = 0x10000 - offset;
-
-			blen = len & 0xffff;
-			ap->prd[pi].addr = cpu_to_le32(addr);
-			if (blen == 0) {
-				/* Some PATA chipsets like the CS5530 can't
-				   cope with 0x0000 meaning 64K as the spec
-				   says */
-				ap->prd[pi].flags_len = cpu_to_le32(0x8000);
-				blen = 0x8000;
-				ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
-			}
-			ap->prd[pi].flags_len = cpu_to_le32(blen);
-			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
-
-			pi++;
-			sg_len -= len;
-			addr += len;
-		}
-	}
-
-	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
-}
-
-/**
- * ata_sff_qc_prep - Prepare taskfile for submission
- * @qc: Metadata associated with taskfile to be prepared
- *
- * Prepare ATA taskfile for submission.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_sff_qc_prep(struct ata_queued_cmd *qc)
-{
-	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
-
-	ata_fill_sg(qc);
-}
-EXPORT_SYMBOL_GPL(ata_sff_qc_prep);
-
-/**
- * ata_sff_dumb_qc_prep - Prepare taskfile for submission
- * @qc: Metadata associated with taskfile to be prepared
- *
- * Prepare ATA taskfile for submission.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc)
-{
-	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
-
-	ata_fill_sg_dumb(qc);
-}
-EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep);
-
-/**
  * ata_sff_check_status - Read device status reg & clear interrupt
  * @ap: port where the device is
  *
@@ -2760,6 +2617,8 @@ const struct ata_port_operations ata_bmdma_port_ops = {
 	.error_handler = ata_bmdma_error_handler,
 	.post_internal_cmd = ata_bmdma_post_internal_cmd,
 
+	.qc_prep = ata_bmdma_qc_prep,
+
 	.bmdma_setup = ata_bmdma_setup,
 	.bmdma_start = ata_bmdma_start,
 	.bmdma_stop = ata_bmdma_stop,
@@ -2778,6 +2637,149 @@ const struct ata_port_operations ata_bmdma32_port_ops = {
 EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
 
 /**
+ * ata_bmdma_fill_sg - Fill PCI IDE PRD table
+ * @qc: Metadata associated with taskfile to be transferred
+ *
+ * Fill PCI IDE PRD (scatter-gather) table with segments
+ * associated with the current disk command.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ */
+static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct scatterlist *sg;
+	unsigned int si, pi;
+
+	pi = 0;
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		u32 addr, offset;
+		u32 sg_len, len;
+
+		/* determine if physical DMA addr spans 64K boundary.
+		 * Note h/w doesn't support 64-bit, so we unconditionally
+		 * truncate dma_addr_t to u32.
+		 */
+		addr = (u32) sg_dma_address(sg);
+		sg_len = sg_dma_len(sg);
+
+		while (sg_len) {
+			offset = addr & 0xffff;
+			len = sg_len;
+			if ((offset + sg_len) > 0x10000)
+				len = 0x10000 - offset;
+
+			ap->prd[pi].addr = cpu_to_le32(addr);
+			ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
+			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
+
+			pi++;
+			sg_len -= len;
+			addr += len;
+		}
+	}
+
+	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+}
+
+/**
+ * ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table
+ * @qc: Metadata associated with taskfile to be transferred
+ *
+ * Fill PCI IDE PRD (scatter-gather) table with segments
+ * associated with the current disk command. Perform the fill
+ * so that we avoid writing any length 64K records for
+ * controllers that don't follow the spec.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ */
+static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct scatterlist *sg;
+	unsigned int si, pi;
+
+	pi = 0;
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		u32 addr, offset;
+		u32 sg_len, len, blen;
+
+		/* determine if physical DMA addr spans 64K boundary.
+		 * Note h/w doesn't support 64-bit, so we unconditionally
+		 * truncate dma_addr_t to u32.
+		 */
+		addr = (u32) sg_dma_address(sg);
+		sg_len = sg_dma_len(sg);
+
+		while (sg_len) {
+			offset = addr & 0xffff;
+			len = sg_len;
+			if ((offset + sg_len) > 0x10000)
+				len = 0x10000 - offset;
+
+			blen = len & 0xffff;
+			ap->prd[pi].addr = cpu_to_le32(addr);
+			if (blen == 0) {
+				/* Some PATA chipsets like the CS5530 can't
+				   cope with 0x0000 meaning 64K as the spec
+				   says */
+				ap->prd[pi].flags_len = cpu_to_le32(0x8000);
+				blen = 0x8000;
+				ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
+			}
+			ap->prd[pi].flags_len = cpu_to_le32(blen);
+			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
+
+			pi++;
+			sg_len -= len;
+			addr += len;
+		}
+	}
+
+	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+}
+
+/**
+ * ata_bmdma_qc_prep - Prepare taskfile for submission
+ * @qc: Metadata associated with taskfile to be prepared
+ *
+ * Prepare ATA taskfile for submission.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
+{
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+		return;
+
+	ata_bmdma_fill_sg(qc);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
+
+/**
+ * ata_bmdma_dumb_qc_prep - Prepare taskfile for submission
+ * @qc: Metadata associated with taskfile to be prepared
+ *
+ * Prepare ATA taskfile for submission.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
+{
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+		return;
+
+	ata_bmdma_fill_sg_dumb(qc);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
+
+/**
  * ata_bmdma_error_handler - Stock error handler for BMDMA controller
  * @ap: port to handle error for
  *
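
The two fill helpers moved above keep the SFF-era PRD rules: no entry may cross a 64K boundary, a length field of 0x0000 means 64KiB per the spec, and the "dumb" variant splits such a full 64K chunk into two 32K entries for chipsets (e.g. the CS5530) that mishandle 0x0000. The following stand-alone user-space C sketch is not part of the patch and uses made-up segment values; it only walks through the same split arithmetic for illustration:

/*
 * Stand-alone illustration (plain user-space C, not kernel code) of the PRD
 * split rules implemented by ata_bmdma_fill_sg() and ata_bmdma_fill_sg_dumb().
 * The segment address and length below are made up.
 */
#include <stdio.h>
#include <stdint.h>

static void fill_prd(uint32_t addr, uint32_t sg_len, int dumb)
{
	unsigned int pi = 0;

	while (sg_len) {
		uint32_t offset = addr & 0xffff;
		uint32_t len = sg_len;

		/* a single PRD entry must not cross a 64K boundary */
		if (offset + sg_len > 0x10000)
			len = 0x10000 - offset;

		if (dumb && (len & 0xffff) == 0) {
			/* 0x0000 means 64K per the spec; "dumb" chipsets
			 * choke on it, so emit two 32K entries instead */
			printf("PRD[%u] = (0x%08x, 0x8000)\n", pi++,
			       (unsigned int)addr);
			printf("PRD[%u] = (0x%08x, 0x8000)\n", pi++,
			       (unsigned int)(addr + 0x8000));
		} else {
			printf("PRD[%u] = (0x%08x, 0x%04x)\n", pi++,
			       (unsigned int)addr,
			       (unsigned int)(len & 0xffff));
		}

		sg_len -= len;
		addr += len;
	}
}

int main(void)
{
	/* a 96K DMA segment starting exactly on a 64K boundary */
	fill_prd(0x00100000, 0x18000, 0);	/* spec fill: 0x0000 (=64K) + 32K */
	fill_prd(0x00100000, 0x18000, 1);	/* dumb fill: 32K + 32K + 32K     */
	return 0;
}

Running it prints (0x00100000, 0x0000) and (0x00110000, 0x8000) for the spec-compliant fill, and three 0x8000 entries for the dumb fill, which is exactly the behavioural difference between the two helpers.
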
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index cbaf2eddac6b..44d88b380ddd 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -217,7 +217,7 @@ static struct scsi_host_template atiixp_sht = {
 static struct ata_port_operations atiixp_port_ops = {
 	.inherits = &ata_bmdma_port_ops,
 
-	.qc_prep = ata_sff_dumb_qc_prep,
+	.qc_prep = ata_bmdma_dumb_qc_prep,
 	.bmdma_start = atiixp_bmdma_start,
 	.bmdma_stop = atiixp_bmdma_stop,
 
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index 95ebdac517f2..17c5f346ff01 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -110,7 +110,7 @@ static struct scsi_host_template cs5520_sht = {
 
 static struct ata_port_operations cs5520_port_ops = {
 	.inherits = &ata_bmdma_port_ops,
-	.qc_prep = ata_sff_dumb_qc_prep,
+	.qc_prep = ata_bmdma_dumb_qc_prep,
 	.cable_detect = ata_cable_40wire,
 	.set_piomode = cs5520_set_piomode,
 };
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
index 738ad2e14a97..4b9a66f18de6 100644
--- a/drivers/ata/pata_cs5530.c
+++ b/drivers/ata/pata_cs5530.c
@@ -167,7 +167,7 @@ static struct scsi_host_template cs5530_sht = {
 static struct ata_port_operations cs5530_port_ops = {
 	.inherits = &ata_bmdma_port_ops,
 
-	.qc_prep = ata_sff_dumb_qc_prep,
+	.qc_prep = ata_bmdma_dumb_qc_prep,
 	.qc_issue = cs5530_qc_issue,
 
 	.cable_detect = ata_cable_40wire,
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
index dfecc6f964b0..599e648a722f 100644
--- a/drivers/ata/pata_sc1200.c
+++ b/drivers/ata/pata_sc1200.c
@@ -209,7 +209,7 @@ static struct scsi_host_template sc1200_sht = {
 
 static struct ata_port_operations sc1200_port_ops = {
 	.inherits = &ata_bmdma_port_ops,
-	.qc_prep = ata_sff_dumb_qc_prep,
+	.qc_prep = ata_bmdma_dumb_qc_prep,
 	.qc_issue = sc1200_qc_issue,
 	.qc_defer = sc1200_qc_defer,
 	.cable_detect = ata_cable_40wire,
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index bb4f838655b6..adbe0426c8f0 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -324,10 +324,8 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
 	VPRINTK("ENTER\n");
 
 	adma_enter_reg_mode(qc->ap);
-	if (qc->tf.protocol != ATA_PROT_DMA) {
-		ata_sff_qc_prep(qc);
+	if (qc->tf.protocol != ATA_PROT_DMA)
 		return;
-	}
 
 	buf[i++] = 0;	/* Response flags */
 	buf[i++] = 0;	/* reserved */
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 64e99824d8c1..7a283d5d68f3 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -1409,7 +1409,7 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
 		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
 			(qc->flags & ATA_QCFLAG_DMAMAP));
 		nv_adma_register_mode(qc->ap);
-		ata_sff_qc_prep(qc);
+		ata_bmdma_qc_prep(qc);
 		return;
 	}
 
@@ -2012,7 +2012,7 @@ static int nv_swncq_port_start(struct ata_port *ap)
 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
 {
 	if (qc->tf.protocol != ATA_PROT_NCQ) {
-		ata_sff_qc_prep(qc);
+		ata_bmdma_qc_prep(qc);
 		return;
 	}
 
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index d3a22f2ae7b6..d533b3d20ca1 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -303,10 +303,8 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
 	VPRINTK("ENTER\n");
 
 	qs_enter_reg_mode(qc->ap);
-	if (qc->tf.protocol != ATA_PROT_DMA) {
-		ata_sff_qc_prep(qc);
+	if (qc->tf.protocol != ATA_PROT_DMA)
 		return;
-	}
 
 	nelem = qs_fill_sg(qc);
 
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 1d3859016aec..3675fd29b2e5 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1570,8 +1570,6 @@ extern const struct ata_port_operations ata_bmdma32_port_ops;
 	.sg_tablesize = LIBATA_MAX_PRD, \
 	.dma_boundary = ATA_DMA_BOUNDARY
 
-extern void ata_sff_qc_prep(struct ata_queued_cmd *qc);
-extern void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc);
 extern void ata_sff_dev_select(struct ata_port *ap, unsigned int device);
 extern u8 ata_sff_check_status(struct ata_port *ap);
 extern void ata_sff_pause(struct ata_port *ap);
@@ -1628,6 +1626,8 @@ extern int ata_pci_sff_init_one(struct pci_dev *pdev,
 			struct scsi_host_template *sht, void *host_priv, int hflags);
 #endif /* CONFIG_PCI */
 
+extern void ata_bmdma_qc_prep(struct ata_queued_cmd *qc);
+extern void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc);
 extern void ata_bmdma_error_handler(struct ata_port *ap);
 extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc);
 extern void ata_bmdma_setup(struct ata_queued_cmd *qc);
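
After this change, PRD-based qc_prep lives entirely in the BMDMA layer: ata_bmdma_port_ops supplies ata_bmdma_qc_prep by inheritance, and only drivers whose controllers cannot accept a 0x0000 (64K) PRD length override it with ata_bmdma_dumb_qc_prep, as the pata_atiixp/cs5520/cs5530/sc1200 hunks above do. A minimal sketch of such a driver's ops table follows; the pata_example names are hypothetical, while the libata symbols come from the tree:

#include <linux/libata.h>

/* hypothetical driver name used only for this sketch */
static struct scsi_host_template pata_example_sht = {
	ATA_BMDMA_SHT("pata_example"),
};

static struct ata_port_operations pata_example_port_ops = {
	/* inherits .qc_prep = ata_bmdma_qc_prep from the BMDMA base ops... */
	.inherits	= &ata_bmdma_port_ops,

	/* ...overridden only because this (imaginary) chipset cannot take a
	 * 64K (0x0000) PRD length word */
	.qc_prep	= ata_bmdma_dumb_qc_prep,

	.cable_detect	= ata_cable_40wire,
};

Drivers whose hardware follows the spec simply omit the .qc_prep override and use the inherited ata_bmdma_qc_prep; pure SFF (non-DMA) ports now get ata_noop_qc_prep from ata_sff_port_ops, as the first hunk of libata-sff.c shows.
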