 drivers/ata/sata_sil.c | 96 ++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 94 insertions(+), 2 deletions(-)
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 177370921774..564c142b03b0 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -46,7 +46,9 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"sata_sil"
-#define DRV_VERSION	"2.3"
+#define DRV_VERSION	"2.4"
+
+#define SIL_DMA_BOUNDARY	0x7fffffffUL
 
 enum {
 	SIL_MMIO_BAR		= 5,
@@ -118,6 +120,10 @@ static void sil_dev_config(struct ata_device *dev);
 static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
 static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
 static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
+static void sil_qc_prep(struct ata_queued_cmd *qc);
+static void sil_bmdma_setup(struct ata_queued_cmd *qc);
+static void sil_bmdma_start(struct ata_queued_cmd *qc);
+static void sil_bmdma_stop(struct ata_queued_cmd *qc);
 static void sil_freeze(struct ata_port *ap);
 static void sil_thaw(struct ata_port *ap);
 
@@ -167,13 +173,22 @@ static struct pci_driver sil_pci_driver = {
 };
 
 static struct scsi_host_template sil_sht = {
-	ATA_BMDMA_SHT(DRV_NAME),
+	ATA_BASE_SHT(DRV_NAME),
+	/** These controllers support Large Block Transfer which allows
+	    transfer chunks up to 2GB and which cross 64KB boundaries,
+	    therefore the DMA limits are more relaxed than standard ATA SFF. */
+	.dma_boundary		= SIL_DMA_BOUNDARY,
+	.sg_tablesize		= ATA_MAX_PRD
 };
 
 static struct ata_port_operations sil_ops = {
 	.inherits		= &ata_bmdma_port_ops,
 	.dev_config		= sil_dev_config,
 	.set_mode		= sil_set_mode,
+	.bmdma_setup		= sil_bmdma_setup,
+	.bmdma_start		= sil_bmdma_start,
+	.bmdma_stop		= sil_bmdma_stop,
+	.qc_prep		= sil_qc_prep,
 	.freeze			= sil_freeze,
 	.thaw			= sil_thaw,
 	.scr_read		= sil_scr_read,
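
A note for context, not part of the patch: dma_boundary is a mask, and a single DMA segment is never allowed to cross an address that is a multiple of (mask + 1). The stock SFF value (ATA_DMA_BOUNDARY, 0xffff) clips segments at every 64KB line, while SIL_DMA_BOUNDARY (0x7fffffff) only clips them at 2GB lines, which is what lets Large Block Transfer hand the controller much larger chunks. A minimal user-space sketch of that boundary check, assuming the conventional mask semantics (the names here are illustrative, not kernel API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True if [addr, addr + len) crosses a (mask + 1)-aligned line and would
 * therefore have to be split into more than one DMA segment. */
static bool crosses_dma_boundary(uint64_t addr, uint64_t len, uint64_t mask)
{
	return (addr | mask) != ((addr + len - 1) | mask);
}

int main(void)
{
	uint64_t addr = 0x1000, len = 1024 * 1024;	/* 1MB buffer */

	/* splits under the default 64KB SFF boundary ... */
	printf("64KB boundary: %d\n", crosses_dma_boundary(addr, len, 0xffffULL));
	/* ... but fits in a single segment under SIL_DMA_BOUNDARY */
	printf("2GB boundary:  %d\n", crosses_dma_boundary(addr, len, 0x7fffffffULL));
	return 0;
}
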
@@ -249,6 +264,83 @@ module_param(slow_down, int, 0444);
 MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)");
 
 
+static void sil_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
+	void __iomem *bmdma2 = mmio_base + sil_port[ap->port_no].bmdma2;
+
+	/* clear start/stop bit - can safely always write 0 */
+	iowrite8(0, bmdma2);
+
+	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+	ata_sff_dma_pause(ap);
+}
+
+static void sil_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	void __iomem *bmdma = ap->ioaddr.bmdma_addr;
+
+	/* load PRD table addr. */
+	iowrite32(ap->prd_dma, bmdma + ATA_DMA_TABLE_OFS);
+
+	/* issue r/w command */
+	ap->ops->sff_exec_command(ap, &qc->tf);
+}
+
+static void sil_bmdma_start(struct ata_queued_cmd *qc)
+{
+	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+	struct ata_port *ap = qc->ap;
+	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
+	void __iomem *bmdma2 = mmio_base + sil_port[ap->port_no].bmdma2;
+	u8 dmactl = ATA_DMA_START;
+
+	/* set transfer direction, start host DMA transaction
+	   Note: For Large Block Transfer to work, the DMA must be started
+	   using the bmdma2 register. */
+	if (!rw)
+		dmactl |= ATA_DMA_WR;
+	iowrite8(dmactl, bmdma2);
+}
+
+/* The way God intended PCI IDE scatter/gather lists to look and behave... */
+static void sil_fill_sg(struct ata_queued_cmd *qc)
+{
+	struct scatterlist *sg;
+	struct ata_port *ap = qc->ap;
+	struct ata_prd *prd, *last_prd = NULL;
+	unsigned int si;
+
+	prd = &ap->prd[0];
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		/* Note h/w doesn't support 64-bit, so we unconditionally
+		 * truncate dma_addr_t to u32.
+		 */
+		u32 addr = (u32) sg_dma_address(sg);
+		u32 sg_len = sg_dma_len(sg);
+
+		prd->addr = cpu_to_le32(addr);
+		prd->flags_len = cpu_to_le32(sg_len);
+		VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", si, addr, sg_len);
+
+		last_prd = prd;
+		prd++;
+	}
+
+	if (likely(last_prd))
+		last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT);
+}
+
+static void sil_qc_prep(struct ata_queued_cmd *qc)
+{
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+		return;
+
+	sil_fill_sg(qc);
+}
+
 static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
 {
 	u8 cache_line = 0;
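
Also for reference, not part of the patch: sil_fill_sg() above emits one PRD entry (a 32-bit bus address plus a 32-bit length field) per scatterlist element and marks the last entry with the end-of-table bit; with Large Block Transfer a single entry may describe far more than the 64KB a plain SFF BMDMA PRD can. A standalone sketch of that layout, where struct prd_entry and PRD_EOT are stand-ins modeled on the kernel's struct ata_prd and ATA_PRD_EOT, and the addresses and lengths are invented:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PRD_EOT 0x80000000u		/* end-of-table bit, as ATA_PRD_EOT */

struct prd_entry {			/* modeled on struct ata_prd */
	uint32_t addr;			/* bus address of the segment */
	uint32_t flags_len;		/* length in bytes, EOT in bit 31 */
};

/* Fill a PRD table from parallel arrays of segment addresses and lengths,
 * terminating the table on the last entry, as sil_fill_sg() does for a
 * scatterlist. */
static void fill_prd(struct prd_entry *prd,
		     const uint32_t *addrs, const uint32_t *lens, int n)
{
	for (int i = 0; i < n; i++) {
		prd[i].addr = addrs[i];
		prd[i].flags_len = lens[i];
	}
	if (n)
		prd[n - 1].flags_len |= PRD_EOT;
}

int main(void)
{
	/* two hypothetical DMA segments; the first is 1MB, far beyond the
	 * 64KB a standard SFF PRD entry could describe */
	uint32_t addrs[] = { 0x10000000u, 0x20000000u };
	uint32_t lens[]  = { 0x00100000u, 0x00001000u };
	struct prd_entry prd[2];

	fill_prd(prd, addrs, lens, 2);
	for (int i = 0; i < 2; i++)
		printf("PRD[%d] = (0x%08" PRIX32 ", 0x%08" PRIX32 ")\n",
		       i, prd[i].addr, prd[i].flags_len);
	return 0;
}

Its output shows the EOT bit (the top bit of flags_len) set only on the final entry, which is how the controller knows where the table ends.
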