Diffstat (limited to 'drivers')
-rw-r--r--  drivers/scsi/ata_piix.c      |   6
-rw-r--r--  drivers/scsi/libata-bmdma.c  | 238
-rw-r--r--  drivers/scsi/libata-core.c   | 469
-rw-r--r--  drivers/scsi/libata-scsi.c   |  79
-rw-r--r--  drivers/scsi/sata_nv.c       | 181
-rw-r--r--  drivers/scsi/sata_sil.c      |   2
-rw-r--r--  drivers/scsi/sata_sil24.c    |  25
-rw-r--r--  drivers/scsi/sata_uli.c      |  37
-rw-r--r--  drivers/scsi/sata_vsc.c      |   4
-rw-r--r--  drivers/scsi/scsi_sysfs.c    |   2
10 files changed, 540 insertions, 503 deletions
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index a74e23d39ba9..2d5be84d8bd4 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -742,7 +742,7 @@ static int piix_disable_ahci(struct pci_dev *pdev)
742 | /** | 742 | /** |
743 | * piix_check_450nx_errata - Check for problem 450NX setup | 743 | * piix_check_450nx_errata - Check for problem 450NX setup |
744 | * @ata_dev: the PCI device to check | 744 | * @ata_dev: the PCI device to check |
745 | * | 745 | * |
746 | * Check for the present of 450NX errata #19 and errata #25. If | 746 | * Check for the present of 450NX errata #19 and errata #25. If |
747 | * they are found return an error code so we can turn off DMA | 747 | * they are found return an error code so we can turn off DMA |
748 | */ | 748 | */ |
@@ -753,7 +753,7 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
753 | u16 cfg; | 753 | u16 cfg; |
754 | u8 rev; | 754 | u8 rev; |
755 | int no_piix_dma = 0; | 755 | int no_piix_dma = 0; |
756 | 756 | ||
757 | while((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev)) != NULL) | 757 | while((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev)) != NULL) |
758 | { | 758 | { |
759 | /* Look for 450NX PXB. Check for problem configurations | 759 | /* Look for 450NX PXB. Check for problem configurations |
@@ -772,7 +772,7 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
772 | if(no_piix_dma == 2) | 772 | if(no_piix_dma == 2) |
773 | dev_printk(KERN_WARNING, &ata_dev->dev, "A BIOS update may resolve this.\n"); | 773 | dev_printk(KERN_WARNING, &ata_dev->dev, "A BIOS update may resolve this.\n"); |
774 | return no_piix_dma; | 774 | return no_piix_dma; |
775 | } | 775 | } |
776 | 776 | ||
777 | static void __devinit piix_init_sata_map(struct pci_dev *pdev, | 777 | static void __devinit piix_init_sata_map(struct pci_dev *pdev, |
778 | struct ata_port_info *pinfo) | 778 | struct ata_port_info *pinfo) |
diff --git a/drivers/scsi/libata-bmdma.c b/drivers/scsi/libata-bmdma.c
index 96b4d2160df8..95d81d86d8b7 100644
--- a/drivers/scsi/libata-bmdma.c
+++ b/drivers/scsi/libata-bmdma.c
@@ -418,6 +418,240 @@ u8 ata_altstatus(struct ata_port *ap)
418 | return inb(ap->ioaddr.altstatus_addr); | 418 | return inb(ap->ioaddr.altstatus_addr); |
419 | } | 419 | } |
420 | 420 | ||
421 | /** | ||
422 | * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction | ||
423 | * @qc: Info associated with this ATA transaction. | ||
424 | * | ||
425 | * LOCKING: | ||
426 | * spin_lock_irqsave(host_set lock) | ||
427 | */ | ||
428 | |||
429 | static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc) | ||
430 | { | ||
431 | struct ata_port *ap = qc->ap; | ||
432 | unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); | ||
433 | u8 dmactl; | ||
434 | void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; | ||
435 | |||
436 | /* load PRD table addr. */ | ||
437 | mb(); /* make sure PRD table writes are visible to controller */ | ||
438 | writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS); | ||
439 | |||
440 | /* specify data direction, triple-check start bit is clear */ | ||
441 | dmactl = readb(mmio + ATA_DMA_CMD); | ||
442 | dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); | ||
443 | if (!rw) | ||
444 | dmactl |= ATA_DMA_WR; | ||
445 | writeb(dmactl, mmio + ATA_DMA_CMD); | ||
446 | |||
447 | /* issue r/w command */ | ||
448 | ap->ops->exec_command(ap, &qc->tf); | ||
449 | } | ||
450 | |||
451 | /** | ||
452 | * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction | ||
453 | * @qc: Info associated with this ATA transaction. | ||
454 | * | ||
455 | * LOCKING: | ||
456 | * spin_lock_irqsave(host_set lock) | ||
457 | */ | ||
458 | |||
459 | static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc) | ||
460 | { | ||
461 | struct ata_port *ap = qc->ap; | ||
462 | void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; | ||
463 | u8 dmactl; | ||
464 | |||
465 | /* start host DMA transaction */ | ||
466 | dmactl = readb(mmio + ATA_DMA_CMD); | ||
467 | writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD); | ||
468 | |||
469 | /* Strictly, one may wish to issue a readb() here, to | ||
470 | * flush the mmio write. However, control also passes | ||
471 | * to the hardware at this point, and it will interrupt | ||
472 | * us when we are to resume control. So, in effect, | ||
473 | * we don't care when the mmio write flushes. | ||
474 | * Further, a read of the DMA status register _immediately_ | ||
475 | * following the write may not be what certain flaky hardware | ||
476 | * is expected, so I think it is best to not add a readb() | ||
477 | * without first all the MMIO ATA cards/mobos. | ||
478 | * Or maybe I'm just being paranoid. | ||
479 | */ | ||
480 | } | ||
481 | |||
482 | /** | ||
483 | * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO) | ||
484 | * @qc: Info associated with this ATA transaction. | ||
485 | * | ||
486 | * LOCKING: | ||
487 | * spin_lock_irqsave(host_set lock) | ||
488 | */ | ||
489 | |||
490 | static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc) | ||
491 | { | ||
492 | struct ata_port *ap = qc->ap; | ||
493 | unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); | ||
494 | u8 dmactl; | ||
495 | |||
496 | /* load PRD table addr. */ | ||
497 | outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); | ||
498 | |||
499 | /* specify data direction, triple-check start bit is clear */ | ||
500 | dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | ||
501 | dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); | ||
502 | if (!rw) | ||
503 | dmactl |= ATA_DMA_WR; | ||
504 | outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | ||
505 | |||
506 | /* issue r/w command */ | ||
507 | ap->ops->exec_command(ap, &qc->tf); | ||
508 | } | ||
509 | |||
510 | /** | ||
511 | * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO) | ||
512 | * @qc: Info associated with this ATA transaction. | ||
513 | * | ||
514 | * LOCKING: | ||
515 | * spin_lock_irqsave(host_set lock) | ||
516 | */ | ||
517 | |||
518 | static void ata_bmdma_start_pio (struct ata_queued_cmd *qc) | ||
519 | { | ||
520 | struct ata_port *ap = qc->ap; | ||
521 | u8 dmactl; | ||
522 | |||
523 | /* start host DMA transaction */ | ||
524 | dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | ||
525 | outb(dmactl | ATA_DMA_START, | ||
526 | ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | ||
527 | } | ||
528 | |||
529 | |||
530 | /** | ||
531 | * ata_bmdma_start - Start a PCI IDE BMDMA transaction | ||
532 | * @qc: Info associated with this ATA transaction. | ||
533 | * | ||
534 | * Writes the ATA_DMA_START flag to the DMA command register. | ||
535 | * | ||
536 | * May be used as the bmdma_start() entry in ata_port_operations. | ||
537 | * | ||
538 | * LOCKING: | ||
539 | * spin_lock_irqsave(host_set lock) | ||
540 | */ | ||
541 | void ata_bmdma_start(struct ata_queued_cmd *qc) | ||
542 | { | ||
543 | if (qc->ap->flags & ATA_FLAG_MMIO) | ||
544 | ata_bmdma_start_mmio(qc); | ||
545 | else | ||
546 | ata_bmdma_start_pio(qc); | ||
547 | } | ||
548 | |||
549 | |||
550 | /** | ||
551 | * ata_bmdma_setup - Set up PCI IDE BMDMA transaction | ||
552 | * @qc: Info associated with this ATA transaction. | ||
553 | * | ||
554 | * Writes address of PRD table to device's PRD Table Address | ||
555 | * register, sets the DMA control register, and calls | ||
556 | * ops->exec_command() to start the transfer. | ||
557 | * | ||
558 | * May be used as the bmdma_setup() entry in ata_port_operations. | ||
559 | * | ||
560 | * LOCKING: | ||
561 | * spin_lock_irqsave(host_set lock) | ||
562 | */ | ||
563 | void ata_bmdma_setup(struct ata_queued_cmd *qc) | ||
564 | { | ||
565 | if (qc->ap->flags & ATA_FLAG_MMIO) | ||
566 | ata_bmdma_setup_mmio(qc); | ||
567 | else | ||
568 | ata_bmdma_setup_pio(qc); | ||
569 | } | ||
570 | |||
571 | |||
572 | /** | ||
573 | * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt. | ||
574 | * @ap: Port associated with this ATA transaction. | ||
575 | * | ||
576 | * Clear interrupt and error flags in DMA status register. | ||
577 | * | ||
578 | * May be used as the irq_clear() entry in ata_port_operations. | ||
579 | * | ||
580 | * LOCKING: | ||
581 | * spin_lock_irqsave(host_set lock) | ||
582 | */ | ||
583 | |||
584 | void ata_bmdma_irq_clear(struct ata_port *ap) | ||
585 | { | ||
586 | if (!ap->ioaddr.bmdma_addr) | ||
587 | return; | ||
588 | |||
589 | if (ap->flags & ATA_FLAG_MMIO) { | ||
590 | void __iomem *mmio = | ||
591 | ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS; | ||
592 | writeb(readb(mmio), mmio); | ||
593 | } else { | ||
594 | unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS; | ||
595 | outb(inb(addr), addr); | ||
596 | } | ||
597 | } | ||
598 | |||
599 | |||
600 | /** | ||
601 | * ata_bmdma_status - Read PCI IDE BMDMA status | ||
602 | * @ap: Port associated with this ATA transaction. | ||
603 | * | ||
604 | * Read and return BMDMA status register. | ||
605 | * | ||
606 | * May be used as the bmdma_status() entry in ata_port_operations. | ||
607 | * | ||
608 | * LOCKING: | ||
609 | * spin_lock_irqsave(host_set lock) | ||
610 | */ | ||
611 | |||
612 | u8 ata_bmdma_status(struct ata_port *ap) | ||
613 | { | ||
614 | u8 host_stat; | ||
615 | if (ap->flags & ATA_FLAG_MMIO) { | ||
616 | void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; | ||
617 | host_stat = readb(mmio + ATA_DMA_STATUS); | ||
618 | } else | ||
619 | host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); | ||
620 | return host_stat; | ||
621 | } | ||
622 | |||
623 | |||
624 | /** | ||
625 | * ata_bmdma_stop - Stop PCI IDE BMDMA transfer | ||
626 | * @qc: Command we are ending DMA for | ||
627 | * | ||
628 | * Clears the ATA_DMA_START flag in the dma control register | ||
629 | * | ||
630 | * May be used as the bmdma_stop() entry in ata_port_operations. | ||
631 | * | ||
632 | * LOCKING: | ||
633 | * spin_lock_irqsave(host_set lock) | ||
634 | */ | ||
635 | |||
636 | void ata_bmdma_stop(struct ata_queued_cmd *qc) | ||
637 | { | ||
638 | struct ata_port *ap = qc->ap; | ||
639 | if (ap->flags & ATA_FLAG_MMIO) { | ||
640 | void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; | ||
641 | |||
642 | /* clear start/stop bit */ | ||
643 | writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START, | ||
644 | mmio + ATA_DMA_CMD); | ||
645 | } else { | ||
646 | /* clear start/stop bit */ | ||
647 | outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START, | ||
648 | ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | ||
649 | } | ||
650 | |||
651 | /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ | ||
652 | ata_altstatus(ap); /* dummy read */ | ||
653 | } | ||
654 | |||
421 | #ifdef CONFIG_PCI | 655 | #ifdef CONFIG_PCI |
422 | static struct ata_probe_ent * | 656 | static struct ata_probe_ent * |
423 | ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port) | 657 | ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port) |
@@ -707,7 +941,7 @@ err_out:
707 | * @pdev: PCI device | 941 | * @pdev: PCI device |
708 | * | 942 | * |
709 | * Some PCI ATA devices report simplex mode but in fact can be told to | 943 | * Some PCI ATA devices report simplex mode but in fact can be told to |
710 | * enter non simplex mode. This implements the neccessary logic to | 944 | * enter non simplex mode. This implements the neccessary logic to |
711 | * perform the task on such devices. Calling it on other devices will | 945 | * perform the task on such devices. Calling it on other devices will |
712 | * have -undefined- behaviour. | 946 | * have -undefined- behaviour. |
713 | */ | 947 | */ |
@@ -732,7 +966,7 @@ unsigned long ata_pci_default_filter(const struct ata_port *ap, struct ata_devic
732 | { | 966 | { |
733 | /* Filter out DMA modes if the device has been configured by | 967 | /* Filter out DMA modes if the device has been configured by |
734 | the BIOS as PIO only */ | 968 | the BIOS as PIO only */ |
735 | 969 | ||
736 | if (ap->ioaddr.bmdma_addr == 0) | 970 | if (ap->ioaddr.bmdma_addr == 0) |
737 | xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); | 971 | xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); |
738 | return xfer_mask; | 972 | return xfer_mask; |
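
The kernel-doc on the helpers consolidated into libata-bmdma.c above already spells out their intended use: each one can be plugged directly into a driver's ata_port_operations. A minimal sketch of that wiring follows; it is illustrative only, the ops-structure name is made up, and everything a real LLDD needs besides the BMDMA entries is omitted.

#include <linux/libata.h>

/* Sketch only: hooking the consolidated libata-bmdma.c helpers into a
 * driver's ata_port_operations.  Only the ata_bmdma_* entries come from
 * the code above; the remaining mandatory callbacks (taskfile access,
 * reset, command issue, interrupt handling) are omitted.
 */
static const struct ata_port_operations example_bmdma_ops = {
        .bmdma_setup    = ata_bmdma_setup,      /* PRD address + direction, then exec_command() */
        .bmdma_start    = ata_bmdma_start,      /* sets ATA_DMA_START */
        .bmdma_stop     = ata_bmdma_stop,       /* clears ATA_DMA_START, dummy altstatus read */
        .bmdma_status   = ata_bmdma_status,     /* reads ATA_DMA_STATUS */
        .irq_clear      = ata_bmdma_irq_clear,  /* acks DMA interrupt/error bits */
        /* ... taskfile, reset and issue hooks omitted ... */
};
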
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 0314abd97f2d..d279666dcb38 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -64,9 +64,9 @@
64 | static unsigned int ata_dev_init_params(struct ata_port *ap, | 64 | static unsigned int ata_dev_init_params(struct ata_port *ap, |
65 | struct ata_device *dev); | 65 | struct ata_device *dev); |
66 | static void ata_set_mode(struct ata_port *ap); | 66 | static void ata_set_mode(struct ata_port *ap); |
67 | static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev); | 67 | static unsigned int ata_dev_set_xfermode(struct ata_port *ap, |
68 | static unsigned int ata_dev_xfermask(struct ata_port *ap, | 68 | struct ata_device *dev); |
69 | struct ata_device *dev); | 69 | static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev); |
70 | 70 | ||
71 | static unsigned int ata_unique_id = 1; | 71 | static unsigned int ata_unique_id = 1; |
72 | static struct workqueue_struct *ata_wq; | 72 | static struct workqueue_struct *ata_wq; |
@@ -190,7 +190,7 @@ static const u8 ata_rw_cmds[] = {
190 | * ata_rwcmd_protocol - set taskfile r/w commands and protocol | 190 | * ata_rwcmd_protocol - set taskfile r/w commands and protocol |
191 | * @qc: command to examine and configure | 191 | * @qc: command to examine and configure |
192 | * | 192 | * |
193 | * Examine the device configuration and tf->flags to calculate | 193 | * Examine the device configuration and tf->flags to calculate |
194 | * the proper read/write commands and protocol to use. | 194 | * the proper read/write commands and protocol to use. |
195 | * | 195 | * |
196 | * LOCKING: | 196 | * LOCKING: |
@@ -203,7 +203,7 @@ int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
203 | u8 cmd; | 203 | u8 cmd; |
204 | 204 | ||
205 | int index, fua, lba48, write; | 205 | int index, fua, lba48, write; |
206 | 206 | ||
207 | fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0; | 207 | fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0; |
208 | lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0; | 208 | lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0; |
209 | write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0; | 209 | write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0; |
@@ -252,6 +252,29 @@ static unsigned int ata_pack_xfermask(unsigned int pio_mask,
252 | ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA); | 252 | ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA); |
253 | } | 253 | } |
254 | 254 | ||
255 | /** | ||
256 | * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks | ||
257 | * @xfer_mask: xfer_mask to unpack | ||
258 | * @pio_mask: resulting pio_mask | ||
259 | * @mwdma_mask: resulting mwdma_mask | ||
260 | * @udma_mask: resulting udma_mask | ||
261 | * | ||
262 | * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask. | ||
263 | * Any NULL distination masks will be ignored. | ||
264 | */ | ||
265 | static void ata_unpack_xfermask(unsigned int xfer_mask, | ||
266 | unsigned int *pio_mask, | ||
267 | unsigned int *mwdma_mask, | ||
268 | unsigned int *udma_mask) | ||
269 | { | ||
270 | if (pio_mask) | ||
271 | *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO; | ||
272 | if (mwdma_mask) | ||
273 | *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA; | ||
274 | if (udma_mask) | ||
275 | *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA; | ||
276 | } | ||
277 | |||
255 | static const struct ata_xfer_ent { | 278 | static const struct ata_xfer_ent { |
256 | unsigned int shift, bits; | 279 | unsigned int shift, bits; |
257 | u8 base; | 280 | u8 base; |
@@ -372,6 +395,15 @@ static const char *ata_mode_string(unsigned int xfer_mask)
372 | return "<n/a>"; | 395 | return "<n/a>"; |
373 | } | 396 | } |
374 | 397 | ||
398 | static void ata_dev_disable(struct ata_port *ap, struct ata_device *dev) | ||
399 | { | ||
400 | if (ata_dev_present(dev)) { | ||
401 | printk(KERN_WARNING "ata%u: dev %u disabled\n", | ||
402 | ap->id, dev->devno); | ||
403 | dev->class++; | ||
404 | } | ||
405 | } | ||
406 | |||
375 | /** | 407 | /** |
376 | * ata_pio_devchk - PATA device presence detection | 408 | * ata_pio_devchk - PATA device presence detection |
377 | * @ap: ATA channel to examine | 409 | * @ap: ATA channel to examine |
@@ -987,6 +1019,22 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
987 | 1019 | ||
988 | ata_qc_free(qc); | 1020 | ata_qc_free(qc); |
989 | 1021 | ||
1022 | /* XXX - Some LLDDs (sata_mv) disable port on command failure. | ||
1023 | * Until those drivers are fixed, we detect the condition | ||
1024 | * here, fail the command with AC_ERR_SYSTEM and reenable the | ||
1025 | * port. | ||
1026 | * | ||
1027 | * Note that this doesn't change any behavior as internal | ||
1028 | * command failure results in disabling the device in the | ||
1029 | * higher layer for LLDDs without new reset/EH callbacks. | ||
1030 | * | ||
1031 | * Kill the following code as soon as those drivers are fixed. | ||
1032 | */ | ||
1033 | if (ap->flags & ATA_FLAG_PORT_DISABLED) { | ||
1034 | err_mask |= AC_ERR_SYSTEM; | ||
1035 | ata_port_probe(ap); | ||
1036 | } | ||
1037 | |||
990 | return err_mask; | 1038 | return err_mask; |
991 | } | 1039 | } |
992 | 1040 | ||
@@ -1007,7 +1055,7 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1007 | return 0; | 1055 | return 0; |
1008 | if (speed > 2) | 1056 | if (speed > 2) |
1009 | return 1; | 1057 | return 1; |
1010 | 1058 | ||
1011 | /* If we have no drive specific rule, then PIO 2 is non IORDY */ | 1059 | /* If we have no drive specific rule, then PIO 2 is non IORDY */ |
1012 | 1060 | ||
1013 | if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */ | 1061 | if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */ |
@@ -1305,7 +1353,7 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1305 | if (print_info) | 1353 | if (print_info) |
1306 | printk(KERN_INFO "ata%u(%u): applying bridge limits\n", | 1354 | printk(KERN_INFO "ata%u(%u): applying bridge limits\n", |
1307 | ap->id, dev->devno); | 1355 | ap->id, dev->devno); |
1308 | ap->udma_mask &= ATA_UDMA5; | 1356 | dev->udma_mask &= ATA_UDMA5; |
1309 | dev->max_sectors = ATA_MAX_SECTORS; | 1357 | dev->max_sectors = ATA_MAX_SECTORS; |
1310 | } | 1358 | } |
1311 | 1359 | ||
@@ -1316,8 +1364,6 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1316 | return 0; | 1364 | return 0; |
1317 | 1365 | ||
1318 | err_out_nosup: | 1366 | err_out_nosup: |
1319 | printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n", | ||
1320 | ap->id, dev->devno); | ||
1321 | DPRINTK("EXIT, err\n"); | 1367 | DPRINTK("EXIT, err\n"); |
1322 | return rc; | 1368 | return rc; |
1323 | } | 1369 | } |
@@ -1384,7 +1430,7 @@ static int ata_bus_probe(struct ata_port *ap)
1384 | } | 1430 | } |
1385 | 1431 | ||
1386 | if (ata_dev_configure(ap, dev, 1)) { | 1432 | if (ata_dev_configure(ap, dev, 1)) { |
1387 | dev->class++; /* disable device */ | 1433 | ata_dev_disable(ap, dev); |
1388 | continue; | 1434 | continue; |
1389 | } | 1435 | } |
1390 | 1436 | ||
@@ -1530,6 +1576,23 @@ void sata_phy_reset(struct ata_port *ap)
1530 | } | 1576 | } |
1531 | 1577 | ||
1532 | /** | 1578 | /** |
1579 | * ata_dev_pair - return other device on cable | ||
1580 | * @ap: port | ||
1581 | * @adev: device | ||
1582 | * | ||
1583 | * Obtain the other device on the same cable, or if none is | ||
1584 | * present NULL is returned | ||
1585 | */ | ||
1586 | |||
1587 | struct ata_device *ata_dev_pair(struct ata_port *ap, struct ata_device *adev) | ||
1588 | { | ||
1589 | struct ata_device *pair = &ap->device[1 - adev->devno]; | ||
1590 | if (!ata_dev_present(pair)) | ||
1591 | return NULL; | ||
1592 | return pair; | ||
1593 | } | ||
1594 | |||
1595 | /** | ||
1533 | * ata_port_disable - Disable port. | 1596 | * ata_port_disable - Disable port. |
1534 | * @ap: Port to be disabled. | 1597 | * @ap: Port to be disabled. |
1535 | * | 1598 | * |
@@ -1557,7 +1620,7 @@ void ata_port_disable(struct ata_port *ap)
1557 | * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). | 1620 | * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). |
1558 | * These were taken from ATA/ATAPI-6 standard, rev 0a, except | 1621 | * These were taken from ATA/ATAPI-6 standard, rev 0a, except |
1559 | * for PIO 5, which is a nonstandard extension and UDMA6, which | 1622 | * for PIO 5, which is a nonstandard extension and UDMA6, which |
1560 | * is currently supported only by Maxtor drives. | 1623 | * is currently supported only by Maxtor drives. |
1561 | */ | 1624 | */ |
1562 | 1625 | ||
1563 | static const struct ata_timing ata_timing[] = { | 1626 | static const struct ata_timing ata_timing[] = { |
@@ -1572,11 +1635,11 @@ static const struct ata_timing ata_timing[] = {
1572 | { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 }, | 1635 | { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 }, |
1573 | 1636 | ||
1574 | /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */ | 1637 | /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */ |
1575 | 1638 | ||
1576 | { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 }, | 1639 | { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 }, |
1577 | { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 }, | 1640 | { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 }, |
1578 | { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 }, | 1641 | { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 }, |
1579 | 1642 | ||
1580 | { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 }, | 1643 | { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 }, |
1581 | { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 }, | 1644 | { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 }, |
1582 | { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 }, | 1645 | { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 }, |
@@ -1629,7 +1692,7 @@ static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
1629 | for (t = ata_timing; t->mode != speed; t++) | 1692 | for (t = ata_timing; t->mode != speed; t++) |
1630 | if (t->mode == 0xFF) | 1693 | if (t->mode == 0xFF) |
1631 | return NULL; | 1694 | return NULL; |
1632 | return t; | 1695 | return t; |
1633 | } | 1696 | } |
1634 | 1697 | ||
1635 | int ata_timing_compute(struct ata_device *adev, unsigned short speed, | 1698 | int ata_timing_compute(struct ata_device *adev, unsigned short speed, |
@@ -1639,7 +1702,7 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1639 | struct ata_timing p; | 1702 | struct ata_timing p; |
1640 | 1703 | ||
1641 | /* | 1704 | /* |
1642 | * Find the mode. | 1705 | * Find the mode. |
1643 | */ | 1706 | */ |
1644 | 1707 | ||
1645 | if (!(s = ata_timing_find_mode(speed))) | 1708 | if (!(s = ata_timing_find_mode(speed))) |
@@ -1697,20 +1760,28 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1697 | return 0; | 1760 | return 0; |
1698 | } | 1761 | } |
1699 | 1762 | ||
1700 | static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev) | 1763 | static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev) |
1701 | { | 1764 | { |
1702 | if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED)) | 1765 | unsigned int err_mask; |
1703 | return; | 1766 | int rc; |
1704 | 1767 | ||
1705 | if (dev->xfer_shift == ATA_SHIFT_PIO) | 1768 | if (dev->xfer_shift == ATA_SHIFT_PIO) |
1706 | dev->flags |= ATA_DFLAG_PIO; | 1769 | dev->flags |= ATA_DFLAG_PIO; |
1707 | 1770 | ||
1708 | ata_dev_set_xfermode(ap, dev); | 1771 | err_mask = ata_dev_set_xfermode(ap, dev); |
1772 | if (err_mask) { | ||
1773 | printk(KERN_ERR | ||
1774 | "ata%u: failed to set xfermode (err_mask=0x%x)\n", | ||
1775 | ap->id, err_mask); | ||
1776 | return -EIO; | ||
1777 | } | ||
1709 | 1778 | ||
1710 | if (ata_dev_revalidate(ap, dev, 0)) { | 1779 | rc = ata_dev_revalidate(ap, dev, 0); |
1711 | printk(KERN_ERR "ata%u: failed to revalidate after set " | 1780 | if (rc) { |
1712 | "xfermode, disabled\n", ap->id); | 1781 | printk(KERN_ERR |
1713 | ata_port_disable(ap); | 1782 | "ata%u: failed to revalidate after set xfermode\n", |
1783 | ap->id); | ||
1784 | return rc; | ||
1714 | } | 1785 | } |
1715 | 1786 | ||
1716 | DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", | 1787 | DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", |
@@ -1719,6 +1790,7 @@ static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev) | |||
1719 | printk(KERN_INFO "ata%u: dev %u configured for %s\n", | 1790 | printk(KERN_INFO "ata%u: dev %u configured for %s\n", |
1720 | ap->id, dev->devno, | 1791 | ap->id, dev->devno, |
1721 | ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode))); | 1792 | ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode))); |
1793 | return 0; | ||
1722 | } | 1794 | } |
1723 | 1795 | ||
1724 | static int ata_host_set_pio(struct ata_port *ap) | 1796 | static int ata_host_set_pio(struct ata_port *ap) |
@@ -1778,16 +1850,19 @@ static void ata_set_mode(struct ata_port *ap)
1778 | /* step 1: calculate xfer_mask */ | 1850 | /* step 1: calculate xfer_mask */ |
1779 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | 1851 | for (i = 0; i < ATA_MAX_DEVICES; i++) { |
1780 | struct ata_device *dev = &ap->device[i]; | 1852 | struct ata_device *dev = &ap->device[i]; |
1781 | unsigned int xfer_mask; | 1853 | unsigned int pio_mask, dma_mask; |
1782 | 1854 | ||
1783 | if (!ata_dev_present(dev)) | 1855 | if (!ata_dev_present(dev)) |
1784 | continue; | 1856 | continue; |
1785 | 1857 | ||
1786 | xfer_mask = ata_dev_xfermask(ap, dev); | 1858 | ata_dev_xfermask(ap, dev); |
1859 | |||
1860 | /* TODO: let LLDD filter dev->*_mask here */ | ||
1787 | 1861 | ||
1788 | dev->pio_mode = ata_xfer_mask2mode(xfer_mask & ATA_MASK_PIO); | 1862 | pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); |
1789 | dev->dma_mode = ata_xfer_mask2mode(xfer_mask & (ATA_MASK_MWDMA | | 1863 | dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask); |
1790 | ATA_MASK_UDMA)); | 1864 | dev->pio_mode = ata_xfer_mask2mode(pio_mask); |
1865 | dev->dma_mode = ata_xfer_mask2mode(dma_mask); | ||
1791 | } | 1866 | } |
1792 | 1867 | ||
1793 | /* step 2: always set host PIO timings */ | 1868 | /* step 2: always set host PIO timings */ |
@@ -1799,11 +1874,15 @@ static void ata_set_mode(struct ata_port *ap)
1799 | ata_host_set_dma(ap); | 1874 | ata_host_set_dma(ap); |
1800 | 1875 | ||
1801 | /* step 4: update devices' xfer mode */ | 1876 | /* step 4: update devices' xfer mode */ |
1802 | for (i = 0; i < ATA_MAX_DEVICES; i++) | 1877 | for (i = 0; i < ATA_MAX_DEVICES; i++) { |
1803 | ata_dev_set_mode(ap, &ap->device[i]); | 1878 | struct ata_device *dev = &ap->device[i]; |
1804 | 1879 | ||
1805 | if (ap->flags & ATA_FLAG_PORT_DISABLED) | 1880 | if (!ata_dev_present(dev)) |
1806 | return; | 1881 | continue; |
1882 | |||
1883 | if (ata_dev_set_mode(ap, dev)) | ||
1884 | goto err_out; | ||
1885 | } | ||
1807 | 1886 | ||
1808 | if (ap->ops->post_set_mode) | 1887 | if (ap->ops->post_set_mode) |
1809 | ap->ops->post_set_mode(ap); | 1888 | ap->ops->post_set_mode(ap); |
@@ -1999,11 +2078,11 @@ static unsigned int ata_bus_softreset(struct ata_port *ap,
1999 | */ | 2078 | */ |
2000 | msleep(150); | 2079 | msleep(150); |
2001 | 2080 | ||
2002 | 2081 | ||
2003 | /* Before we perform post reset processing we want to see if | 2082 | /* Before we perform post reset processing we want to see if |
2004 | the bus shows 0xFF because the odd clown forgets the D7 pulldown | 2083 | the bus shows 0xFF because the odd clown forgets the D7 pulldown |
2005 | resistor */ | 2084 | resistor */ |
2006 | 2085 | ||
2007 | if (ata_check_status(ap) == 0xFF) | 2086 | if (ata_check_status(ap) == 0xFF) |
2008 | return 1; /* Positive is failure for some reason */ | 2087 | return 1; /* Positive is failure for some reason */ |
2009 | 2088 | ||
@@ -2572,22 +2651,22 @@ static const char * const ata_dma_blacklist [] = {
2572 | "SanDisk SDP3B-64", NULL, | 2651 | "SanDisk SDP3B-64", NULL, |
2573 | "SANYO CD-ROM CRD", NULL, | 2652 | "SANYO CD-ROM CRD", NULL, |
2574 | "HITACHI CDR-8", NULL, | 2653 | "HITACHI CDR-8", NULL, |
2575 | "HITACHI CDR-8335", NULL, | 2654 | "HITACHI CDR-8335", NULL, |
2576 | "HITACHI CDR-8435", NULL, | 2655 | "HITACHI CDR-8435", NULL, |
2577 | "Toshiba CD-ROM XM-6202B", NULL, | 2656 | "Toshiba CD-ROM XM-6202B", NULL, |
2578 | "TOSHIBA CD-ROM XM-1702BC", NULL, | 2657 | "TOSHIBA CD-ROM XM-1702BC", NULL, |
2579 | "CD-532E-A", NULL, | 2658 | "CD-532E-A", NULL, |
2580 | "E-IDE CD-ROM CR-840", NULL, | 2659 | "E-IDE CD-ROM CR-840", NULL, |
2581 | "CD-ROM Drive/F5A", NULL, | 2660 | "CD-ROM Drive/F5A", NULL, |
2582 | "WPI CDD-820", NULL, | 2661 | "WPI CDD-820", NULL, |
2583 | "SAMSUNG CD-ROM SC-148C", NULL, | 2662 | "SAMSUNG CD-ROM SC-148C", NULL, |
2584 | "SAMSUNG CD-ROM SC", NULL, | 2663 | "SAMSUNG CD-ROM SC", NULL, |
2585 | "SanDisk SDP3B-64", NULL, | 2664 | "SanDisk SDP3B-64", NULL, |
2586 | "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL, | 2665 | "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL, |
2587 | "_NEC DV5800A", NULL, | 2666 | "_NEC DV5800A", NULL, |
2588 | "SAMSUNG CD-ROM SN-124", "N001" | 2667 | "SAMSUNG CD-ROM SN-124", "N001" |
2589 | }; | 2668 | }; |
2590 | 2669 | ||
2591 | static int ata_strim(char *s, size_t len) | 2670 | static int ata_strim(char *s, size_t len) |
2592 | { | 2671 | { |
2593 | len = strnlen(s, len); | 2672 | len = strnlen(s, len); |
@@ -2630,18 +2709,15 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
2630 | * @ap: Port on which the device to compute xfermask for resides | 2709 | * @ap: Port on which the device to compute xfermask for resides |
2631 | * @dev: Device to compute xfermask for | 2710 | * @dev: Device to compute xfermask for |
2632 | * | 2711 | * |
2633 | * Compute supported xfermask of @dev. This function is | 2712 | * Compute supported xfermask of @dev and store it in |
2634 | * responsible for applying all known limits including host | 2713 | * dev->*_mask. This function is responsible for applying all |
2635 | * controller limits, device blacklist, etc... | 2714 | * known limits including host controller limits, device |
2715 | * blacklist, etc... | ||
2636 | * | 2716 | * |
2637 | * LOCKING: | 2717 | * LOCKING: |
2638 | * None. | 2718 | * None. |
2639 | * | ||
2640 | * RETURNS: | ||
2641 | * Computed xfermask. | ||
2642 | */ | 2719 | */ |
2643 | static unsigned int ata_dev_xfermask(struct ata_port *ap, | 2720 | static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev) |
2644 | struct ata_device *dev) | ||
2645 | { | 2721 | { |
2646 | unsigned long xfer_mask; | 2722 | unsigned long xfer_mask; |
2647 | int i; | 2723 | int i; |
@@ -2654,6 +2730,8 @@ static unsigned int ata_dev_xfermask(struct ata_port *ap,
2654 | struct ata_device *d = &ap->device[i]; | 2730 | struct ata_device *d = &ap->device[i]; |
2655 | if (!ata_dev_present(d)) | 2731 | if (!ata_dev_present(d)) |
2656 | continue; | 2732 | continue; |
2733 | xfer_mask &= ata_pack_xfermask(d->pio_mask, d->mwdma_mask, | ||
2734 | d->udma_mask); | ||
2657 | xfer_mask &= ata_id_xfermask(d->id); | 2735 | xfer_mask &= ata_id_xfermask(d->id); |
2658 | if (ata_dma_blacklisted(d)) | 2736 | if (ata_dma_blacklisted(d)) |
2659 | xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); | 2737 | xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); |
@@ -2663,7 +2741,8 @@ static unsigned int ata_dev_xfermask(struct ata_port *ap,
2663 | printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, " | 2741 | printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, " |
2664 | "disabling DMA\n", ap->id, dev->devno); | 2742 | "disabling DMA\n", ap->id, dev->devno); |
2665 | 2743 | ||
2666 | return xfer_mask; | 2744 | ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, |
2745 | &dev->udma_mask); | ||
2667 | } | 2746 | } |
2668 | 2747 | ||
2669 | /** | 2748 | /** |
@@ -2676,11 +2755,16 @@ static unsigned int ata_dev_xfermask(struct ata_port *ap,
2676 | * | 2755 | * |
2677 | * LOCKING: | 2756 | * LOCKING: |
2678 | * PCI/etc. bus probe sem. | 2757 | * PCI/etc. bus probe sem. |
2758 | * | ||
2759 | * RETURNS: | ||
2760 | * 0 on success, AC_ERR_* mask otherwise. | ||
2679 | */ | 2761 | */ |
2680 | 2762 | ||
2681 | static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev) | 2763 | static unsigned int ata_dev_set_xfermode(struct ata_port *ap, |
2764 | struct ata_device *dev) | ||
2682 | { | 2765 | { |
2683 | struct ata_taskfile tf; | 2766 | struct ata_taskfile tf; |
2767 | unsigned int err_mask; | ||
2684 | 2768 | ||
2685 | /* set up set-features taskfile */ | 2769 | /* set up set-features taskfile */ |
2686 | DPRINTK("set features - xfer mode\n"); | 2770 | DPRINTK("set features - xfer mode\n"); |
@@ -2692,13 +2776,10 @@ static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
2692 | tf.protocol = ATA_PROT_NODATA; | 2776 | tf.protocol = ATA_PROT_NODATA; |
2693 | tf.nsect = dev->xfer_mode; | 2777 | tf.nsect = dev->xfer_mode; |
2694 | 2778 | ||
2695 | if (ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0)) { | 2779 | err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0); |
2696 | printk(KERN_ERR "ata%u: failed to set xfermode, disabled\n", | ||
2697 | ap->id); | ||
2698 | ata_port_disable(ap); | ||
2699 | } | ||
2700 | 2780 | ||
2701 | DPRINTK("EXIT\n"); | 2781 | DPRINTK("EXIT, err_mask=%x\n", err_mask); |
2782 | return err_mask; | ||
2702 | } | 2783 | } |
2703 | 2784 | ||
2704 | /** | 2785 | /** |
@@ -2775,7 +2856,7 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
2775 | 2856 | ||
2776 | if (qc->flags & ATA_QCFLAG_SG) { | 2857 | if (qc->flags & ATA_QCFLAG_SG) { |
2777 | if (qc->n_elem) | 2858 | if (qc->n_elem) |
2778 | dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir); | 2859 | dma_unmap_sg(ap->dev, sg, qc->n_elem, dir); |
2779 | /* restore last sg */ | 2860 | /* restore last sg */ |
2780 | sg[qc->orig_n_elem - 1].length += qc->pad_len; | 2861 | sg[qc->orig_n_elem - 1].length += qc->pad_len; |
2781 | if (pad_buf) { | 2862 | if (pad_buf) { |
@@ -2786,7 +2867,7 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
2786 | } | 2867 | } |
2787 | } else { | 2868 | } else { |
2788 | if (qc->n_elem) | 2869 | if (qc->n_elem) |
2789 | dma_unmap_single(ap->host_set->dev, | 2870 | dma_unmap_single(ap->dev, |
2790 | sg_dma_address(&sg[0]), sg_dma_len(&sg[0]), | 2871 | sg_dma_address(&sg[0]), sg_dma_len(&sg[0]), |
2791 | dir); | 2872 | dir); |
2792 | /* restore sg */ | 2873 | /* restore sg */ |
@@ -2997,7 +3078,7 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2997 | goto skip_map; | 3078 | goto skip_map; |
2998 | } | 3079 | } |
2999 | 3080 | ||
3000 | dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt, | 3081 | dma_address = dma_map_single(ap->dev, qc->buf_virt, |
3001 | sg->length, dir); | 3082 | sg->length, dir); |
3002 | if (dma_mapping_error(dma_address)) { | 3083 | if (dma_mapping_error(dma_address)) { |
3003 | /* restore sg */ | 3084 | /* restore sg */ |
@@ -3085,7 +3166,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
3085 | } | 3166 | } |
3086 | 3167 | ||
3087 | dir = qc->dma_dir; | 3168 | dir = qc->dma_dir; |
3088 | n_elem = dma_map_sg(ap->host_set->dev, sg, pre_n_elem, dir); | 3169 | n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir); |
3089 | if (n_elem < 1) { | 3170 | if (n_elem < 1) { |
3090 | /* restore last sg */ | 3171 | /* restore last sg */ |
3091 | lsg->length += qc->pad_len; | 3172 | lsg->length += qc->pad_len; |
@@ -3616,7 +3697,7 @@ static void ata_pio_error(struct ata_port *ap)
3616 | if (qc->tf.command != ATA_CMD_PACKET) | 3697 | if (qc->tf.command != ATA_CMD_PACKET) |
3617 | printk(KERN_WARNING "ata%u: PIO error\n", ap->id); | 3698 | printk(KERN_WARNING "ata%u: PIO error\n", ap->id); |
3618 | 3699 | ||
3619 | /* make sure qc->err_mask is available to | 3700 | /* make sure qc->err_mask is available to |
3620 | * know what's wrong and recover | 3701 | * know what's wrong and recover |
3621 | */ | 3702 | */ |
3622 | WARN_ON(qc->err_mask == 0); | 3703 | WARN_ON(qc->err_mask == 0); |
@@ -4065,240 +4146,6 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4065 | } | 4146 | } |
4066 | 4147 | ||
4067 | /** | 4148 | /** |
4068 | * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction | ||
4069 | * @qc: Info associated with this ATA transaction. | ||
4070 | * | ||
4071 | * LOCKING: | ||
4072 | * spin_lock_irqsave(host_set lock) | ||
4073 | */ | ||
4074 | |||
4075 | static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc) | ||
4076 | { | ||
4077 | struct ata_port *ap = qc->ap; | ||
4078 | unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); | ||
4079 | u8 dmactl; | ||
4080 | void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; | ||
4081 | |||
4082 | /* load PRD table addr. */ | ||
4083 | mb(); /* make sure PRD table writes are visible to controller */ | ||
4084 | writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS); | ||
4085 | |||
4086 | /* specify data direction, triple-check start bit is clear */ | ||
4087 | dmactl = readb(mmio + ATA_DMA_CMD); | ||
4088 | dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); | ||
4089 | if (!rw) | ||
4090 | dmactl |= ATA_DMA_WR; | ||
4091 | writeb(dmactl, mmio + ATA_DMA_CMD); | ||
4092 | |||
4093 | /* issue r/w command */ | ||
4094 | ap->ops->exec_command(ap, &qc->tf); | ||
4095 | } | ||
4096 | |||
4097 | /** | ||
4098 | * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction | ||
4099 | * @qc: Info associated with this ATA transaction. | ||
4100 | * | ||
4101 | * LOCKING: | ||
4102 | * spin_lock_irqsave(host_set lock) | ||
4103 | */ | ||
4104 | |||
4105 | static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc) | ||
4106 | { | ||
4107 | struct ata_port *ap = qc->ap; | ||
4108 | void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; | ||
4109 | u8 dmactl; | ||
4110 | |||
4111 | /* start host DMA transaction */ | ||
4112 | dmactl = readb(mmio + ATA_DMA_CMD); | ||
4113 | writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD); | ||
4114 | |||
4115 | /* Strictly, one may wish to issue a readb() here, to | ||
4116 | * flush the mmio write. However, control also passes | ||
4117 | * to the hardware at this point, and it will interrupt | ||
4118 | * us when we are to resume control. So, in effect, | ||
4119 | * we don't care when the mmio write flushes. | ||
4120 | * Further, a read of the DMA status register _immediately_ | ||
4121 | * following the write may not be what certain flaky hardware | ||
4122 | * is expected, so I think it is best to not add a readb() | ||
4123 | * without first all the MMIO ATA cards/mobos. | ||
4124 | * Or maybe I'm just being paranoid. | ||
4125 | */ | ||
4126 | } | ||
4127 | |||
4128 | /** | ||
4129 | * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO) | ||
4130 | * @qc: Info associated with this ATA transaction. | ||
4131 | * | ||
4132 | * LOCKING: | ||
4133 | * spin_lock_irqsave(host_set lock) | ||
4134 | */ | ||
4135 | |||
4136 | static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc) | ||
4137 | { | ||
4138 | struct ata_port *ap = qc->ap; | ||
4139 | unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); | ||
4140 | u8 dmactl; | ||
4141 | |||
4142 | /* load PRD table addr. */ | ||
4143 | outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); | ||
4144 | |||
4145 | /* specify data direction, triple-check start bit is clear */ | ||
4146 | dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | ||
4147 | dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); | ||
4148 | if (!rw) | ||
4149 | dmactl |= ATA_DMA_WR; | ||
4150 | outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | ||
4151 | |||
4152 | /* issue r/w command */ | ||
4153 | ap->ops->exec_command(ap, &qc->tf); | ||
4154 | } | ||
4155 | |||
4156 | /** | ||
4157 | * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO) | ||
4158 | * @qc: Info associated with this ATA transaction. | ||
4159 | * | ||
4160 | * LOCKING: | ||
4161 | * spin_lock_irqsave(host_set lock) | ||
4162 | */ | ||
4163 | |||
4164 | static void ata_bmdma_start_pio (struct ata_queued_cmd *qc) | ||
4165 | { | ||
4166 | struct ata_port *ap = qc->ap; | ||
4167 | u8 dmactl; | ||
4168 | |||
4169 | /* start host DMA transaction */ | ||
4170 | dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | ||
4171 | outb(dmactl | ATA_DMA_START, | ||
4172 | ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | ||
4173 | } | ||
4174 | |||
4175 | |||
4176 | /** | ||
4177 | * ata_bmdma_start - Start a PCI IDE BMDMA transaction | ||
4178 | * @qc: Info associated with this ATA transaction. | ||
4179 | * | ||
4180 | * Writes the ATA_DMA_START flag to the DMA command register. | ||
4181 | * | ||
4182 | * May be used as the bmdma_start() entry in ata_port_operations. | ||
4183 | * | ||
4184 | * LOCKING: | ||
4185 | * spin_lock_irqsave(host_set lock) | ||
4186 | */ | ||
4187 | void ata_bmdma_start(struct ata_queued_cmd *qc) | ||
4188 | { | ||
4189 | if (qc->ap->flags & ATA_FLAG_MMIO) | ||
4190 | ata_bmdma_start_mmio(qc); | ||
4191 | else | ||
4192 | ata_bmdma_start_pio(qc); | ||
4193 | } | ||
4194 | |||
4195 | |||
4196 | /** | ||
4197 | * ata_bmdma_setup - Set up PCI IDE BMDMA transaction | ||
4198 | * @qc: Info associated with this ATA transaction. | ||
4199 | * | ||
4200 | * Writes address of PRD table to device's PRD Table Address | ||
4201 | * register, sets the DMA control register, and calls | ||
4202 | * ops->exec_command() to start the transfer. | ||
4203 | * | ||
4204 | * May be used as the bmdma_setup() entry in ata_port_operations. | ||
4205 | * | ||
4206 | * LOCKING: | ||
4207 | * spin_lock_irqsave(host_set lock) | ||
4208 | */ | ||
4209 | void ata_bmdma_setup(struct ata_queued_cmd *qc) | ||
4210 | { | ||
4211 | if (qc->ap->flags & ATA_FLAG_MMIO) | ||
4212 | ata_bmdma_setup_mmio(qc); | ||
4213 | else | ||
4214 | ata_bmdma_setup_pio(qc); | ||
4215 | } | ||
4216 | |||
4217 | |||
4218 | /** | ||
4219 | * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt. | ||
4220 | * @ap: Port associated with this ATA transaction. | ||
4221 | * | ||
4222 | * Clear interrupt and error flags in DMA status register. | ||
4223 | * | ||
4224 | * May be used as the irq_clear() entry in ata_port_operations. | ||
4225 | * | ||
4226 | * LOCKING: | ||
4227 | * spin_lock_irqsave(host_set lock) | ||
4228 | */ | ||
4229 | |||
4230 | void ata_bmdma_irq_clear(struct ata_port *ap) | ||
4231 | { | ||
4232 | if (!ap->ioaddr.bmdma_addr) | ||
4233 | return; | ||
4234 | |||
4235 | if (ap->flags & ATA_FLAG_MMIO) { | ||
4236 | void __iomem *mmio = | ||
4237 | ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS; | ||
4238 | writeb(readb(mmio), mmio); | ||
4239 | } else { | ||
4240 | unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS; | ||
4241 | outb(inb(addr), addr); | ||
4242 | } | ||
4243 | } | ||
4244 | |||
4245 | |||
4246 | /** | ||
4247 | * ata_bmdma_status - Read PCI IDE BMDMA status | ||
4248 | * @ap: Port associated with this ATA transaction. | ||
4249 | * | ||
4250 | * Read and return BMDMA status register. | ||
4251 | * | ||
4252 | * May be used as the bmdma_status() entry in ata_port_operations. | ||
4253 | * | ||
4254 | * LOCKING: | ||
4255 | * spin_lock_irqsave(host_set lock) | ||
4256 | */ | ||
4257 | |||
4258 | u8 ata_bmdma_status(struct ata_port *ap) | ||
4259 | { | ||
4260 | u8 host_stat; | ||
4261 | if (ap->flags & ATA_FLAG_MMIO) { | ||
4262 | void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; | ||
4263 | host_stat = readb(mmio + ATA_DMA_STATUS); | ||
4264 | } else | ||
4265 | host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); | ||
4266 | return host_stat; | ||
4267 | } | ||
4268 | |||
4269 | |||
4270 | /** | ||
4271 | * ata_bmdma_stop - Stop PCI IDE BMDMA transfer | ||
4272 | * @qc: Command we are ending DMA for | ||
4273 | * | ||
4274 | * Clears the ATA_DMA_START flag in the dma control register | ||
4275 | * | ||
4276 | * May be used as the bmdma_stop() entry in ata_port_operations. | ||
4277 | * | ||
4278 | * LOCKING: | ||
4279 | * spin_lock_irqsave(host_set lock) | ||
4280 | */ | ||
4281 | |||
4282 | void ata_bmdma_stop(struct ata_queued_cmd *qc) | ||
4283 | { | ||
4284 | struct ata_port *ap = qc->ap; | ||
4285 | if (ap->flags & ATA_FLAG_MMIO) { | ||
4286 | void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; | ||
4287 | |||
4288 | /* clear start/stop bit */ | ||
4289 | writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START, | ||
4290 | mmio + ATA_DMA_CMD); | ||
4291 | } else { | ||
4292 | /* clear start/stop bit */ | ||
4293 | outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START, | ||
4294 | ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | ||
4295 | } | ||
4296 | |||
4297 | /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ | ||
4298 | ata_altstatus(ap); /* dummy read */ | ||
4299 | } | ||
4300 | |||
4301 | /** | ||
4302 | * ata_host_intr - Handle host interrupt for given (port, task) | 4149 | * ata_host_intr - Handle host interrupt for given (port, task) |
4303 | * @ap: Port on which interrupt arrived (possibly...) | 4150 | * @ap: Port on which interrupt arrived (possibly...) |
4304 | * @qc: Taskfile currently active in engine | 4151 | * @qc: Taskfile currently active in engine |
@@ -4506,14 +4353,15 @@ int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4506 | * Flush the cache on the drive, if appropriate, then issue a | 4353 | * Flush the cache on the drive, if appropriate, then issue a |
4507 | * standbynow command. | 4354 | * standbynow command. |
4508 | */ | 4355 | */ |
4509 | int ata_device_suspend(struct ata_port *ap, struct ata_device *dev) | 4356 | int ata_device_suspend(struct ata_port *ap, struct ata_device *dev, pm_message_t state) |
4510 | { | 4357 | { |
4511 | if (!ata_dev_present(dev)) | 4358 | if (!ata_dev_present(dev)) |
4512 | return 0; | 4359 | return 0; |
4513 | if (dev->class == ATA_DEV_ATA) | 4360 | if (dev->class == ATA_DEV_ATA) |
4514 | ata_flush_cache(ap, dev); | 4361 | ata_flush_cache(ap, dev); |
4515 | 4362 | ||
4516 | ata_standby_drive(ap, dev); | 4363 | if (state.event != PM_EVENT_FREEZE) |
4364 | ata_standby_drive(ap, dev); | ||
4517 | ap->flags |= ATA_FLAG_SUSPENDED; | 4365 | ap->flags |= ATA_FLAG_SUSPENDED; |
4518 | return 0; | 4366 | return 0; |
4519 | } | 4367 | } |
@@ -4533,7 +4381,7 @@ int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
4533 | 4381 | ||
4534 | int ata_port_start (struct ata_port *ap) | 4382 | int ata_port_start (struct ata_port *ap) |
4535 | { | 4383 | { |
4536 | struct device *dev = ap->host_set->dev; | 4384 | struct device *dev = ap->dev; |
4537 | int rc; | 4385 | int rc; |
4538 | 4386 | ||
4539 | ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL); | 4387 | ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL); |
@@ -4566,7 +4414,7 @@ int ata_port_start (struct ata_port *ap)
4566 | 4414 | ||
4567 | void ata_port_stop (struct ata_port *ap) | 4415 | void ata_port_stop (struct ata_port *ap) |
4568 | { | 4416 | { |
4569 | struct device *dev = ap->host_set->dev; | 4417 | struct device *dev = ap->dev; |
4570 | 4418 | ||
4571 | dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma); | 4419 | dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma); |
4572 | ata_pad_free(ap, dev); | 4420 | ata_pad_free(ap, dev); |
@@ -4632,6 +4480,7 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4632 | ap->host = host; | 4480 | ap->host = host; |
4633 | ap->ctl = ATA_DEVCTL_OBS; | 4481 | ap->ctl = ATA_DEVCTL_OBS; |
4634 | ap->host_set = host_set; | 4482 | ap->host_set = host_set; |
4483 | ap->dev = ent->dev; | ||
4635 | ap->port_no = port_no; | 4484 | ap->port_no = port_no; |
4636 | ap->hard_port_no = | 4485 | ap->hard_port_no = |
4637 | ent->legacy_mode ? ent->hard_port_no : port_no; | 4486 | ent->legacy_mode ? ent->hard_port_no : port_no; |
@@ -4647,8 +4496,13 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4647 | INIT_WORK(&ap->port_task, NULL, NULL); | 4496 | INIT_WORK(&ap->port_task, NULL, NULL); |
4648 | INIT_LIST_HEAD(&ap->eh_done_q); | 4497 | INIT_LIST_HEAD(&ap->eh_done_q); |
4649 | 4498 | ||
4650 | for (i = 0; i < ATA_MAX_DEVICES; i++) | 4499 | for (i = 0; i < ATA_MAX_DEVICES; i++) { |
4651 | ap->device[i].devno = i; | 4500 | struct ata_device *dev = &ap->device[i]; |
4501 | dev->devno = i; | ||
4502 | dev->pio_mask = UINT_MAX; | ||
4503 | dev->mwdma_mask = UINT_MAX; | ||
4504 | dev->udma_mask = UINT_MAX; | ||
4505 | } | ||
4652 | 4506 | ||
4653 | #ifdef ATA_IRQ_TRAP | 4507 | #ifdef ATA_IRQ_TRAP |
4654 | ap->stats.unhandled_irq = 1; | 4508 | ap->stats.unhandled_irq = 1; |
@@ -4842,7 +4696,7 @@ err_free_ret:
4842 | * ata_host_set_remove - PCI layer callback for device removal | 4696 | * ata_host_set_remove - PCI layer callback for device removal |
4843 | * @host_set: ATA host set that was removed | 4697 | * @host_set: ATA host set that was removed |
4844 | * | 4698 | * |
4845 | * Unregister all objects associated with this host set. Free those | 4699 | * Unregister all objects associated with this host set. Free those |
4846 | * objects. | 4700 | * objects. |
4847 | * | 4701 | * |
4848 | * LOCKING: | 4702 | * LOCKING: |
@@ -5114,6 +4968,8 @@ EXPORT_SYMBOL_GPL(ata_std_postreset);
5114 | EXPORT_SYMBOL_GPL(ata_std_probe_reset); | 4968 | EXPORT_SYMBOL_GPL(ata_std_probe_reset); |
5115 | EXPORT_SYMBOL_GPL(ata_drive_probe_reset); | 4969 | EXPORT_SYMBOL_GPL(ata_drive_probe_reset); |
5116 | EXPORT_SYMBOL_GPL(ata_dev_revalidate); | 4970 | EXPORT_SYMBOL_GPL(ata_dev_revalidate); |
4971 | EXPORT_SYMBOL_GPL(ata_dev_classify); | ||
4972 | EXPORT_SYMBOL_GPL(ata_dev_pair); | ||
5117 | EXPORT_SYMBOL_GPL(ata_port_disable); | 4973 | EXPORT_SYMBOL_GPL(ata_port_disable); |
5118 | EXPORT_SYMBOL_GPL(ata_ratelimit); | 4974 | EXPORT_SYMBOL_GPL(ata_ratelimit); |
5119 | EXPORT_SYMBOL_GPL(ata_busy_sleep); | 4975 | EXPORT_SYMBOL_GPL(ata_busy_sleep); |
@@ -5124,7 +4980,6 @@ EXPORT_SYMBOL_GPL(ata_scsi_error);
5124 | EXPORT_SYMBOL_GPL(ata_scsi_slave_config); | 4980 | EXPORT_SYMBOL_GPL(ata_scsi_slave_config); |
5125 | EXPORT_SYMBOL_GPL(ata_scsi_release); | 4981 | EXPORT_SYMBOL_GPL(ata_scsi_release); |
5126 | EXPORT_SYMBOL_GPL(ata_host_intr); | 4982 | EXPORT_SYMBOL_GPL(ata_host_intr); |
5127 | EXPORT_SYMBOL_GPL(ata_dev_classify); | ||
5128 | EXPORT_SYMBOL_GPL(ata_id_string); | 4983 | EXPORT_SYMBOL_GPL(ata_id_string); |
5129 | EXPORT_SYMBOL_GPL(ata_id_c_string); | 4984 | EXPORT_SYMBOL_GPL(ata_id_c_string); |
5130 | EXPORT_SYMBOL_GPL(ata_scsi_simulate); | 4985 | EXPORT_SYMBOL_GPL(ata_scsi_simulate); |
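
Besides relocating the BMDMA helpers, the libata-core.c changes above rework transfer-mode bookkeeping: ata_dev_xfermask() no longer returns a combined mask but stores its result in dev->pio_mask, dev->mwdma_mask and dev->udma_mask, using ata_pack_xfermask() to fold the per-class masks together and the new ata_unpack_xfermask() to split them back out. The sketch below illustrates that pack/filter/unpack round-trip; it is not a drop-in function (the pack/unpack helpers and ata_dma_blacklisted() are static to libata-core.c) and the function name is hypothetical.

#include <linux/libata.h>

/* Illustration of the flow now used inside ata_dev_xfermask(). */
static void example_xfermask_roundtrip(struct ata_device *dev)
{
        unsigned int xfer_mask;

        /* fold the three per-class masks into one bitmap */
        xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
                                      dev->udma_mask);

        /* apply limits on the combined mask, e.g. drop DMA modes
         * for blacklisted devices */
        if (ata_dma_blacklisted(dev))
                xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);

        /* split the filtered bitmap back into the per-device fields */
        ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
                            &dev->udma_mask);
}
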
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index a1259b242b8e..628191bfd990 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -256,7 +256,7 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
256 | scsi_cmd[14] = args[0]; | 256 | scsi_cmd[14] = args[0]; |
257 | 257 | ||
258 | /* Good values for timeout and retries? Values below | 258 | /* Good values for timeout and retries? Values below |
259 | from scsi_ioctl_send_command() for default case... */ | 259 | from scsi_ioctl_send_command() for default case... */ |
260 | if (scsi_execute_req(scsidev, scsi_cmd, DMA_NONE, NULL, 0, &sshdr, | 260 | if (scsi_execute_req(scsidev, scsi_cmd, DMA_NONE, NULL, 0, &sshdr, |
261 | (10*HZ), 5)) | 261 | (10*HZ), 5)) |
262 | rc = -EIO; | 262 | rc = -EIO; |
@@ -267,20 +267,8 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
267 | 267 | ||
268 | int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg) | 268 | int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg) |
269 | { | 269 | { |
270 | struct ata_port *ap; | ||
271 | struct ata_device *dev; | ||
272 | int val = -EINVAL, rc = -EINVAL; | 270 | int val = -EINVAL, rc = -EINVAL; |
273 | 271 | ||
274 | ap = (struct ata_port *) &scsidev->host->hostdata[0]; | ||
275 | if (!ap) | ||
276 | goto out; | ||
277 | |||
278 | dev = ata_scsi_find_dev(ap, scsidev); | ||
279 | if (!dev) { | ||
280 | rc = -ENODEV; | ||
281 | goto out; | ||
282 | } | ||
283 | |||
284 | switch (cmd) { | 272 | switch (cmd) { |
285 | case ATA_IOC_GET_IO32: | 273 | case ATA_IOC_GET_IO32: |
286 | val = 0; | 274 | val = 0; |
@@ -309,7 +297,6 @@ int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
309 | break; | 297 | break; |
310 | } | 298 | } |
311 | 299 | ||
312 | out: | ||
313 | return rc; | 300 | return rc; |
314 | } | 301 | } |
315 | 302 | ||
@@ -414,12 +401,12 @@ int ata_scsi_device_resume(struct scsi_device *sdev)
414 | return ata_device_resume(ap, dev); | 401 | return ata_device_resume(ap, dev); |
415 | } | 402 | } |
416 | 403 | ||
417 | int ata_scsi_device_suspend(struct scsi_device *sdev) | 404 | int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state) |
418 | { | 405 | { |
419 | struct ata_port *ap = (struct ata_port *) &sdev->host->hostdata[0]; | 406 | struct ata_port *ap = (struct ata_port *) &sdev->host->hostdata[0]; |
420 | struct ata_device *dev = &ap->device[sdev->id]; | 407 | struct ata_device *dev = &ap->device[sdev->id]; |
421 | 408 | ||
422 | return ata_device_suspend(ap, dev); | 409 | return ata_device_suspend(ap, dev, state); |
423 | } | 410 | } |
424 | 411 | ||
425 | /** | 412 | /** |
@@ -438,7 +425,7 @@ int ata_scsi_device_suspend(struct scsi_device *sdev)
438 | * LOCKING: | 425 | * LOCKING: |
439 | * spin_lock_irqsave(host_set lock) | 426 | * spin_lock_irqsave(host_set lock) |
440 | */ | 427 | */ |
441 | void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc, | 428 | void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc, |
442 | u8 *ascq) | 429 | u8 *ascq) |
443 | { | 430 | { |
444 | int i; | 431 | int i; |
@@ -495,7 +482,7 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
495 | /* Look for drv_err */ | 482 | /* Look for drv_err */ |
496 | for (i = 0; sense_table[i][0] != 0xFF; i++) { | 483 | for (i = 0; sense_table[i][0] != 0xFF; i++) { |
497 | /* Look for best matches first */ | 484 | /* Look for best matches first */ |
498 | if ((sense_table[i][0] & drv_err) == | 485 | if ((sense_table[i][0] & drv_err) == |
499 | sense_table[i][0]) { | 486 | sense_table[i][0]) { |
500 | *sk = sense_table[i][1]; | 487 | *sk = sense_table[i][1]; |
501 | *asc = sense_table[i][2]; | 488 | *asc = sense_table[i][2]; |
@@ -518,7 +505,7 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
518 | } | 505 | } |
519 | } | 506 | } |
520 | /* No error? Undecoded? */ | 507 | /* No error? Undecoded? */ |
521 | printk(KERN_WARNING "ata%u: no sense translation for status: 0x%02x\n", | 508 | printk(KERN_WARNING "ata%u: no sense translation for status: 0x%02x\n", |
522 | id, drv_stat); | 509 | id, drv_stat); |
523 | 510 | ||
524 | /* We need a sensible error return here, which is tricky, and one | 511 | /* We need a sensible error return here, which is tricky, and one |
@@ -1150,14 +1137,14 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *sc
1150 | 1137 | ||
1151 | DPRINTK("block %u track %u cyl %u head %u sect %u\n", | 1138 | DPRINTK("block %u track %u cyl %u head %u sect %u\n", |
1152 | (u32)block, track, cyl, head, sect); | 1139 | (u32)block, track, cyl, head, sect); |
1153 | 1140 | ||
1154 | /* Check whether the converted CHS can fit. | 1141 | /* Check whether the converted CHS can fit. |
1155 | Cylinder: 0-65535 | 1142 | Cylinder: 0-65535 |
1156 | Head: 0-15 | 1143 | Head: 0-15 |
1157 | Sector: 1-255*/ | 1144 | Sector: 1-255*/ |
1158 | if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect)) | 1145 | if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect)) |
1159 | goto out_of_range; | 1146 | goto out_of_range; |
1160 | 1147 | ||
1161 | tf->command = ATA_CMD_VERIFY; | 1148 | tf->command = ATA_CMD_VERIFY; |
1162 | tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */ | 1149 | tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */ |
1163 | tf->lbal = sect; | 1150 | tf->lbal = sect; |
@@ -1289,7 +1276,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm | |||
1289 | tf->lbal = block & 0xff; | 1276 | tf->lbal = block & 0xff; |
1290 | 1277 | ||
1291 | tf->device |= ATA_LBA; | 1278 | tf->device |= ATA_LBA; |
1292 | } else { | 1279 | } else { |
1293 | /* CHS */ | 1280 | /* CHS */ |
1294 | u32 sect, head, cyl, track; | 1281 | u32 sect, head, cyl, track; |
1295 | 1282 | ||
@@ -1309,8 +1296,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm | |||
1309 | DPRINTK("block %u track %u cyl %u head %u sect %u\n", | 1296 | DPRINTK("block %u track %u cyl %u head %u sect %u\n", |
1310 | (u32)block, track, cyl, head, sect); | 1297 | (u32)block, track, cyl, head, sect); |
1311 | 1298 | ||
1312 | /* Check whether the converted CHS can fit. | 1299 | /* Check whether the converted CHS can fit. |
1313 | Cylinder: 0-65535 | 1300 | Cylinder: 0-65535 |
1314 | Head: 0-15 | 1301 | Head: 0-15 |
1315 | Sector: 1-255*/ | 1302 | Sector: 1-255*/ |
1316 | if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect)) | 1303 | if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect)) |
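Both the VERIFY and READ/WRITE translation hunks keep the same guard: after converting an LBA to CHS, the cylinder must fit in 16 bits, the head in 4 bits, and the sector in 8 bits with zero disallowed. A standalone sketch of the conversion plus that range check; the geometry parameters are whatever the device reported:

#include <stdint.h>

/* returns 0 if the CHS tuple fits the taskfile fields, -1 otherwise */
static int demo_lba_to_chs(uint64_t block, uint32_t heads, uint32_t sectors,
                           uint32_t *cyl, uint32_t *head, uint32_t *sect)
{
        uint32_t track = (uint32_t)(block / sectors);

        *sect = (uint32_t)(block % sectors) + 1;        /* sectors are 1-based */
        *head = track % heads;
        *cyl  = track / heads;

        /* Cylinder: 0-65535, Head: 0-15, Sector: 1-255 */
        if ((*cyl >> 16) || (*head >> 4) || (*sect >> 8) || !*sect)
                return -1;
        return 0;
}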
@@ -1697,7 +1684,7 @@ unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf, | |||
1697 | 1684 | ||
1698 | if (buflen > (ATA_SERNO_LEN + num + 3)) { | 1685 | if (buflen > (ATA_SERNO_LEN + num + 3)) { |
1699 | /* piv=0, assoc=lu, code_set=ACSII, designator=vendor */ | 1686 | /* piv=0, assoc=lu, code_set=ACSII, designator=vendor */ |
1700 | rbuf[num + 0] = 2; | 1687 | rbuf[num + 0] = 2; |
1701 | rbuf[num + 3] = ATA_SERNO_LEN; | 1688 | rbuf[num + 3] = ATA_SERNO_LEN; |
1702 | num += 4; | 1689 | num += 4; |
1703 | ata_id_string(args->id, (unsigned char *) rbuf + num, | 1690 | ata_id_string(args->id, (unsigned char *) rbuf + num, |
@@ -1707,8 +1694,8 @@ unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf, | |||
1707 | if (buflen > (sat_model_serial_desc_len + num + 3)) { | 1694 | if (buflen > (sat_model_serial_desc_len + num + 3)) { |
1708 | /* SAT defined lu model and serial numbers descriptor */ | 1695 | /* SAT defined lu model and serial numbers descriptor */ |
1709 | /* piv=0, assoc=lu, code_set=ACSII, designator=t10 vendor id */ | 1696 | /* piv=0, assoc=lu, code_set=ACSII, designator=t10 vendor id */ |
1710 | rbuf[num + 0] = 2; | 1697 | rbuf[num + 0] = 2; |
1711 | rbuf[num + 1] = 1; | 1698 | rbuf[num + 1] = 1; |
1712 | rbuf[num + 3] = sat_model_serial_desc_len; | 1699 | rbuf[num + 3] = sat_model_serial_desc_len; |
1713 | num += 4; | 1700 | num += 4; |
1714 | memcpy(rbuf + num, "ATA ", 8); | 1701 | memcpy(rbuf + num, "ATA ", 8); |
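The INQUIRY VPD page 0x83 hunks build identification designators: a 4-byte header (code set, designator type, reserved, length) followed by the payload, with num advanced past each descriptor. A sketch of appending one such descriptor to a response buffer; the helper name is hypothetical and the field meanings follow the comments in the hunk:

#include <stdint.h>
#include <string.h>

/* append one VPD page 0x83 designation descriptor; returns the new offset */
static unsigned int demo_add_desig(uint8_t *rbuf, unsigned int num,
                                   uint8_t code_set, uint8_t desig_type,
                                   const char *payload, uint8_t len)
{
        rbuf[num + 0] = code_set;       /* e.g. 2 = ASCII */
        rbuf[num + 1] = desig_type;     /* e.g. 1 = T10 vendor ID based */
        rbuf[num + 2] = 0;              /* reserved */
        rbuf[num + 3] = len;            /* designator length */
        num += 4;
        memcpy(rbuf + num, payload, len);
        return num + len;
}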
@@ -2597,6 +2584,21 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap, | |||
2597 | #endif | 2584 | #endif |
2598 | } | 2585 | } |
2599 | 2586 | ||
2587 | static inline void __ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), | ||
2588 | struct ata_port *ap, struct ata_device *dev) | ||
2589 | { | ||
2590 | if (dev->class == ATA_DEV_ATA) { | ||
2591 | ata_xlat_func_t xlat_func = ata_get_xlat_func(dev, | ||
2592 | cmd->cmnd[0]); | ||
2593 | |||
2594 | if (xlat_func) | ||
2595 | ata_scsi_translate(ap, dev, cmd, done, xlat_func); | ||
2596 | else | ||
2597 | ata_scsi_simulate(ap, dev, cmd, done); | ||
2598 | } else | ||
2599 | ata_scsi_translate(ap, dev, cmd, done, atapi_xlat); | ||
2600 | } | ||
2601 | |||
2600 | /** | 2602 | /** |
2601 | * ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device | 2603 | * ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device |
2602 | * @cmd: SCSI command to be sent | 2604 | * @cmd: SCSI command to be sent |
@@ -2631,24 +2633,13 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | |||
2631 | ata_scsi_dump_cdb(ap, cmd); | 2633 | ata_scsi_dump_cdb(ap, cmd); |
2632 | 2634 | ||
2633 | dev = ata_scsi_find_dev(ap, scsidev); | 2635 | dev = ata_scsi_find_dev(ap, scsidev); |
2634 | if (unlikely(!dev)) { | 2636 | if (likely(dev)) |
2637 | __ata_scsi_queuecmd(cmd, done, ap, dev); | ||
2638 | else { | ||
2635 | cmd->result = (DID_BAD_TARGET << 16); | 2639 | cmd->result = (DID_BAD_TARGET << 16); |
2636 | done(cmd); | 2640 | done(cmd); |
2637 | goto out_unlock; | ||
2638 | } | 2641 | } |
2639 | 2642 | ||
2640 | if (dev->class == ATA_DEV_ATA) { | ||
2641 | ata_xlat_func_t xlat_func = ata_get_xlat_func(dev, | ||
2642 | cmd->cmnd[0]); | ||
2643 | |||
2644 | if (xlat_func) | ||
2645 | ata_scsi_translate(ap, dev, cmd, done, xlat_func); | ||
2646 | else | ||
2647 | ata_scsi_simulate(ap, dev, cmd, done); | ||
2648 | } else | ||
2649 | ata_scsi_translate(ap, dev, cmd, done, atapi_xlat); | ||
2650 | |||
2651 | out_unlock: | ||
2652 | spin_unlock(&ap->host_set->lock); | 2643 | spin_unlock(&ap->host_set->lock); |
2653 | spin_lock(shost->host_lock); | 2644 | spin_lock(shost->host_lock); |
2654 | return 0; | 2645 | return 0; |
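Taken together, the last two hunks move the per-device dispatch into the new __ata_scsi_queuecmd helper, so the caller becomes a plain if/else on whether the device was found, annotated with likely() and free of the old goto label. A reduced sketch of that resulting shape; demo_* names are stand-ins, not the kernel functions:

#include <linux/compiler.h>

static int demo_submit(int cmd) { return 0; }   /* stand-in for the extracted helper */
static int demo_reject(int cmd) { return -1; }  /* stand-in for the bad-target path */

static int demo_queue(void *dev, int cmd)
{
        /* the common case is the first, likely() branch; the error case
         * sits in the else arm and no longer needs a goto to skip it */
        if (likely(dev))
                return demo_submit(cmd);
        return demo_reject(cmd);
}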
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c index e5b20c6afc18..f77bf183dfab 100644 --- a/drivers/scsi/sata_nv.c +++ b/drivers/scsi/sata_nv.c | |||
@@ -29,34 +29,6 @@ | |||
29 | * NV-specific details such as register offsets, SATA phy location, | 29 | * NV-specific details such as register offsets, SATA phy location, |
30 | * hotplug info, etc. | 30 | * hotplug info, etc. |
31 | * | 31 | * |
32 | * 0.10 | ||
33 | * - Fixed spurious interrupts issue seen with the Maxtor 6H500F0 500GB | ||
34 | * drive. Also made the check_hotplug() callbacks return whether there | ||
35 | * was a hotplug interrupt or not. This was not the source of the | ||
36 | * spurious interrupts, but is the right thing to do anyway. | ||
37 | * | ||
38 | * 0.09 | ||
39 | * - Fixed bug introduced by 0.08's MCP51 and MCP55 support. | ||
40 | * | ||
41 | * 0.08 | ||
42 | * - Added support for MCP51 and MCP55. | ||
43 | * | ||
44 | * 0.07 | ||
45 | * - Added support for RAID class code. | ||
46 | * | ||
47 | * 0.06 | ||
48 | * - Added generic SATA support by using a pci_device_id that filters on | ||
49 | * the IDE storage class code. | ||
50 | * | ||
51 | * 0.03 | ||
52 | * - Fixed a bug where the hotplug handlers for non-CK804/MCP04 were using | ||
53 | * mmio_base, which is only set for the CK804/MCP04 case. | ||
54 | * | ||
55 | * 0.02 | ||
56 | * - Added support for CK804 SATA controller. | ||
57 | * | ||
58 | * 0.01 | ||
59 | * - Initial revision. | ||
60 | */ | 32 | */ |
61 | 33 | ||
62 | #include <linux/config.h> | 34 | #include <linux/config.h> |
@@ -74,53 +46,55 @@ | |||
74 | #define DRV_NAME "sata_nv" | 46 | #define DRV_NAME "sata_nv" |
75 | #define DRV_VERSION "0.8" | 47 | #define DRV_VERSION "0.8" |
76 | 48 | ||
77 | #define NV_PORTS 2 | 49 | enum { |
78 | #define NV_PIO_MASK 0x1f | 50 | NV_PORTS = 2, |
79 | #define NV_MWDMA_MASK 0x07 | 51 | NV_PIO_MASK = 0x1f, |
80 | #define NV_UDMA_MASK 0x7f | 52 | NV_MWDMA_MASK = 0x07, |
81 | #define NV_PORT0_SCR_REG_OFFSET 0x00 | 53 | NV_UDMA_MASK = 0x7f, |
82 | #define NV_PORT1_SCR_REG_OFFSET 0x40 | 54 | NV_PORT0_SCR_REG_OFFSET = 0x00, |
83 | 55 | NV_PORT1_SCR_REG_OFFSET = 0x40, | |
84 | #define NV_INT_STATUS 0x10 | 56 | |
85 | #define NV_INT_STATUS_CK804 0x440 | 57 | NV_INT_STATUS = 0x10, |
86 | #define NV_INT_STATUS_PDEV_INT 0x01 | 58 | NV_INT_STATUS_CK804 = 0x440, |
87 | #define NV_INT_STATUS_PDEV_PM 0x02 | 59 | NV_INT_STATUS_PDEV_INT = 0x01, |
88 | #define NV_INT_STATUS_PDEV_ADDED 0x04 | 60 | NV_INT_STATUS_PDEV_PM = 0x02, |
89 | #define NV_INT_STATUS_PDEV_REMOVED 0x08 | 61 | NV_INT_STATUS_PDEV_ADDED = 0x04, |
90 | #define NV_INT_STATUS_SDEV_INT 0x10 | 62 | NV_INT_STATUS_PDEV_REMOVED = 0x08, |
91 | #define NV_INT_STATUS_SDEV_PM 0x20 | 63 | NV_INT_STATUS_SDEV_INT = 0x10, |
92 | #define NV_INT_STATUS_SDEV_ADDED 0x40 | 64 | NV_INT_STATUS_SDEV_PM = 0x20, |
93 | #define NV_INT_STATUS_SDEV_REMOVED 0x80 | 65 | NV_INT_STATUS_SDEV_ADDED = 0x40, |
94 | #define NV_INT_STATUS_PDEV_HOTPLUG (NV_INT_STATUS_PDEV_ADDED | \ | 66 | NV_INT_STATUS_SDEV_REMOVED = 0x80, |
95 | NV_INT_STATUS_PDEV_REMOVED) | 67 | NV_INT_STATUS_PDEV_HOTPLUG = (NV_INT_STATUS_PDEV_ADDED | |
96 | #define NV_INT_STATUS_SDEV_HOTPLUG (NV_INT_STATUS_SDEV_ADDED | \ | 68 | NV_INT_STATUS_PDEV_REMOVED), |
97 | NV_INT_STATUS_SDEV_REMOVED) | 69 | NV_INT_STATUS_SDEV_HOTPLUG = (NV_INT_STATUS_SDEV_ADDED | |
98 | #define NV_INT_STATUS_HOTPLUG (NV_INT_STATUS_PDEV_HOTPLUG | \ | 70 | NV_INT_STATUS_SDEV_REMOVED), |
99 | NV_INT_STATUS_SDEV_HOTPLUG) | 71 | NV_INT_STATUS_HOTPLUG = (NV_INT_STATUS_PDEV_HOTPLUG | |
100 | 72 | NV_INT_STATUS_SDEV_HOTPLUG), | |
101 | #define NV_INT_ENABLE 0x11 | 73 | |
102 | #define NV_INT_ENABLE_CK804 0x441 | 74 | NV_INT_ENABLE = 0x11, |
103 | #define NV_INT_ENABLE_PDEV_MASK 0x01 | 75 | NV_INT_ENABLE_CK804 = 0x441, |
104 | #define NV_INT_ENABLE_PDEV_PM 0x02 | 76 | NV_INT_ENABLE_PDEV_MASK = 0x01, |
105 | #define NV_INT_ENABLE_PDEV_ADDED 0x04 | 77 | NV_INT_ENABLE_PDEV_PM = 0x02, |
106 | #define NV_INT_ENABLE_PDEV_REMOVED 0x08 | 78 | NV_INT_ENABLE_PDEV_ADDED = 0x04, |
107 | #define NV_INT_ENABLE_SDEV_MASK 0x10 | 79 | NV_INT_ENABLE_PDEV_REMOVED = 0x08, |
108 | #define NV_INT_ENABLE_SDEV_PM 0x20 | 80 | NV_INT_ENABLE_SDEV_MASK = 0x10, |
109 | #define NV_INT_ENABLE_SDEV_ADDED 0x40 | 81 | NV_INT_ENABLE_SDEV_PM = 0x20, |
110 | #define NV_INT_ENABLE_SDEV_REMOVED 0x80 | 82 | NV_INT_ENABLE_SDEV_ADDED = 0x40, |
111 | #define NV_INT_ENABLE_PDEV_HOTPLUG (NV_INT_ENABLE_PDEV_ADDED | \ | 83 | NV_INT_ENABLE_SDEV_REMOVED = 0x80, |
112 | NV_INT_ENABLE_PDEV_REMOVED) | 84 | NV_INT_ENABLE_PDEV_HOTPLUG = (NV_INT_ENABLE_PDEV_ADDED | |
113 | #define NV_INT_ENABLE_SDEV_HOTPLUG (NV_INT_ENABLE_SDEV_ADDED | \ | 85 | NV_INT_ENABLE_PDEV_REMOVED), |
114 | NV_INT_ENABLE_SDEV_REMOVED) | 86 | NV_INT_ENABLE_SDEV_HOTPLUG = (NV_INT_ENABLE_SDEV_ADDED | |
115 | #define NV_INT_ENABLE_HOTPLUG (NV_INT_ENABLE_PDEV_HOTPLUG | \ | 87 | NV_INT_ENABLE_SDEV_REMOVED), |
116 | NV_INT_ENABLE_SDEV_HOTPLUG) | 88 | NV_INT_ENABLE_HOTPLUG = (NV_INT_ENABLE_PDEV_HOTPLUG | |
117 | 89 | NV_INT_ENABLE_SDEV_HOTPLUG), | |
118 | #define NV_INT_CONFIG 0x12 | 90 | |
119 | #define NV_INT_CONFIG_METHD 0x01 // 0 = INT, 1 = SMI | 91 | NV_INT_CONFIG = 0x12, |
120 | 92 | NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI | |
121 | // For PCI config register 20 | 93 | |
122 | #define NV_MCP_SATA_CFG_20 0x50 | 94 | // For PCI config register 20 |
123 | #define NV_MCP_SATA_CFG_20_SATA_SPACE_EN 0x04 | 95 | NV_MCP_SATA_CFG_20 = 0x50, |
96 | NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04, | ||
97 | }; | ||
124 | 98 | ||
125 | static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); | 99 | static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); |
126 | static irqreturn_t nv_interrupt (int irq, void *dev_instance, | 100 | static irqreturn_t nv_interrupt (int irq, void *dev_instance, |
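This hunk is a mechanical conversion of the register constants from preprocessor #defines to an anonymous enum, the usual libata style: the values gain a compile-time type, appear in debug info, and composite values no longer need backslash continuations. A tiny illustration of the same idiom with made-up constants:

/* made-up constants, same idiom as the hunk above */
enum {
        DEMO_CTL_REG    = 0x10,
        DEMO_CTL_ENABLE = 0x01,
        DEMO_CTL_RESET  = 0x02,

        /* composites can reference earlier enumerators directly */
        DEMO_CTL_ALL    = DEMO_CTL_ENABLE | DEMO_CTL_RESET,
};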
@@ -175,8 +149,6 @@ static const struct pci_device_id nv_pci_tbl[] = { | |||
175 | { 0, } /* terminate list */ | 149 | { 0, } /* terminate list */ |
176 | }; | 150 | }; |
177 | 151 | ||
178 | #define NV_HOST_FLAGS_SCR_MMIO 0x00000001 | ||
179 | |||
180 | struct nv_host_desc | 152 | struct nv_host_desc |
181 | { | 153 | { |
182 | enum nv_host_type host_type; | 154 | enum nv_host_type host_type; |
@@ -332,36 +304,23 @@ static irqreturn_t nv_interrupt (int irq, void *dev_instance, | |||
332 | 304 | ||
333 | static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg) | 305 | static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg) |
334 | { | 306 | { |
335 | struct ata_host_set *host_set = ap->host_set; | ||
336 | struct nv_host *host = host_set->private_data; | ||
337 | |||
338 | if (sc_reg > SCR_CONTROL) | 307 | if (sc_reg > SCR_CONTROL) |
339 | return 0xffffffffU; | 308 | return 0xffffffffU; |
340 | 309 | ||
341 | if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO) | 310 | return ioread32((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4)); |
342 | return readl((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4)); | ||
343 | else | ||
344 | return inl(ap->ioaddr.scr_addr + (sc_reg * 4)); | ||
345 | } | 311 | } |
346 | 312 | ||
347 | static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) | 313 | static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) |
348 | { | 314 | { |
349 | struct ata_host_set *host_set = ap->host_set; | ||
350 | struct nv_host *host = host_set->private_data; | ||
351 | |||
352 | if (sc_reg > SCR_CONTROL) | 315 | if (sc_reg > SCR_CONTROL) |
353 | return; | 316 | return; |
354 | 317 | ||
355 | if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO) | 318 | iowrite32(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4)); |
356 | writel(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4)); | ||
357 | else | ||
358 | outl(val, ap->ioaddr.scr_addr + (sc_reg * 4)); | ||
359 | } | 319 | } |
360 | 320 | ||
361 | static void nv_host_stop (struct ata_host_set *host_set) | 321 | static void nv_host_stop (struct ata_host_set *host_set) |
362 | { | 322 | { |
363 | struct nv_host *host = host_set->private_data; | 323 | struct nv_host *host = host_set->private_data; |
364 | struct pci_dev *pdev = to_pci_dev(host_set->dev); | ||
365 | 324 | ||
366 | // Disable hotplug event interrupts. | 325 | // Disable hotplug event interrupts. |
367 | if (host->host_desc->disable_hotplug) | 326 | if (host->host_desc->disable_hotplug) |
@@ -369,8 +328,7 @@ static void nv_host_stop (struct ata_host_set *host_set) | |||
369 | 328 | ||
370 | kfree(host); | 329 | kfree(host); |
371 | 330 | ||
372 | if (host_set->mmio_base) | 331 | ata_pci_host_stop(host_set); |
373 | pci_iounmap(pdev, host_set->mmio_base); | ||
374 | } | 332 | } |
375 | 333 | ||
376 | static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | 334 | static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) |
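The SCR accessors above can drop the NV_HOST_FLAGS_SCR_MMIO branch because ioread32()/iowrite32() accept the cookie returned by pci_iomap() and work whether the BAR is memory- or I/O-mapped, so one code path replaces the readl()/inl() split. A minimal sketch of that pattern; the register offsets are illustrative:

#include <linux/pci.h>
#include <linux/io.h>

/* access a 32-bit register through a pci_iomap() cookie; the same code
 * works for MMIO and port I/O BARs, which is what makes the flag unnecessary */
static u32 demo_read_reg(void __iomem *base, unsigned int offset)
{
        return ioread32(base + offset);
}

static void demo_write_reg(void __iomem *base, unsigned int offset, u32 val)
{
        iowrite32(val, base + offset);
}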
@@ -382,6 +340,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
382 | int pci_dev_busy = 0; | 340 | int pci_dev_busy = 0; |
383 | int rc; | 341 | int rc; |
384 | u32 bar; | 342 | u32 bar; |
343 | unsigned long base; | ||
385 | 344 | ||
386 | // Make sure this is a SATA controller by counting the number of bars | 345 | // Make sure this is a SATA controller by counting the number of bars |
387 | // (NVIDIA SATA controllers will always have six bars). Otherwise, | 346 | // (NVIDIA SATA controllers will always have six bars). Otherwise, |
@@ -426,31 +385,16 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
426 | 385 | ||
427 | probe_ent->private_data = host; | 386 | probe_ent->private_data = host; |
428 | 387 | ||
429 | if (pci_resource_flags(pdev, 5) & IORESOURCE_MEM) | 388 | probe_ent->mmio_base = pci_iomap(pdev, 5, 0); |
430 | host->host_flags |= NV_HOST_FLAGS_SCR_MMIO; | 389 | if (!probe_ent->mmio_base) { |
431 | 390 | rc = -EIO; | |
432 | if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO) { | 391 | goto err_out_free_host; |
433 | unsigned long base; | 392 | } |
434 | |||
435 | probe_ent->mmio_base = pci_iomap(pdev, 5, 0); | ||
436 | if (probe_ent->mmio_base == NULL) { | ||
437 | rc = -EIO; | ||
438 | goto err_out_free_host; | ||
439 | } | ||
440 | |||
441 | base = (unsigned long)probe_ent->mmio_base; | ||
442 | 393 | ||
443 | probe_ent->port[0].scr_addr = | 394 | base = (unsigned long)probe_ent->mmio_base; |
444 | base + NV_PORT0_SCR_REG_OFFSET; | ||
445 | probe_ent->port[1].scr_addr = | ||
446 | base + NV_PORT1_SCR_REG_OFFSET; | ||
447 | } else { | ||
448 | 395 | ||
449 | probe_ent->port[0].scr_addr = | 396 | probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET; |
450 | pci_resource_start(pdev, 5) | NV_PORT0_SCR_REG_OFFSET; | 397 | probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET; |
451 | probe_ent->port[1].scr_addr = | ||
452 | pci_resource_start(pdev, 5) | NV_PORT1_SCR_REG_OFFSET; | ||
453 | } | ||
454 | 398 | ||
455 | pci_set_master(pdev); | 399 | pci_set_master(pdev); |
456 | 400 | ||
@@ -467,8 +411,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
467 | return 0; | 411 | return 0; |
468 | 412 | ||
469 | err_out_iounmap: | 413 | err_out_iounmap: |
470 | if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO) | 414 | pci_iounmap(pdev, probe_ent->mmio_base); |
471 | pci_iounmap(pdev, probe_ent->mmio_base); | ||
472 | err_out_free_host: | 415 | err_out_free_host: |
473 | kfree(host); | 416 | kfree(host); |
474 | err_out_free_ent: | 417 | err_out_free_ent: |
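nv_init_one now maps BAR 5 unconditionally with pci_iomap() and unwinds with pci_iounmap() on failure, using the usual goto-based cleanup ladder. A compact sketch of that probe/teardown shape under the assumption of a placeholder demo_priv structure; device enabling and region claiming are omitted for brevity:

#include <linux/pci.h>
#include <linux/slab.h>

struct demo_priv { void __iomem *mmio; };

static int demo_probe(struct pci_dev *pdev)
{
        struct demo_priv *priv;
        int rc;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->mmio = pci_iomap(pdev, 5, 0);     /* BAR 5, map the whole length */
        if (!priv->mmio) {
                rc = -EIO;
                goto err_free;
        }
        pci_set_drvdata(pdev, priv);
        return 0;

err_free:
        kfree(priv);
        return rc;
}

static void demo_remove(struct pci_dev *pdev)
{
        struct demo_priv *priv = pci_get_drvdata(pdev);

        pci_iounmap(pdev, priv->mmio);
        kfree(priv);
}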
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c index 3e75d6733239..18c296c56899 100644 --- a/drivers/scsi/sata_sil.c +++ b/drivers/scsi/sata_sil.c | |||
@@ -371,7 +371,7 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev) | |||
371 | if (quirks & SIL_QUIRK_UDMA5MAX) { | 371 | if (quirks & SIL_QUIRK_UDMA5MAX) { |
372 | printk(KERN_INFO "ata%u(%u): applying Maxtor errata fix %s\n", | 372 | printk(KERN_INFO "ata%u(%u): applying Maxtor errata fix %s\n", |
373 | ap->id, dev->devno, model_num); | 373 | ap->id, dev->devno, model_num); |
374 | ap->udma_mask &= ATA_UDMA5; | 374 | dev->udma_mask &= ATA_UDMA5; |
375 | return; | 375 | return; |
376 | } | 376 | } |
377 | } | 377 | } |
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c index 5d01e5ce5ac5..068c98a4111b 100644 --- a/drivers/scsi/sata_sil24.c +++ b/drivers/scsi/sata_sil24.c | |||
@@ -342,7 +342,7 @@ static struct ata_port_info sil24_port_info[] = { | |||
342 | .udma_mask = 0x3f, /* udma0-5 */ | 342 | .udma_mask = 0x3f, /* udma0-5 */ |
343 | .port_ops = &sil24_ops, | 343 | .port_ops = &sil24_ops, |
344 | }, | 344 | }, |
345 | /* sil_3132 */ | 345 | /* sil_3132 */ |
346 | { | 346 | { |
347 | .sht = &sil24_sht, | 347 | .sht = &sil24_sht, |
348 | .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 348 | .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
@@ -842,9 +842,10 @@ static void sil24_port_stop(struct ata_port *ap) | |||
842 | static void sil24_host_stop(struct ata_host_set *host_set) | 842 | static void sil24_host_stop(struct ata_host_set *host_set) |
843 | { | 843 | { |
844 | struct sil24_host_priv *hpriv = host_set->private_data; | 844 | struct sil24_host_priv *hpriv = host_set->private_data; |
845 | struct pci_dev *pdev = to_pci_dev(host_set->dev); | ||
845 | 846 | ||
846 | iounmap(hpriv->host_base); | 847 | pci_iounmap(pdev, hpriv->host_base); |
847 | iounmap(hpriv->port_base); | 848 | pci_iounmap(pdev, hpriv->port_base); |
848 | kfree(hpriv); | 849 | kfree(hpriv); |
849 | } | 850 | } |
850 | 851 | ||
@@ -871,26 +872,23 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
871 | goto out_disable; | 872 | goto out_disable; |
872 | 873 | ||
873 | rc = -ENOMEM; | 874 | rc = -ENOMEM; |
874 | /* ioremap mmio registers */ | 875 | /* map mmio registers */ |
875 | host_base = ioremap(pci_resource_start(pdev, 0), | 876 | host_base = pci_iomap(pdev, 0, 0); |
876 | pci_resource_len(pdev, 0)); | ||
877 | if (!host_base) | 877 | if (!host_base) |
878 | goto out_free; | 878 | goto out_free; |
879 | port_base = ioremap(pci_resource_start(pdev, 2), | 879 | port_base = pci_iomap(pdev, 2, 0); |
880 | pci_resource_len(pdev, 2)); | ||
881 | if (!port_base) | 880 | if (!port_base) |
882 | goto out_free; | 881 | goto out_free; |
883 | 882 | ||
884 | /* allocate & init probe_ent and hpriv */ | 883 | /* allocate & init probe_ent and hpriv */ |
885 | probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); | 884 | probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL); |
886 | if (!probe_ent) | 885 | if (!probe_ent) |
887 | goto out_free; | 886 | goto out_free; |
888 | 887 | ||
889 | hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL); | 888 | hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL); |
890 | if (!hpriv) | 889 | if (!hpriv) |
891 | goto out_free; | 890 | goto out_free; |
892 | 891 | ||
893 | memset(probe_ent, 0, sizeof(*probe_ent)); | ||
894 | probe_ent->dev = pci_dev_to_dev(pdev); | 892 | probe_ent->dev = pci_dev_to_dev(pdev); |
895 | INIT_LIST_HEAD(&probe_ent->node); | 893 | INIT_LIST_HEAD(&probe_ent->node); |
896 | 894 | ||
@@ -907,7 +905,6 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
907 | probe_ent->mmio_base = port_base; | 905 | probe_ent->mmio_base = port_base; |
908 | probe_ent->private_data = hpriv; | 906 | probe_ent->private_data = hpriv; |
909 | 907 | ||
910 | memset(hpriv, 0, sizeof(*hpriv)); | ||
911 | hpriv->host_base = host_base; | 908 | hpriv->host_base = host_base; |
912 | hpriv->port_base = port_base; | 909 | hpriv->port_base = port_base; |
913 | 910 | ||
@@ -1011,9 +1008,9 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1011 | 1008 | ||
1012 | out_free: | 1009 | out_free: |
1013 | if (host_base) | 1010 | if (host_base) |
1014 | iounmap(host_base); | 1011 | pci_iounmap(pdev, host_base); |
1015 | if (port_base) | 1012 | if (port_base) |
1016 | iounmap(port_base); | 1013 | pci_iounmap(pdev, port_base); |
1017 | kfree(probe_ent); | 1014 | kfree(probe_ent); |
1018 | kfree(hpriv); | 1015 | kfree(hpriv); |
1019 | pci_release_regions(pdev); | 1016 | pci_release_regions(pdev); |
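Two independent simplifications run through this file: ioremap()/iounmap() on raw resource addresses become pci_iomap()/pci_iounmap() on BAR numbers, and kmalloc() followed by memset(0) collapses into kzalloc(). A small sketch of the allocation half; the structure and field names are invented:

#include <linux/slab.h>

struct demo_hpriv {
        void __iomem *host_base;
        void __iomem *port_base;
};

static struct demo_hpriv *demo_alloc_hpriv(void)
{
        /* kzalloc() returns zeroed memory, so the separate
         * memset(hpriv, 0, sizeof(*hpriv)) call can go away */
        return kzalloc(sizeof(struct demo_hpriv), GFP_KERNEL);
}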
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c index 8f5025733def..7ac5a5f5a905 100644 --- a/drivers/scsi/sata_uli.c +++ b/drivers/scsi/sata_uli.c | |||
@@ -44,6 +44,8 @@ enum { | |||
44 | uli_5287 = 1, | 44 | uli_5287 = 1, |
45 | uli_5281 = 2, | 45 | uli_5281 = 2, |
46 | 46 | ||
47 | uli_max_ports = 4, | ||
48 | |||
47 | /* PCI configuration registers */ | 49 | /* PCI configuration registers */ |
48 | ULI5287_BASE = 0x90, /* sata0 phy SCR registers */ | 50 | ULI5287_BASE = 0x90, /* sata0 phy SCR registers */ |
49 | ULI5287_OFFS = 0x10, /* offset from sata0->sata1 phy regs */ | 51 | ULI5287_OFFS = 0x10, /* offset from sata0->sata1 phy regs */ |
@@ -51,6 +53,10 @@ enum { | |||
51 | ULI5281_OFFS = 0x60, /* offset from sata0->sata1 phy regs */ | 53 | ULI5281_OFFS = 0x60, /* offset from sata0->sata1 phy regs */ |
52 | }; | 54 | }; |
53 | 55 | ||
56 | struct uli_priv { | ||
57 | unsigned int scr_cfg_addr[uli_max_ports]; | ||
58 | }; | ||
59 | |||
54 | static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); | 60 | static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); |
55 | static u32 uli_scr_read (struct ata_port *ap, unsigned int sc_reg); | 61 | static u32 uli_scr_read (struct ata_port *ap, unsigned int sc_reg); |
56 | static void uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); | 62 | static void uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); |
@@ -137,7 +143,8 @@ MODULE_VERSION(DRV_VERSION); | |||
137 | 143 | ||
138 | static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg) | 144 | static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg) |
139 | { | 145 | { |
140 | return ap->ioaddr.scr_addr + (4 * sc_reg); | 146 | struct uli_priv *hpriv = ap->host_set->private_data; |
147 | return hpriv->scr_cfg_addr[ap->port_no] + (4 * sc_reg); | ||
141 | } | 148 | } |
142 | 149 | ||
143 | static u32 uli_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg) | 150 | static u32 uli_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg) |
@@ -182,6 +189,7 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
182 | int rc; | 189 | int rc; |
183 | unsigned int board_idx = (unsigned int) ent->driver_data; | 190 | unsigned int board_idx = (unsigned int) ent->driver_data; |
184 | int pci_dev_busy = 0; | 191 | int pci_dev_busy = 0; |
192 | struct uli_priv *hpriv; | ||
185 | 193 | ||
186 | if (!printed_version++) | 194 | if (!printed_version++) |
187 | dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); | 195 | dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); |
@@ -210,10 +218,18 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
210 | goto err_out_regions; | 218 | goto err_out_regions; |
211 | } | 219 | } |
212 | 220 | ||
221 | hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL); | ||
222 | if (!hpriv) { | ||
223 | rc = -ENOMEM; | ||
224 | goto err_out_probe_ent; | ||
225 | } | ||
226 | |||
227 | probe_ent->private_data = hpriv; | ||
228 | |||
213 | switch (board_idx) { | 229 | switch (board_idx) { |
214 | case uli_5287: | 230 | case uli_5287: |
215 | probe_ent->port[0].scr_addr = ULI5287_BASE; | 231 | hpriv->scr_cfg_addr[0] = ULI5287_BASE; |
216 | probe_ent->port[1].scr_addr = ULI5287_BASE + ULI5287_OFFS; | 232 | hpriv->scr_cfg_addr[1] = ULI5287_BASE + ULI5287_OFFS; |
217 | probe_ent->n_ports = 4; | 233 | probe_ent->n_ports = 4; |
218 | 234 | ||
219 | probe_ent->port[2].cmd_addr = pci_resource_start(pdev, 0) + 8; | 235 | probe_ent->port[2].cmd_addr = pci_resource_start(pdev, 0) + 8; |
@@ -221,27 +237,27 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
221 | probe_ent->port[2].ctl_addr = | 237 | probe_ent->port[2].ctl_addr = |
222 | (pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS) + 4; | 238 | (pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS) + 4; |
223 | probe_ent->port[2].bmdma_addr = pci_resource_start(pdev, 4) + 16; | 239 | probe_ent->port[2].bmdma_addr = pci_resource_start(pdev, 4) + 16; |
224 | probe_ent->port[2].scr_addr = ULI5287_BASE + ULI5287_OFFS*4; | 240 | hpriv->scr_cfg_addr[2] = ULI5287_BASE + ULI5287_OFFS*4; |
225 | 241 | ||
226 | probe_ent->port[3].cmd_addr = pci_resource_start(pdev, 2) + 8; | 242 | probe_ent->port[3].cmd_addr = pci_resource_start(pdev, 2) + 8; |
227 | probe_ent->port[3].altstatus_addr = | 243 | probe_ent->port[3].altstatus_addr = |
228 | probe_ent->port[3].ctl_addr = | 244 | probe_ent->port[3].ctl_addr = |
229 | (pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS) + 4; | 245 | (pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS) + 4; |
230 | probe_ent->port[3].bmdma_addr = pci_resource_start(pdev, 4) + 24; | 246 | probe_ent->port[3].bmdma_addr = pci_resource_start(pdev, 4) + 24; |
231 | probe_ent->port[3].scr_addr = ULI5287_BASE + ULI5287_OFFS*5; | 247 | hpriv->scr_cfg_addr[3] = ULI5287_BASE + ULI5287_OFFS*5; |
232 | 248 | ||
233 | ata_std_ports(&probe_ent->port[2]); | 249 | ata_std_ports(&probe_ent->port[2]); |
234 | ata_std_ports(&probe_ent->port[3]); | 250 | ata_std_ports(&probe_ent->port[3]); |
235 | break; | 251 | break; |
236 | 252 | ||
237 | case uli_5289: | 253 | case uli_5289: |
238 | probe_ent->port[0].scr_addr = ULI5287_BASE; | 254 | hpriv->scr_cfg_addr[0] = ULI5287_BASE; |
239 | probe_ent->port[1].scr_addr = ULI5287_BASE + ULI5287_OFFS; | 255 | hpriv->scr_cfg_addr[1] = ULI5287_BASE + ULI5287_OFFS; |
240 | break; | 256 | break; |
241 | 257 | ||
242 | case uli_5281: | 258 | case uli_5281: |
243 | probe_ent->port[0].scr_addr = ULI5281_BASE; | 259 | hpriv->scr_cfg_addr[0] = ULI5281_BASE; |
244 | probe_ent->port[1].scr_addr = ULI5281_BASE + ULI5281_OFFS; | 260 | hpriv->scr_cfg_addr[1] = ULI5281_BASE + ULI5281_OFFS; |
245 | break; | 261 | break; |
246 | 262 | ||
247 | default: | 263 | default: |
@@ -258,9 +274,10 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
258 | 274 | ||
259 | return 0; | 275 | return 0; |
260 | 276 | ||
277 | err_out_probe_ent: | ||
278 | kfree(probe_ent); | ||
261 | err_out_regions: | 279 | err_out_regions: |
262 | pci_release_regions(pdev); | 280 | pci_release_regions(pdev); |
263 | |||
264 | err_out: | 281 | err_out: |
265 | if (!pci_dev_busy) | 282 | if (!pci_dev_busy) |
266 | pci_disable_device(pdev); | 283 | pci_disable_device(pdev); |
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c index 9701a806539d..836bbbb26ff2 100644 --- a/drivers/scsi/sata_vsc.c +++ b/drivers/scsi/sata_vsc.c | |||
@@ -230,11 +230,11 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance, | |||
230 | handled += ata_host_intr(ap, qc); | 230 | handled += ata_host_intr(ap, qc); |
231 | } else if (is_vsc_sata_int_err(i, int_status)) { | 231 | } else if (is_vsc_sata_int_err(i, int_status)) { |
232 | /* | 232 | /* |
233 | * On some chips (i.e. Intel 31244), an error | 233 | * On some chips (i.e. Intel 31244), an error |
234 | * interrupt will sneak in at initialization | 234 | * interrupt will sneak in at initialization |
235 | * time (phy state changes). Clearing the SCR | 235 | * time (phy state changes). Clearing the SCR |
236 | * error register is not required, but it prevents | 236 | * error register is not required, but it prevents |
237 | * the phy state change interrupts from recurring | 237 | * the phy state change interrupts from recurring |
238 | * later. | 238 | * later. |
239 | */ | 239 | */ |
240 | u32 err_status; | 240 | u32 err_status; |
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 89055494dfee..a6fde52946d6 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c | |||
@@ -286,7 +286,7 @@ static int scsi_bus_suspend(struct device * dev, pm_message_t state) | |||
286 | return err; | 286 | return err; |
287 | 287 | ||
288 | if (sht->suspend) | 288 | if (sht->suspend) |
289 | err = sht->suspend(sdev); | 289 | err = sht->suspend(sdev, state); |
290 | 290 | ||
291 | return err; | 291 | return err; |
292 | } | 292 | } |