author     James Bottomley <jejb@mulgrave.il.steeleye.com>  2007-02-10 14:45:43 -0500
committer  James Bottomley <jejb@mulgrave.il.steeleye.com>  2007-02-10 14:45:43 -0500
commit     81b7bbd1932a04869d4c8635a75222dfc6089f96 (patch)
tree       285ae868a1e3a41fb0dbfe346c28e380949bcb55 /drivers/ata/sata_mv.c
parent     98051995ab44b993f992946055edc6115351f725 (diff)
parent     66efc5a7e3061c3597ac43a8bb1026488d57e66b (diff)
Merge branch 'linus'

Conflicts:
	drivers/scsi/ipr.c

Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Diffstat (limited to 'drivers/ata/sata_mv.c')
-rw-r--r--  drivers/ata/sata_mv.c | 200
1 file changed, 59 insertions(+), 141 deletions(-)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index aae0b5201c1e..769eca52442c 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -34,7 +34,6 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_cmnd.h>
 #include <linux/libata.h>
-#include <asm/io.h>
 
 #define DRV_NAME "sata_mv"
 #define DRV_VERSION "0.7"
@@ -342,7 +341,6 @@ static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
 static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
 static void mv_phy_reset(struct ata_port *ap);
 static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
-static void mv_host_stop(struct ata_host *host);
 static int mv_port_start(struct ata_port *ap);
 static void mv_port_stop(struct ata_port *ap);
 static void mv_qc_prep(struct ata_queued_cmd *qc);
@@ -406,19 +404,20 @@ static const struct ata_port_operations mv5_ops = {
 
 	.qc_prep = mv_qc_prep,
 	.qc_issue = mv_qc_issue,
-	.data_xfer = ata_mmio_data_xfer,
+	.data_xfer = ata_data_xfer,
 
 	.eng_timeout = mv_eng_timeout,
 
 	.irq_handler = mv_interrupt,
 	.irq_clear = mv_irq_clear,
+	.irq_on = ata_irq_on,
+	.irq_ack = ata_irq_ack,
 
 	.scr_read = mv5_scr_read,
 	.scr_write = mv5_scr_write,
 
 	.port_start = mv_port_start,
 	.port_stop = mv_port_stop,
-	.host_stop = mv_host_stop,
 };
 
 static const struct ata_port_operations mv6_ops = {
@@ -434,19 +433,20 @@ static const struct ata_port_operations mv6_ops = {
 
 	.qc_prep = mv_qc_prep,
 	.qc_issue = mv_qc_issue,
-	.data_xfer = ata_mmio_data_xfer,
+	.data_xfer = ata_data_xfer,
 
 	.eng_timeout = mv_eng_timeout,
 
 	.irq_handler = mv_interrupt,
 	.irq_clear = mv_irq_clear,
+	.irq_on = ata_irq_on,
+	.irq_ack = ata_irq_ack,
 
 	.scr_read = mv_scr_read,
 	.scr_write = mv_scr_write,
 
 	.port_start = mv_port_start,
 	.port_stop = mv_port_stop,
-	.host_stop = mv_host_stop,
 };
 
 static const struct ata_port_operations mv_iie_ops = {
@@ -462,19 +462,20 @@ static const struct ata_port_operations mv_iie_ops = {
 
 	.qc_prep = mv_qc_prep_iie,
 	.qc_issue = mv_qc_issue,
-	.data_xfer = ata_mmio_data_xfer,
+	.data_xfer = ata_data_xfer,
 
 	.eng_timeout = mv_eng_timeout,
 
 	.irq_handler = mv_interrupt,
 	.irq_clear = mv_irq_clear,
+	.irq_on = ata_irq_on,
+	.irq_ack = ata_irq_ack,
 
 	.scr_read = mv_scr_read,
 	.scr_write = mv_scr_write,
 
 	.port_start = mv_port_start,
 	.port_stop = mv_port_stop,
-	.host_stop = mv_host_stop,
 };
 
 static const struct ata_port_info mv_port_info[] = {
@@ -620,7 +621,7 @@ static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
 
 static inline void __iomem *mv_ap_base(struct ata_port *ap)
 {
-	return mv_port_base(ap->host->mmio_base, ap->port_no);
+	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
 }
 
 static inline int mv_get_hc_count(unsigned long port_flags)
@@ -809,35 +810,6 @@ static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
 	}
 }
 
-/**
- * mv_host_stop - Host specific cleanup/stop routine.
- * @host: host data structure
- *
- * Disable ints, cleanup host memory, call general purpose
- * host_stop.
- *
- * LOCKING:
- * Inherited from caller.
- */
-static void mv_host_stop(struct ata_host *host)
-{
-	struct mv_host_priv *hpriv = host->private_data;
-	struct pci_dev *pdev = to_pci_dev(host->dev);
-
-	if (hpriv->hp_flags & MV_HP_FLAG_MSI) {
-		pci_disable_msi(pdev);
-	} else {
-		pci_intx(pdev, 0);
-	}
-	kfree(hpriv);
-	ata_host_stop(host);
-}
-
-static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
-{
-	dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
-}
-
 static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
 {
 	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
@@ -883,22 +855,21 @@ static int mv_port_start(struct ata_port *ap)
 	void __iomem *port_mmio = mv_ap_base(ap);
 	void *mem;
 	dma_addr_t mem_dma;
-	int rc = -ENOMEM;
+	int rc;
 
-	pp = kmalloc(sizeof(*pp), GFP_KERNEL);
+	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
 	if (!pp)
-		goto err_out;
-	memset(pp, 0, sizeof(*pp));
+		return -ENOMEM;
 
-	mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
+	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
 	if (!mem)
-		goto err_out_pp;
+		return -ENOMEM;
 	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
 
 	rc = ata_pad_alloc(ap, dev);
 	if (rc)
-		goto err_out_priv;
+		return rc;
 
 	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
@@ -951,13 +922,6 @@ static int mv_port_start(struct ata_port *ap)
	 */
 	ap->private_data = pp;
 	return 0;
-
-err_out_priv:
-	mv_priv_free(pp, dev);
-err_out_pp:
-	kfree(pp);
-err_out:
-	return rc;
 }
 
 /**
@@ -971,18 +935,11 @@ err_out:
  */
 static void mv_port_stop(struct ata_port *ap)
 {
-	struct device *dev = ap->host->dev;
-	struct mv_port_priv *pp = ap->private_data;
 	unsigned long flags;
 
 	spin_lock_irqsave(&ap->host->lock, flags);
 	mv_stop_dma(ap);
 	spin_unlock_irqrestore(&ap->host->lock, flags);
-
-	ap->private_data = NULL;
-	ata_pad_free(ap, dev);
-	mv_priv_free(pp, dev);
-	kfree(pp);
 }
 
 /**
@@ -1348,7 +1305,7 @@ static void mv_err_intr(struct ata_port *ap, int reset_allowed)
  */
 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
 {
-	void __iomem *mmio = host->mmio_base;
+	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
 	struct ata_queued_cmd *qc;
 	u32 hc_irq_cause;
@@ -1391,8 +1348,7 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
 		} else {
 			/* PIO: check for device (drive) interrupt */
 			if ((DEV_IRQ << hard_port) & hc_irq_cause) {
-				ata_status = readb((void __iomem *)
-						   ap->ioaddr.status_addr);
+				ata_status = readb(ap->ioaddr.status_addr);
 				handled = 1;
 				/* ignore spurious intr if drive still BUSY */
 				if (ata_status & ATA_BUSY) {
@@ -1452,7 +1408,7 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance)
 {
 	struct ata_host *host = dev_instance;
 	unsigned int hc, handled = 0, n_hcs;
-	void __iomem *mmio = host->mmio_base;
+	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
 	struct mv_host_priv *hpriv;
 	u32 irq_stat;
 
@@ -1528,22 +1484,24 @@ static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
 
 static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
 {
-	void __iomem *mmio = mv5_phy_base(ap->host->mmio_base, ap->port_no);
+	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
+	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
 
 	if (ofs != 0xffffffffU)
-		return readl(mmio + ofs);
+		return readl(addr + ofs);
 	else
 		return (u32) ofs;
 }
 
 static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
 {
-	void __iomem *mmio = mv5_phy_base(ap->host->mmio_base, ap->port_no);
+	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
+	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
 
 	if (ofs != 0xffffffffU)
-		writelfl(val, mmio + ofs);
+		writelfl(val, addr + ofs);
 }
 
 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
@@ -1905,7 +1863,7 @@ static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
 static void mv_stop_and_reset(struct ata_port *ap)
 {
 	struct mv_host_priv *hpriv = ap->host->private_data;
-	void __iomem *mmio = ap->host->mmio_base;
+	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
 
 	mv_stop_dma(ap);
 
@@ -2003,10 +1961,10 @@ comreset_retry:
 		break;
 	}
 
-	tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
-	tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
-	tf.lbal = readb((void __iomem *) ap->ioaddr.lbal_addr);
-	tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr);
+	tf.lbah = readb(ap->ioaddr.lbah_addr);
+	tf.lbam = readb(ap->ioaddr.lbam_addr);
+	tf.lbal = readb(ap->ioaddr.lbal_addr);
+	tf.nsect = readb(ap->ioaddr.nsect_addr);
 
 	dev->class = ata_dev_classify(&tf);
 	if (!ata_dev_enabled(dev)) {
@@ -2038,17 +1996,17 @@ static void mv_phy_reset(struct ata_port *ap)
  */
 static void mv_eng_timeout(struct ata_port *ap)
 {
+	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
 	struct ata_queued_cmd *qc;
 	unsigned long flags;
 
 	ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
 	DPRINTK("All regs @ start of eng_timeout\n");
-	mv_dump_all_regs(ap->host->mmio_base, ap->port_no,
-			 to_pci_dev(ap->host->dev));
+	mv_dump_all_regs(mmio, ap->port_no, to_pci_dev(ap->host->dev));
 
 	qc = ata_qc_from_tag(ap, ap->active_tag);
 	printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
-	       ap->host->mmio_base, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);
+	       mmio, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);
 
 	spin_lock_irqsave(&ap->host->lock, flags);
 	mv_err_intr(ap, 0);
@@ -2076,7 +2034,7 @@ static void mv_eng_timeout(struct ata_port *ap)
  */
 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
 {
-	unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS;
+	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
 	unsigned serr_ofs;
 
 	/* PIO related setup
@@ -2224,7 +2182,7 @@ static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
			 unsigned int board_idx)
 {
 	int rc = 0, n_hc, port, hc;
-	void __iomem *mmio = probe_ent->mmio_base;
+	void __iomem *mmio = probe_ent->iomap[MV_PRIMARY_BAR];
 	struct mv_host_priv *hpriv = probe_ent->private_data;
 
 	/* global interrupt mask */
@@ -2342,49 +2300,36 @@ static void mv_print_info(struct ata_probe_ent *probe_ent)
 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version = 0;
-	struct ata_probe_ent *probe_ent = NULL;
+	struct device *dev = &pdev->dev;
+	struct ata_probe_ent *probe_ent;
 	struct mv_host_priv *hpriv;
 	unsigned int board_idx = (unsigned int)ent->driver_data;
-	void __iomem *mmio_base;
-	int pci_dev_busy = 0, rc;
+	int rc;
 
 	if (!printed_version++)
 		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
 
-	rc = pci_enable_device(pdev);
-	if (rc) {
+	rc = pcim_enable_device(pdev);
+	if (rc)
 		return rc;
-	}
 	pci_set_master(pdev);
 
-	rc = pci_request_regions(pdev, DRV_NAME);
-	if (rc) {
-		pci_dev_busy = 1;
-		goto err_out;
-	}
+	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
+	if (rc == -EBUSY)
+		pcim_pin_device(pdev);
+	if (rc)
+		return rc;
 
-	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
-	if (probe_ent == NULL) {
-		rc = -ENOMEM;
-		goto err_out_regions;
-	}
+	probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
+	if (probe_ent == NULL)
+		return -ENOMEM;
 
-	memset(probe_ent, 0, sizeof(*probe_ent));
 	probe_ent->dev = pci_dev_to_dev(pdev);
 	INIT_LIST_HEAD(&probe_ent->node);
 
-	mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0);
-	if (mmio_base == NULL) {
-		rc = -ENOMEM;
-		goto err_out_free_ent;
-	}
-
-	hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
-	if (!hpriv) {
-		rc = -ENOMEM;
-		goto err_out_iounmap;
-	}
-	memset(hpriv, 0, sizeof(*hpriv));
+	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+	if (!hpriv)
+		return -ENOMEM;
 
 	probe_ent->sht = mv_port_info[board_idx].sht;
 	probe_ent->port_flags = mv_port_info[board_idx].flags;
@@ -2394,53 +2339,26 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	probe_ent->irq = pdev->irq;
 	probe_ent->irq_flags = IRQF_SHARED;
-	probe_ent->mmio_base = mmio_base;
+	probe_ent->iomap = pcim_iomap_table(pdev);
 	probe_ent->private_data = hpriv;
 
 	/* initialize adapter */
 	rc = mv_init_host(pdev, probe_ent, board_idx);
-	if (rc) {
-		goto err_out_hpriv;
-	}
+	if (rc)
+		return rc;
 
 	/* Enable interrupts */
-	if (msi && pci_enable_msi(pdev) == 0) {
-		hpriv->hp_flags |= MV_HP_FLAG_MSI;
-	} else {
+	if (msi && !pci_enable_msi(pdev))
 		pci_intx(pdev, 1);
-	}
 
 	mv_dump_pci_cfg(pdev, 0x68);
 	mv_print_info(probe_ent);
 
-	if (ata_device_add(probe_ent) == 0) {
-		rc = -ENODEV;	/* No devices discovered */
-		goto err_out_dev_add;
-	}
+	if (ata_device_add(probe_ent) == 0)
+		return -ENODEV;
 
-	kfree(probe_ent);
+	devm_kfree(dev, probe_ent);
 	return 0;
-
-err_out_dev_add:
-	if (MV_HP_FLAG_MSI & hpriv->hp_flags) {
-		pci_disable_msi(pdev);
-	} else {
-		pci_intx(pdev, 0);
-	}
-err_out_hpriv:
-	kfree(hpriv);
-err_out_iounmap:
-	pci_iounmap(pdev, mmio_base);
-err_out_free_ent:
-	kfree(probe_ent);
-err_out_regions:
-	pci_release_regions(pdev);
-err_out:
-	if (!pci_dev_busy) {
-		pci_disable_device(pdev);
-	}
-
-	return rc;
 }
 
 static int __init mv_init(void)
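
Editorial note: the probe-path changes above replace the goto-based unwind in mv_init_one() with managed (devres) PCI and memory helpers, so error paths can simply return. The sketch below is illustrative only, not part of the commit or of sata_mv; demo_probe, demo_priv, DEMO_BAR and the "demo" region name are hypothetical placeholders. It shows, under those assumptions, the same managed-probe pattern the diff adopts: pcim_enable_device(), pcim_iomap_regions()/pcim_pin_device(), pcim_iomap_table() and devm_kzalloc().

/* Minimal sketch of a devres-managed PCI probe (hypothetical names). */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>

#define DEMO_BAR 0	/* hypothetical memory BAR index */

struct demo_priv {
	void __iomem *mmio;	/* mapped BAR registers */
};

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct demo_priv *priv;
	int rc;

	/* Managed enable: the device is disabled again automatically
	 * when probe fails or the driver is detached.
	 */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* Request and ioremap the BAR in one managed step. */
	rc = pcim_iomap_regions(pdev, 1 << DEMO_BAR, "demo");
	if (rc == -EBUSY)
		pcim_pin_device(pdev);	/* keep the device enabled for its current owner */
	if (rc)
		return rc;

	/* Managed allocation: freed by devres, so no kfree()/goto unwinding. */
	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->mmio = pcim_iomap_table(pdev)[DEMO_BAR];
	pci_set_master(pdev);
	pci_set_drvdata(pdev, priv);

	/* Plain returns suffice: devres unwinds everything on error. */
	return 0;
}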