diff options
author | Tejun Heo <htejun@gmail.com> | 2007-02-01 01:06:36 -0500 |
---|---|---|
committer | Jeff Garzik <jeff@garzik.org> | 2007-02-09 17:39:38 -0500 |
commit | 0d5ff566779f894ca9937231a181eb31e4adff0e (patch) | |
tree | d1c7495c932581c1d41aa7f0fdb303348da49106 /drivers/ata/sata_nv.c | |
parent | 1a68ff13c8a9b517de3fd4187dc525412a6eba1b (diff) |
libata: convert to iomap
Convert libata core layer and LLDs to use iomap.
* managed iomap is used. Pointer to pcim_iomap_table() is cached at
host->iomap and used throughout LLDs. This basically replaces
host->mmio_base.
* if possible, pcim_iomap_regions() is used
Most iomap operation conversions are taken from Jeff Garzik
<jgarzik@pobox.com>'s iomap branch.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/ata/sata_nv.c')
-rw-r--r-- | drivers/ata/sata_nv.c | 95 |
1 file changed, 48 insertions, 47 deletions
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index 18361a38aee7..b9ef6f5f4024 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c | |||
@@ -54,6 +54,8 @@ | |||
54 | #define NV_ADMA_DMA_BOUNDARY 0xffffffffUL | 54 | #define NV_ADMA_DMA_BOUNDARY 0xffffffffUL |
55 | 55 | ||
56 | enum { | 56 | enum { |
57 | NV_MMIO_BAR = 5, | ||
58 | |||
57 | NV_PORTS = 2, | 59 | NV_PORTS = 2, |
58 | NV_PIO_MASK = 0x1f, | 60 | NV_PIO_MASK = 0x1f, |
59 | NV_MWDMA_MASK = 0x07, | 61 | NV_MWDMA_MASK = 0x07, |
@@ -357,7 +359,7 @@ static const struct ata_port_operations nv_generic_ops = { | |||
357 | .thaw = ata_bmdma_thaw, | 359 | .thaw = ata_bmdma_thaw, |
358 | .error_handler = nv_error_handler, | 360 | .error_handler = nv_error_handler, |
359 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | 361 | .post_internal_cmd = ata_bmdma_post_internal_cmd, |
360 | .data_xfer = ata_pio_data_xfer, | 362 | .data_xfer = ata_data_xfer, |
361 | .irq_handler = nv_generic_interrupt, | 363 | .irq_handler = nv_generic_interrupt, |
362 | .irq_clear = ata_bmdma_irq_clear, | 364 | .irq_clear = ata_bmdma_irq_clear, |
363 | .scr_read = nv_scr_read, | 365 | .scr_read = nv_scr_read, |
@@ -382,7 +384,7 @@ static const struct ata_port_operations nv_nf2_ops = { | |||
382 | .thaw = nv_nf2_thaw, | 384 | .thaw = nv_nf2_thaw, |
383 | .error_handler = nv_error_handler, | 385 | .error_handler = nv_error_handler, |
384 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | 386 | .post_internal_cmd = ata_bmdma_post_internal_cmd, |
385 | .data_xfer = ata_pio_data_xfer, | 387 | .data_xfer = ata_data_xfer, |
386 | .irq_handler = nv_nf2_interrupt, | 388 | .irq_handler = nv_nf2_interrupt, |
387 | .irq_clear = ata_bmdma_irq_clear, | 389 | .irq_clear = ata_bmdma_irq_clear, |
388 | .scr_read = nv_scr_read, | 390 | .scr_read = nv_scr_read, |
@@ -407,7 +409,7 @@ static const struct ata_port_operations nv_ck804_ops = { | |||
407 | .thaw = nv_ck804_thaw, | 409 | .thaw = nv_ck804_thaw, |
408 | .error_handler = nv_error_handler, | 410 | .error_handler = nv_error_handler, |
409 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | 411 | .post_internal_cmd = ata_bmdma_post_internal_cmd, |
410 | .data_xfer = ata_pio_data_xfer, | 412 | .data_xfer = ata_data_xfer, |
411 | .irq_handler = nv_ck804_interrupt, | 413 | .irq_handler = nv_ck804_interrupt, |
412 | .irq_clear = ata_bmdma_irq_clear, | 414 | .irq_clear = ata_bmdma_irq_clear, |
413 | .scr_read = nv_scr_read, | 415 | .scr_read = nv_scr_read, |
@@ -434,7 +436,7 @@ static const struct ata_port_operations nv_adma_ops = { | |||
434 | .thaw = nv_ck804_thaw, | 436 | .thaw = nv_ck804_thaw, |
435 | .error_handler = nv_adma_error_handler, | 437 | .error_handler = nv_adma_error_handler, |
436 | .post_internal_cmd = nv_adma_bmdma_stop, | 438 | .post_internal_cmd = nv_adma_bmdma_stop, |
437 | .data_xfer = ata_mmio_data_xfer, | 439 | .data_xfer = ata_data_xfer, |
438 | .irq_handler = nv_adma_interrupt, | 440 | .irq_handler = nv_adma_interrupt, |
439 | .irq_clear = nv_adma_irq_clear, | 441 | .irq_clear = nv_adma_irq_clear, |
440 | .scr_read = nv_scr_read, | 442 | .scr_read = nv_scr_read, |
@@ -736,7 +738,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) | |||
736 | 738 | ||
737 | /* if in ATA register mode, use standard ata interrupt handler */ | 739 | /* if in ATA register mode, use standard ata interrupt handler */ |
738 | if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) { | 740 | if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) { |
739 | u8 irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804) | 741 | u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804) |
740 | >> (NV_INT_PORT_SHIFT * i); | 742 | >> (NV_INT_PORT_SHIFT * i); |
741 | if(ata_tag_valid(ap->active_tag)) | 743 | if(ata_tag_valid(ap->active_tag)) |
742 | /** NV_INT_DEV indication seems unreliable at times | 744 | /** NV_INT_DEV indication seems unreliable at times |
@@ -827,7 +829,7 @@ static void nv_adma_irq_clear(struct ata_port *ap) | |||
827 | u16 status = readw(mmio + NV_ADMA_STAT); | 829 | u16 status = readw(mmio + NV_ADMA_STAT); |
828 | u32 notifier = readl(mmio + NV_ADMA_NOTIFIER); | 830 | u32 notifier = readl(mmio + NV_ADMA_NOTIFIER); |
829 | u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR); | 831 | u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR); |
830 | unsigned long dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS; | 832 | void __iomem *dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS; |
831 | 833 | ||
832 | /* clear ADMA status */ | 834 | /* clear ADMA status */ |
833 | writew(status, mmio + NV_ADMA_STAT); | 835 | writew(status, mmio + NV_ADMA_STAT); |
@@ -835,7 +837,7 @@ static void nv_adma_irq_clear(struct ata_port *ap) | |||
835 | pp->notifier_clear_block); | 837 | pp->notifier_clear_block); |
836 | 838 | ||
837 | /** clear legacy status */ | 839 | /** clear legacy status */ |
838 | outb(inb(dma_stat_addr), dma_stat_addr); | 840 | iowrite8(ioread8(dma_stat_addr), dma_stat_addr); |
839 | } | 841 | } |
840 | 842 | ||
841 | static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc) | 843 | static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc) |
@@ -851,15 +853,15 @@ static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc) | |||
851 | } | 853 | } |
852 | 854 | ||
853 | /* load PRD table addr. */ | 855 | /* load PRD table addr. */ |
854 | outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); | 856 | iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); |
855 | 857 | ||
856 | /* specify data direction, triple-check start bit is clear */ | 858 | /* specify data direction, triple-check start bit is clear */ |
857 | dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | 859 | dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); |
858 | dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); | 860 | dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); |
859 | if (!rw) | 861 | if (!rw) |
860 | dmactl |= ATA_DMA_WR; | 862 | dmactl |= ATA_DMA_WR; |
861 | 863 | ||
862 | outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | 864 | iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); |
863 | 865 | ||
864 | /* issue r/w command */ | 866 | /* issue r/w command */ |
865 | ata_exec_command(ap, &qc->tf); | 867 | ata_exec_command(ap, &qc->tf); |
@@ -877,9 +879,9 @@ static void nv_adma_bmdma_start(struct ata_queued_cmd *qc) | |||
877 | } | 879 | } |
878 | 880 | ||
879 | /* start host DMA transaction */ | 881 | /* start host DMA transaction */ |
880 | dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | 882 | dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); |
881 | outb(dmactl | ATA_DMA_START, | 883 | iowrite8(dmactl | ATA_DMA_START, |
882 | ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | 884 | ap->ioaddr.bmdma_addr + ATA_DMA_CMD); |
883 | } | 885 | } |
884 | 886 | ||
885 | static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc) | 887 | static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc) |
@@ -891,8 +893,8 @@ static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc) | |||
891 | return; | 893 | return; |
892 | 894 | ||
893 | /* clear start/stop bit */ | 895 | /* clear start/stop bit */ |
894 | outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START, | 896 | iowrite8(ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START, |
895 | ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | 897 | ap->ioaddr.bmdma_addr + ATA_DMA_CMD); |
896 | 898 | ||
897 | /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ | 899 | /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ |
898 | ata_altstatus(ap); /* dummy read */ | 900 | ata_altstatus(ap); /* dummy read */ |
@@ -904,7 +906,7 @@ static u8 nv_adma_bmdma_status(struct ata_port *ap) | |||
904 | 906 | ||
905 | WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)); | 907 | WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)); |
906 | 908 | ||
907 | return inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); | 909 | return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); |
908 | } | 910 | } |
909 | 911 | ||
910 | static int nv_adma_port_start(struct ata_port *ap) | 912 | static int nv_adma_port_start(struct ata_port *ap) |
@@ -927,10 +929,10 @@ static int nv_adma_port_start(struct ata_port *ap) | |||
927 | if (!pp) | 929 | if (!pp) |
928 | return -ENOMEM; | 930 | return -ENOMEM; |
929 | 931 | ||
930 | mmio = ap->host->mmio_base + NV_ADMA_PORT + | 932 | mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT + |
931 | ap->port_no * NV_ADMA_PORT_SIZE; | 933 | ap->port_no * NV_ADMA_PORT_SIZE; |
932 | pp->ctl_block = mmio; | 934 | pp->ctl_block = mmio; |
933 | pp->gen_block = ap->host->mmio_base + NV_ADMA_GEN; | 935 | pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN; |
934 | pp->notifier_clear_block = pp->gen_block + | 936 | pp->notifier_clear_block = pp->gen_block + |
935 | NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no); | 937 | NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no); |
936 | 938 | ||
@@ -1046,26 +1048,26 @@ static int nv_adma_port_resume(struct ata_port *ap) | |||
1046 | 1048 | ||
1047 | static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port) | 1049 | static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port) |
1048 | { | 1050 | { |
1049 | void __iomem *mmio = probe_ent->mmio_base; | 1051 | void __iomem *mmio = probe_ent->iomap[NV_MMIO_BAR]; |
1050 | struct ata_ioports *ioport = &probe_ent->port[port]; | 1052 | struct ata_ioports *ioport = &probe_ent->port[port]; |
1051 | 1053 | ||
1052 | VPRINTK("ENTER\n"); | 1054 | VPRINTK("ENTER\n"); |
1053 | 1055 | ||
1054 | mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE; | 1056 | mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE; |
1055 | 1057 | ||
1056 | ioport->cmd_addr = (unsigned long) mmio; | 1058 | ioport->cmd_addr = mmio; |
1057 | ioport->data_addr = (unsigned long) mmio + (ATA_REG_DATA * 4); | 1059 | ioport->data_addr = mmio + (ATA_REG_DATA * 4); |
1058 | ioport->error_addr = | 1060 | ioport->error_addr = |
1059 | ioport->feature_addr = (unsigned long) mmio + (ATA_REG_ERR * 4); | 1061 | ioport->feature_addr = mmio + (ATA_REG_ERR * 4); |
1060 | ioport->nsect_addr = (unsigned long) mmio + (ATA_REG_NSECT * 4); | 1062 | ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4); |
1061 | ioport->lbal_addr = (unsigned long) mmio + (ATA_REG_LBAL * 4); | 1063 | ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4); |
1062 | ioport->lbam_addr = (unsigned long) mmio + (ATA_REG_LBAM * 4); | 1064 | ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4); |
1063 | ioport->lbah_addr = (unsigned long) mmio + (ATA_REG_LBAH * 4); | 1065 | ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4); |
1064 | ioport->device_addr = (unsigned long) mmio + (ATA_REG_DEVICE * 4); | 1066 | ioport->device_addr = mmio + (ATA_REG_DEVICE * 4); |
1065 | ioport->status_addr = | 1067 | ioport->status_addr = |
1066 | ioport->command_addr = (unsigned long) mmio + (ATA_REG_STATUS * 4); | 1068 | ioport->command_addr = mmio + (ATA_REG_STATUS * 4); |
1067 | ioport->altstatus_addr = | 1069 | ioport->altstatus_addr = |
1068 | ioport->ctl_addr = (unsigned long) mmio + 0x20; | 1070 | ioport->ctl_addr = mmio + 0x20; |
1069 | } | 1071 | } |
1070 | 1072 | ||
1071 | static int nv_adma_host_init(struct ata_probe_ent *probe_ent) | 1073 | static int nv_adma_host_init(struct ata_probe_ent *probe_ent) |
@@ -1252,7 +1254,7 @@ static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance) | |||
1252 | irqreturn_t ret; | 1254 | irqreturn_t ret; |
1253 | 1255 | ||
1254 | spin_lock(&host->lock); | 1256 | spin_lock(&host->lock); |
1255 | irq_stat = inb(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS); | 1257 | irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS); |
1256 | ret = nv_do_interrupt(host, irq_stat); | 1258 | ret = nv_do_interrupt(host, irq_stat); |
1257 | spin_unlock(&host->lock); | 1259 | spin_unlock(&host->lock); |
1258 | 1260 | ||
@@ -1266,7 +1268,7 @@ static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance) | |||
1266 | irqreturn_t ret; | 1268 | irqreturn_t ret; |
1267 | 1269 | ||
1268 | spin_lock(&host->lock); | 1270 | spin_lock(&host->lock); |
1269 | irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804); | 1271 | irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); |
1270 | ret = nv_do_interrupt(host, irq_stat); | 1272 | ret = nv_do_interrupt(host, irq_stat); |
1271 | spin_unlock(&host->lock); | 1273 | spin_unlock(&host->lock); |
1272 | 1274 | ||
@@ -1278,7 +1280,7 @@ static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg) | |||
1278 | if (sc_reg > SCR_CONTROL) | 1280 | if (sc_reg > SCR_CONTROL) |
1279 | return 0xffffffffU; | 1281 | return 0xffffffffU; |
1280 | 1282 | ||
1281 | return ioread32((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4)); | 1283 | return ioread32(ap->ioaddr.scr_addr + (sc_reg * 4)); |
1282 | } | 1284 | } |
1283 | 1285 | ||
1284 | static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) | 1286 | static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) |
@@ -1286,36 +1288,36 @@ static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) | |||
1286 | if (sc_reg > SCR_CONTROL) | 1288 | if (sc_reg > SCR_CONTROL) |
1287 | return; | 1289 | return; |
1288 | 1290 | ||
1289 | iowrite32(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4)); | 1291 | iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4)); |
1290 | } | 1292 | } |
1291 | 1293 | ||
1292 | static void nv_nf2_freeze(struct ata_port *ap) | 1294 | static void nv_nf2_freeze(struct ata_port *ap) |
1293 | { | 1295 | { |
1294 | unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr; | 1296 | void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr; |
1295 | int shift = ap->port_no * NV_INT_PORT_SHIFT; | 1297 | int shift = ap->port_no * NV_INT_PORT_SHIFT; |
1296 | u8 mask; | 1298 | u8 mask; |
1297 | 1299 | ||
1298 | mask = inb(scr_addr + NV_INT_ENABLE); | 1300 | mask = ioread8(scr_addr + NV_INT_ENABLE); |
1299 | mask &= ~(NV_INT_ALL << shift); | 1301 | mask &= ~(NV_INT_ALL << shift); |
1300 | outb(mask, scr_addr + NV_INT_ENABLE); | 1302 | iowrite8(mask, scr_addr + NV_INT_ENABLE); |
1301 | } | 1303 | } |
1302 | 1304 | ||
1303 | static void nv_nf2_thaw(struct ata_port *ap) | 1305 | static void nv_nf2_thaw(struct ata_port *ap) |
1304 | { | 1306 | { |
1305 | unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr; | 1307 | void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr; |
1306 | int shift = ap->port_no * NV_INT_PORT_SHIFT; | 1308 | int shift = ap->port_no * NV_INT_PORT_SHIFT; |
1307 | u8 mask; | 1309 | u8 mask; |
1308 | 1310 | ||
1309 | outb(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS); | 1311 | iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS); |
1310 | 1312 | ||
1311 | mask = inb(scr_addr + NV_INT_ENABLE); | 1313 | mask = ioread8(scr_addr + NV_INT_ENABLE); |
1312 | mask |= (NV_INT_MASK << shift); | 1314 | mask |= (NV_INT_MASK << shift); |
1313 | outb(mask, scr_addr + NV_INT_ENABLE); | 1315 | iowrite8(mask, scr_addr + NV_INT_ENABLE); |
1314 | } | 1316 | } |
1315 | 1317 | ||
1316 | static void nv_ck804_freeze(struct ata_port *ap) | 1318 | static void nv_ck804_freeze(struct ata_port *ap) |
1317 | { | 1319 | { |
1318 | void __iomem *mmio_base = ap->host->mmio_base; | 1320 | void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; |
1319 | int shift = ap->port_no * NV_INT_PORT_SHIFT; | 1321 | int shift = ap->port_no * NV_INT_PORT_SHIFT; |
1320 | u8 mask; | 1322 | u8 mask; |
1321 | 1323 | ||
@@ -1326,7 +1328,7 @@ static void nv_ck804_freeze(struct ata_port *ap) | |||
1326 | 1328 | ||
1327 | static void nv_ck804_thaw(struct ata_port *ap) | 1329 | static void nv_ck804_thaw(struct ata_port *ap) |
1328 | { | 1330 | { |
1329 | void __iomem *mmio_base = ap->host->mmio_base; | 1331 | void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; |
1330 | int shift = ap->port_no * NV_INT_PORT_SHIFT; | 1332 | int shift = ap->port_no * NV_INT_PORT_SHIFT; |
1331 | u8 mask; | 1333 | u8 mask; |
1332 | 1334 | ||
@@ -1412,7 +1414,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1412 | struct nv_host_priv *hpriv; | 1414 | struct nv_host_priv *hpriv; |
1413 | int rc; | 1415 | int rc; |
1414 | u32 bar; | 1416 | u32 bar; |
1415 | unsigned long base; | 1417 | void __iomem *base; |
1416 | unsigned long type = ent->driver_data; | 1418 | unsigned long type = ent->driver_data; |
1417 | int mask_set = 0; | 1419 | int mask_set = 0; |
1418 | 1420 | ||
@@ -1464,15 +1466,14 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1464 | if (!probe_ent) | 1466 | if (!probe_ent) |
1465 | return -ENOMEM; | 1467 | return -ENOMEM; |
1466 | 1468 | ||
1467 | probe_ent->mmio_base = pcim_iomap(pdev, 5, 0); | 1469 | if (!pcim_iomap(pdev, NV_MMIO_BAR, 0)) |
1468 | if (!probe_ent->mmio_base) | ||
1469 | return -EIO; | 1470 | return -EIO; |
1471 | probe_ent->iomap = pcim_iomap_table(pdev); | ||
1470 | 1472 | ||
1471 | probe_ent->private_data = hpriv; | 1473 | probe_ent->private_data = hpriv; |
1472 | hpriv->type = type; | 1474 | hpriv->type = type; |
1473 | 1475 | ||
1474 | base = (unsigned long)probe_ent->mmio_base; | 1476 | base = probe_ent->iomap[NV_MMIO_BAR]; |
1475 | |||
1476 | probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET; | 1477 | probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET; |
1477 | probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET; | 1478 | probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET; |
1478 | 1479 | ||