Diffstat (limited to 'drivers/ide/pci/sgiioc4.c')
 -rw-r--r--  drivers/ide/pci/sgiioc4.c | 103
 1 file changed, 45 insertions(+), 58 deletions(-)
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
index 6bd9523cf642..321a4e28ac19 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/pci/sgiioc4.c
@@ -170,10 +170,10 @@ sgiioc4_clearirq(ide_drive_t * drive)
                         printk(KERN_ERR
                                "%s(%s) : PCI Bus Error when doing DMA:"
                                " status-cmd reg is 0x%x\n",
-                               __FUNCTION__, drive->name, pci_stat_cmd_reg);
+                               __func__, drive->name, pci_stat_cmd_reg);
                         printk(KERN_ERR
                                "%s(%s) : PCI Error Address is 0x%x%x\n",
-                               __FUNCTION__, drive->name,
+                               __func__, drive->name,
                                pci_err_addr_high, pci_err_addr_low);
                         /* Clear the PCI Error indicator */
                         pci_write_config_dword(dev, PCI_COMMAND, 0x00000146);
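
The only change in this hunk, repeated throughout the patch, is the switch from the GCC-specific __FUNCTION__ to the standard C99 __func__ identifier. A minimal sketch of the resulting logging pattern; the helper below is hypothetical and exists only to show that the expansion, and therefore the log output, is unchanged:

        /* __func__ expands to the enclosing function's name, just as the
         * older GCC spelling __FUNCTION__ did. */
        static void example_log_pci_error(ide_drive_t *drive)  /* hypothetical */
        {
                printk(KERN_ERR "%s(%s): PCI bus error during DMA\n",
                       __func__, drive->name);
        }
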
@@ -188,7 +188,7 @@ sgiioc4_clearirq(ide_drive_t * drive)
         return intr_reg & 3;
 }
 
-static void sgiioc4_ide_dma_start(ide_drive_t * drive)
+static void sgiioc4_dma_start(ide_drive_t *drive)
 {
         ide_hwif_t *hwif = HWIF(drive);
         unsigned long ioc4_dma_addr = hwif->dma_base + IOC4_DMA_CTRL * 4;
@@ -215,8 +215,7 @@ sgiioc4_ide_dma_stop(ide_hwif_t *hwif, u64 dma_base)
 }
 
 /* Stops the IOC4 DMA Engine */
-static int
-sgiioc4_ide_dma_end(ide_drive_t * drive)
+static int sgiioc4_dma_end(ide_drive_t *drive)
 {
         u32 ioc4_dma, bc_dev, bc_mem, num, valid = 0, cnt = 0;
         ide_hwif_t *hwif = HWIF(drive);
@@ -232,7 +231,7 @@ sgiioc4_ide_dma_end(ide_drive_t * drive)
                 printk(KERN_ERR
                        "%s(%s): IOC4 DMA STOP bit is still 1 :"
                        "ioc4_dma_reg 0x%x\n",
-                       __FUNCTION__, drive->name, ioc4_dma);
+                       __func__, drive->name, ioc4_dma);
                 dma_stat = 1;
         }
 
@@ -251,7 +250,7 @@ sgiioc4_ide_dma_end(ide_drive_t * drive)
                         udelay(1);
                 }
                 if (!valid) {
-                        printk(KERN_ERR "%s(%s) : DMA incomplete\n", __FUNCTION__,
+                        printk(KERN_ERR "%s(%s) : DMA incomplete\n", __func__,
                                drive->name);
                         dma_stat = 1;
                 }
@@ -264,7 +263,7 @@ sgiioc4_ide_dma_end(ide_drive_t * drive)
                         printk(KERN_ERR
                                "%s(%s): WARNING!! byte_count_dev %d "
                                "!= byte_count_mem %d\n",
-                               __FUNCTION__, drive->name, bc_dev, bc_mem);
+                               __func__, drive->name, bc_dev, bc_mem);
                 }
         }
 
@@ -279,8 +278,7 @@ static void sgiioc4_set_dma_mode(ide_drive_t *drive, const u8 speed)
 }
 
 /* returns 1 if dma irq issued, 0 otherwise */
-static int
-sgiioc4_ide_dma_test_irq(ide_drive_t * drive)
+static int sgiioc4_dma_test_irq(ide_drive_t *drive)
 {
         return sgiioc4_checkirq(HWIF(drive));
 }
@@ -294,7 +292,7 @@ static void sgiioc4_dma_host_set(ide_drive_t *drive, int on)
 static void
 sgiioc4_resetproc(ide_drive_t * drive)
 {
-        sgiioc4_ide_dma_end(drive);
+        sgiioc4_dma_end(drive);
         sgiioc4_clearirq(drive);
 }
 
@@ -329,13 +327,17 @@ sgiioc4_INB(unsigned long port)
 
 /* Creates a dma map for the scatter-gather list entries */
 static int __devinit
-ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base)
+ide_dma_sgiioc4(ide_hwif_t *hwif, const struct ide_port_info *d)
 {
         struct pci_dev *dev = to_pci_dev(hwif->dev);
+        unsigned long dma_base = pci_resource_start(dev, 0) + IOC4_DMA_OFFSET;
         void __iomem *virt_dma_base;
         int num_ports = sizeof (ioc4_dma_regs_t);
         void *pad;
 
+        if (dma_base == 0)
+                return -1;
+
         printk(KERN_INFO "%s: BM-DMA at 0x%04lx-0x%04lx\n", hwif->name,
                dma_base, dma_base + num_ports - 1);
 
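
ide_dma_sgiioc4() now takes the (hwif, port_info) pair used by the ->init_dma hook added to sgiioc4_port_info further down, and it derives dma_base from BAR 0 itself, returning -1 when no usable base exists. A minimal sketch of how a hook of this shape is consumed, assuming the IDE core only enables DMA when the hook succeeds; the caller below is hypothetical:

        /* Hypothetical caller sketching the assumed ->init_dma contract:
         * a negative return leaves the interface in PIO mode. */
        static void example_try_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
        {
                if (d->init_dma == NULL || d->init_dma(hwif, d) < 0)
                        printk(KERN_INFO "%s: Bus-Master DMA disabled\n",
                               hwif->name);
        }
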
@@ -343,7 +345,7 @@ ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base)
                 printk(KERN_ERR
                        "%s(%s) -- ERROR, Addresses 0x%p to 0x%p "
                        "ALREADY in use\n",
-                       __FUNCTION__, hwif->name, (void *) dma_base,
+                       __func__, hwif->name, (void *) dma_base,
                        (void *) dma_base + num_ports - 1);
                 return -1;
         }
@@ -352,7 +354,7 @@ ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base)
         if (virt_dma_base == NULL) {
                 printk(KERN_ERR
                        "%s(%s) -- ERROR, Unable to map addresses 0x%lx to 0x%lx\n",
-                       __FUNCTION__, hwif->name, dma_base, dma_base + num_ports - 1);
+                       __func__, hwif->name, dma_base, dma_base + num_ports - 1);
                 goto dma_remap_failure;
         }
         hwif->dma_base = (unsigned long) virt_dma_base;
@@ -378,7 +380,7 @@ ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base)
                             hwif->dmatable_cpu, hwif->dmatable_dma);
         printk(KERN_INFO
                "%s() -- Error! Unable to allocate DMA Maps for drive %s\n",
-               __FUNCTION__, hwif->name);
+               __func__, hwif->name);
         printk(KERN_INFO
                "Changing from DMA to PIO mode for Drive %s\n", hwif->name);
 
@@ -406,14 +408,14 @@ sgiioc4_configure_for_dma(int dma_direction, ide_drive_t * drive)
         if (ioc4_dma & IOC4_S_DMA_ACTIVE) {
                 printk(KERN_WARNING
                        "%s(%s):Warning!! DMA from previous transfer was still active\n",
-                       __FUNCTION__, drive->name);
+                       __func__, drive->name);
                 writel(IOC4_S_DMA_STOP, (void __iomem *)ioc4_dma_addr);
                 ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base);
 
                 if (ioc4_dma & IOC4_S_DMA_STOP)
                         printk(KERN_ERR
                                "%s(%s) : IOC4 Dma STOP bit is still 1\n",
-                               __FUNCTION__, drive->name);
+                               __func__, drive->name);
         }
 
         ioc4_dma = readl((void __iomem *)ioc4_dma_addr);
@@ -421,14 +423,14 @@ sgiioc4_configure_for_dma(int dma_direction, ide_drive_t * drive)
                 printk(KERN_WARNING
                        "%s(%s) : Warning!! - DMA Error during Previous"
                        " transfer | status 0x%x\n",
-                       __FUNCTION__, drive->name, ioc4_dma);
+                       __func__, drive->name, ioc4_dma);
                 writel(IOC4_S_DMA_STOP, (void __iomem *)ioc4_dma_addr);
                 ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base);
 
                 if (ioc4_dma & IOC4_S_DMA_STOP)
                         printk(KERN_ERR
                                "%s(%s) : IOC4 DMA STOP bit is still 1\n",
-                               __FUNCTION__, drive->name);
+                               __func__, drive->name);
         }
 
         /* Address of the Scatter Gather List */
@@ -519,7 +521,7 @@ use_pio_instead:
         return 0;       /* revert to PIO for this request */
 }
 
-static int sgiioc4_ide_dma_setup(ide_drive_t *drive)
+static int sgiioc4_dma_setup(ide_drive_t *drive)
 {
         struct request *rq = HWGROUP(drive)->rq;
         unsigned int count = 0;
@@ -548,45 +550,37 @@ static int sgiioc4_ide_dma_setup(ide_drive_t *drive)
         return 0;
 }
 
-static void __devinit
-ide_init_sgiioc4(ide_hwif_t * hwif)
-{
-        hwif->mmio = 1;
-        hwif->set_pio_mode = NULL; /* Sets timing for PIO mode */
-        hwif->set_dma_mode = &sgiioc4_set_dma_mode;
-        hwif->selectproc = NULL;/* Use the default routine to select drive */
-        hwif->reset_poll = NULL;/* No HBA specific reset_poll needed */
-        hwif->pre_reset = NULL; /* No HBA specific pre_set needed */
-        hwif->resetproc = &sgiioc4_resetproc;/* Reset DMA engine,
-                                                clear interrupts */
-        hwif->maskproc = &sgiioc4_maskproc;     /* Mask on/off NIEN register */
-        hwif->quirkproc = NULL;
-
-        hwif->INB = &sgiioc4_INB;
-
-        if (hwif->dma_base == 0)
-                return;
+static const struct ide_port_ops sgiioc4_port_ops = {
+        .set_dma_mode = sgiioc4_set_dma_mode,
+        /* reset DMA engine, clear IRQs */
+        .resetproc = sgiioc4_resetproc,
+        /* mask on/off NIEN register */
+        .maskproc = sgiioc4_maskproc,
+};
 
-        hwif->dma_host_set = &sgiioc4_dma_host_set;
-        hwif->dma_setup = &sgiioc4_ide_dma_setup;
-        hwif->dma_start = &sgiioc4_ide_dma_start;
-        hwif->ide_dma_end = &sgiioc4_ide_dma_end;
-        hwif->ide_dma_test_irq = &sgiioc4_ide_dma_test_irq;
-        hwif->dma_lost_irq = &sgiioc4_dma_lost_irq;
-        hwif->dma_timeout = &ide_dma_timeout;
-}
+static const struct ide_dma_ops sgiioc4_dma_ops = {
+        .dma_host_set = sgiioc4_dma_host_set,
+        .dma_setup = sgiioc4_dma_setup,
+        .dma_start = sgiioc4_dma_start,
+        .dma_end = sgiioc4_dma_end,
+        .dma_test_irq = sgiioc4_dma_test_irq,
+        .dma_lost_irq = sgiioc4_dma_lost_irq,
+        .dma_timeout = ide_dma_timeout,
+};
 
 static const struct ide_port_info sgiioc4_port_info __devinitdata = {
         .chipset = ide_pci,
-        .host_flags = IDE_HFLAG_NO_DMA |        /* no SFF-style DMA */
-                        IDE_HFLAG_NO_AUTOTUNE,
+        .init_dma = ide_dma_sgiioc4,
+        .port_ops = &sgiioc4_port_ops,
+        .dma_ops = &sgiioc4_dma_ops,
+        .host_flags = IDE_HFLAG_NO_AUTOTUNE,
         .mwdma_mask = ATA_MWDMA2_ONLY,
 };
 
 static int __devinit
 sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
 {
-        unsigned long cmd_base, dma_base, irqport;
+        unsigned long cmd_base, irqport;
         unsigned long bar0, cmd_phys_base, ctl;
         void __iomem *virt_base;
         ide_hwif_t *hwif;
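
The per-hwif method assignments formerly made in ide_init_sgiioc4() become two shared const tables, and sgiioc4_port_info now points at them alongside the new ->init_dma hook, which is why IDE_HFLAG_NO_DMA can be dropped. A minimal sketch of how such a table is dereferenced, assuming the core copies the port_info's ops pointers onto the hwif and NULL-checks each optional method; the wrapper below is hypothetical:

        /* Hypothetical dispatch wrapper: one const ide_port_ops table replaces
         * a set of per-hwif function-pointer fields. */
        static void example_reset_port(ide_drive_t *drive)
        {
                const struct ide_port_ops *port_ops = HWIF(drive)->port_ops;

                if (port_ops && port_ops->resetproc)
                        port_ops->resetproc(drive);     /* sgiioc4_resetproc() here */
        }
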
@@ -612,7 +606,6 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
         cmd_base = (unsigned long) virt_base + IOC4_CMD_OFFSET;
         ctl = (unsigned long) virt_base + IOC4_CTRL_OFFSET;
         irqport = (unsigned long) virt_base + IOC4_INTR_OFFSET;
-        dma_base = pci_resource_start(dev, 0) + IOC4_DMA_OFFSET;
 
         cmd_phys_base = bar0 + IOC4_CMD_OFFSET;
         if (!request_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE,
@@ -620,7 +613,7 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
                 printk(KERN_ERR
                        "%s : %s -- ERROR, Addresses "
                        "0x%p to 0x%p ALREADY in use\n",
-                       __FUNCTION__, hwif->name, (void *) cmd_phys_base,
+                       __func__, hwif->name, (void *) cmd_phys_base,
                        (void *) cmd_phys_base + IOC4_CMD_CTL_BLK_SIZE);
                 return -ENOMEM;
         }
@@ -641,13 +634,7 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
         /* Initializing chipset IRQ Registers */
         writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4));
 
-        if (dma_base == 0 || ide_dma_sgiioc4(hwif, dma_base)) {
-                printk(KERN_INFO "%s: %s Bus-Master DMA disabled\n",
-                       hwif->name, DRV_NAME);
-                d.mwdma_mask = 0;
-        }
-
-        ide_init_sgiioc4(hwif);
+        hwif->INB = &sgiioc4_INB;
 
         idx[0] = hwif->index;
 
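
With DMA probing handled by ->init_dma and method wiring handled by the ops tables, the probe path no longer needs its local "Bus-Master DMA disabled" fallback or the ide_init_sgiioc4() call; only the driver's MMIO status-register accessor is still hooked up by hand. A rough sketch of the resulting tail of sgiioc4_ide_setup_pci_device(); the registration call and its error handling are assumptions, since they lie outside the hunks shown here:

        hwif->INB = &sgiioc4_INB;       /* IOC4-specific MMIO read helper */

        idx[0] = hwif->index;

        /* assumption: registration through ide_device_add(), with the
         * port_info carrying init_dma, port_ops and dma_ops */
        if (ide_device_add(idx, &sgiioc4_port_info))
                return -EIO;

        return 0;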
