author     Ke Wei <kewei@marvell.com>  2008-03-27 02:55:23 -0400
committer  James Bottomley <James.Bottomley@HansenPartnership.com>  2008-03-28 13:31:54 -0400
commit     963829e650516d140e1f2ddaa6c9ba7cce4c2c6a
tree       12a22ce76f97f242a47c5a7676b484b129deece9
parent     8121ed420285885654af133a6ca1919590f98917
[SCSI] mvsas: fix rx DMA buffer overflow bug

Fix an overflow of the rx DMA ring buffer: the ring is used with
MVS_RX_RING_SZ + 1 entries, so allocate and free it with that size.

Also fix the default queue depth settings.
Signed-off-by: Ke Wei <kewei@marvell.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
 drivers/scsi/mvsas.c | 25 ++++++++++++++++---------
 1 file changed, 16 insertions(+), 9 deletions(-)
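Before the diff itself, a note on the first half of the fix: both the allocation in mvs_alloc() and the matching free in mvs_free() grow from MVS_RX_RING_SZ to MVS_RX_RING_SZ + 1 elements, and the init code sets mvi->rx[0] together with mvi->rx_cons. That suggests element 0 of the rx ring is reserved for the ring index, with the actual entries occupying the remaining MVS_RX_RING_SZ slots, so the old size was one element short. The standalone sketch below only illustrates that sizing rule; RING_SZ, the le32 typedef, and calloc() are stand-ins for MVS_RX_RING_SZ, __le32, and dma_alloc_coherent().

/* Standalone illustration of the rx ring sizing rule fixed by this patch.
 * Element 0 is treated as the ring index slot and entries live in
 * elements 1..RING_SZ, so RING_SZ + 1 elements are allocated (and must
 * be freed with the same size). */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define RING_SZ 16                /* hypothetical ring size */
typedef uint32_t le32;            /* stand-in for __le32 */

int main(void)
{
	le32 *rx = calloc(RING_SZ + 1, sizeof(*rx));
	if (!rx)
		return 1;

	rx[0] = 0xfff;            /* ring index slot, as mvs_alloc() does */
	for (unsigned int i = 1; i <= RING_SZ; i++)
		rx[i] = i;        /* rx[RING_SZ] would overflow a RING_SZ-sized buffer */

	printf("index slot = 0x%x, last entry = %u\n",
	       (unsigned int)rx[0], (unsigned int)rx[RING_SZ]);
	free(rx);
	return 0;
}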
diff --git a/drivers/scsi/mvsas.c b/drivers/scsi/mvsas.c
index f4f7b0af3027..761beebcb871 100644
--- a/drivers/scsi/mvsas.c
+++ b/drivers/scsi/mvsas.c
@@ -2240,7 +2240,7 @@ static void mvs_free(struct mvs_info *mvi)
 				  mvi->rx_fis, mvi->rx_fis_dma);
 	if (mvi->rx)
 		dma_free_coherent(&mvi->pdev->dev,
-				  sizeof(*mvi->rx) * MVS_RX_RING_SZ,
+				  sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
 				  mvi->rx, mvi->rx_dma);
 	if (mvi->slot)
 		dma_free_coherent(&mvi->pdev->dev,
@@ -2348,6 +2348,9 @@ static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev,
 		return NULL;
 
 	spin_lock_init(&mvi->lock);
+#ifdef MVS_USE_TASKLET
+	tasklet_init(&mvi->tasklet, mvs_tasklet, (unsigned long)mvi);
+#endif
 	mvi->pdev = pdev;
 	mvi->chip = chip;
 
@@ -2371,6 +2374,10 @@ static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev,
 		mvs_phy_init(mvi, i);
 		arr_phy[i] = &mvi->phy[i].sas_phy;
 		arr_port[i] = &mvi->port[i].sas_port;
+		mvi->port[i].taskfileset = MVS_ID_NOT_MAPPED;
+		mvi->port[i].wide_port_phymap = 0;
+		mvi->port[i].port_attached = 0;
+		INIT_LIST_HEAD(&mvi->port[i].list);
 	}
 
 	SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas;
@@ -2387,9 +2394,10 @@ static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev,
 	mvi->sas.sas_phy = arr_phy;
 	mvi->sas.sas_port = arr_port;
 	mvi->sas.num_phys = chip->n_phy;
-	mvi->sas.lldd_max_execute_num = MVS_CHIP_SLOT_SZ - 1;
+	mvi->sas.lldd_max_execute_num = 1;
 	mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE;
-	mvi->can_queue = (MVS_CHIP_SLOT_SZ >> 1) - 1;
+	mvi->shost->can_queue = MVS_CAN_QUEUE;
+	mvi->shost->cmd_per_lun = MVS_SLOTS / mvi->sas.num_phys;
 	mvi->sas.lldd_ha = mvi;
 	mvi->sas.core.shost = mvi->shost;
 
@@ -2442,11 +2450,11 @@ static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev,
 	memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
 
 	mvi->rx = dma_alloc_coherent(&pdev->dev,
-				     sizeof(*mvi->rx) * MVS_RX_RING_SZ,
+				     sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
 				     &mvi->rx_dma, GFP_KERNEL);
 	if (!mvi->rx)
 		goto err_out;
-	memset(mvi->rx, 0, sizeof(*mvi->rx) * MVS_RX_RING_SZ);
+	memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
 
 	mvi->rx[0] = cpu_to_le32(0xfff);
 	mvi->rx_cons = 0xfff;
@@ -2596,7 +2604,7 @@ static void __devinit mvs_phy_hacks(struct mvs_info *mvi)
 	mvs_cw32(regs, CMD_SAS_CTL0, tmp);
 
 	/* workaround for WDTIMEOUT , set to 550 ms */
-	mvs_cw32(regs, CMD_WD_TIMER, 0xffffff);
+	mvs_cw32(regs, CMD_WD_TIMER, 0x86470);
 
 	/* not to halt for different port op during wideport link change */
 	mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d);
@@ -2704,17 +2712,16 @@ static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
 {
 	u32 tmp;
 	struct mvs_phy *phy = &mvi->phy[i];
-	struct mvs_port *port;
+	struct mvs_port *port = phy->port;;
 
 	tmp = mvs_read_phy_ctl(mvi, i);
 
 	if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
-		if (!phy->port)
+		if (!port)
 			phy->phy_attached = 1;
 		return tmp;
 	}
 
-	port = phy->port;
 	if (port) {
 		if (phy->phy_type & PORT_TYPE_SAS) {
 			port->wide_port_phymap &= ~(1U << i);
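A closing note on the watchdog change in the mvs_phy_hacks() hunk: the new CMD_WD_TIMER value 0x86470 is 550000 decimal, which lines up with the existing "set to 550 ms" comment on the assumption that the counter ticks once per microsecond (an assumption read off the comment, not stated in the patch). Under that same assumption the old value 0xffffff (16777215) would amount to roughly 16.8 seconds, which is presumably why it was corrected.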