| field | value | date |
|---|---|---|
| author | Tomas Henzl <thenzl@redhat.com> | 2011-04-29 10:28:30 -0400 |
| committer | James Bottomley <James.Bottomley@suse.de> | 2011-05-01 17:32:23 -0400 |
| commit | 87f76152dfb1bf3de18dc01cf97c70a5c9d2583e | |
| tree | 6a0b4a19fe3a3c42155f3e07b4a8d91bfa9812c6 | |
| parent | 8b7eb86f61a10132aad7f90e89a15dc89e495d9e | |
[SCSI] arcmsr: simplify assumptions in dma_alloc_coherent()
The code currently computes an offset into a dma_alloc_coherent() area
on the assumption that the alignment is imprecise. In fact, the API
guarantees PAGE_SIZE alignment, so the offset calculation is always
zero: remove it.
[jejb: make description actually descriptive]
Signed-off-by: Tomas Henzl <thenzl@redhat.com>
Acked-by: Nick Cheng <nick.cheng@areca.com.tw>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
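For illustration only (not part of the commit): a minimal user-space sketch of the arithmetic the patch removes. It assumes a buffer address that is PAGE_SIZE aligned, as dma_alloc_coherent() guarantees, and shows that rounding such an address up to a 32-byte boundary always yields a zero offset, which is why the offset bookkeeping and the extra 32 bytes of slack in the allocation size can go away. The sample address is hypothetical.

```c
/* Hypothetical stand-alone demo; the kernel's roundup() macro is reproduced here. */
#include <stdio.h>
#include <stdint.h>

#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
	/* Pretend dma_alloc_coherent() returned this CPU address: the API
	 * guarantees at least PAGE_SIZE alignment, so it is a multiple of 4096. */
	uintptr_t dma_coherent = 0x7f3a40000000UL;

	/* The calculation the patch deletes: distance from the returned
	 * address to the next 32-byte boundary. */
	uintptr_t offset = roundup(dma_coherent, 32) - dma_coherent;

	/* A page-aligned address is already 32-byte aligned (32 divides 4096),
	 * so the offset is always 0 and the "+ 32" slack is never needed. */
	printf("offset = %lu\n", (unsigned long)offset);
	return 0;
}
```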
| mode | file | changes |
|---|---|---|
| -rw-r--r-- | drivers/scsi/arcmsr/arcmsr_hba.c | 12 |

1 file changed, 5 insertions(+), 7 deletions(-)
```diff
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index a47327fe162c..f980600f78a8 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -441,10 +441,11 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
 	struct CommandControlBlock *ccb_tmp;
 	int i = 0, j = 0;
 	dma_addr_t cdb_phyaddr;
-	unsigned long roundup_ccbsize = 0, offset;
+	unsigned long roundup_ccbsize;
 	unsigned long max_xfer_len;
 	unsigned long max_sg_entrys;
 	uint32_t firm_config_version;
+
 	for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
 		for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
 			acb->devstate[i][j] = ARECA_RAID_GONE;
@@ -454,23 +455,20 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
 	firm_config_version = acb->firm_cfg_version;
 	if((firm_config_version & 0xFF) >= 3){
 		max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH << ((firm_config_version >> 8) & 0xFF)) * 1024;/* max 4M byte */
 		max_sg_entrys = (max_xfer_len/4096);
 	}
 	acb->host->max_sectors = max_xfer_len/512;
 	acb->host->sg_tablesize = max_sg_entrys;
 	roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
-	acb->uncache_size = roundup_ccbsize * ARCMSR_MAX_FREECCB_NUM + 32;
+	acb->uncache_size = roundup_ccbsize * ARCMSR_MAX_FREECCB_NUM;
 	dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
 	if(!dma_coherent){
-		printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error \n", acb->host->host_no);
+		printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no);
 		return -ENOMEM;
 	}
 	acb->dma_coherent = dma_coherent;
 	acb->dma_coherent_handle = dma_coherent_handle;
 	memset(dma_coherent, 0, acb->uncache_size);
-	offset = roundup((unsigned long)dma_coherent, 32) - (unsigned long)dma_coherent;
-	dma_coherent_handle = dma_coherent_handle + offset;
-	dma_coherent = (struct CommandControlBlock *)dma_coherent + offset;
 	ccb_tmp = dma_coherent;
 	acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
 	for(i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++){
```
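A rough user-space sketch of the pool sizing that the hunk above touches, under stated assumptions: the struct layouts and ARCMSR_MAX_FREECCB_NUM value are stand-ins rather than the driver's real definitions, malloc() stands in for dma_alloc_coherent(), and the per-CCB stride of roundup_ccbsize bytes is an assumption since the carving loop body is not shown above.

```c
/* Hypothetical mock of the CCB pool math after the patch; not the driver's code. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

#define ARCMSR_MAX_FREECCB_NUM 320UL                   /* stand-in value */

struct SG64ENTRY { unsigned int length, addr_lo, addr_hi; };   /* stand-in layout */
struct CommandControlBlock { unsigned char cdb[16]; struct SG64ENTRY sg[1]; };

int main(void)
{
	unsigned long max_sg_entrys = 128;             /* stand-in for the firmware-derived value */

	/* Same sizing as the driver: one CCB plus its extra SG entries,
	 * rounded up to a 32-byte multiple. */
	unsigned long roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) +
			(max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);

	/* After the patch the pool is exactly N CCBs; no extra 32-byte slack. */
	unsigned long uncache_size = roundup_ccbsize * ARCMSR_MAX_FREECCB_NUM;

	/* malloc() stands in for dma_alloc_coherent(); the returned buffer is
	 * used as-is, with no realignment offset. */
	char *pool = malloc(uncache_size);
	if (!pool)
		return 1;
	memset(pool, 0, uncache_size);

	/* Assumed carving: CCB i starts at i * roundup_ccbsize into the pool. */
	for (unsigned long i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
		struct CommandControlBlock *ccb =
			(struct CommandControlBlock *)(pool + i * roundup_ccbsize);
		(void)ccb;   /* the driver would initialise and enqueue each CCB here */
	}

	printf("ccb size %lu, pool size %lu\n", roundup_ccbsize, uncache_size);
	free(pool);
	return 0;
}
```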
