author     Jens Axboe <jens.axboe@oracle.com>   2007-10-16 05:27:26 -0400
committer  Jens Axboe <jens.axboe@oracle.com>   2007-10-16 05:27:26 -0400
commit     9b6eccfccbfb2cde5405021beaad2ebb8081a2e9 (patch)
tree       e78df431ed46019dedcd7e4c3e0282a7de20686f /arch/ia64
parent     46856afa01769db3a5b16c3f57aa5bca45729edd (diff)
IA64: sg chaining support
This updates the ia64 iommu/pci dma mappers to support sg chaining.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c   14
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c        11
2 files changed, 13 insertions, 12 deletions
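With sg chaining, a scatterlist is no longer guaranteed to be a flat array, so entries cannot be advanced with plain pointer arithmetic ("sg++"); they must be walked with sg_next() or the for_each_sg() iterator, which is exactly the substitution this patch makes in both files. A minimal sketch of the two iteration styles follows (the walk_sg() helper and its debug output are illustrative only, not part of the patch):

#include <linux/kernel.h>
#include <linux/scatterlist.h>

/* Illustrative helper (not from the patch): walk a possibly-chained
 * scatterlist. "sg++" could step onto a chain link entry instead of the
 * next real segment, so the list must be advanced via sg_next(). */
static void walk_sg(struct scatterlist *sgl, int nents)
{
        struct scatterlist *sg;
        int i;

        /* Preferred form: for_each_sg() advances with sg_next() internally. */
        for_each_sg(sgl, sg, nents, i)
                pr_debug("entry %d: length %u\n", i, sg->length);

        /* Open-coded equivalent, matching the converted while-loops below. */
        for (sg = sgl, i = 0; i < nents; i++, sg = sg_next(sg))
                pr_debug("entry %d: length %u\n", i, sg->length);
}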
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index e980e7aa2306..4338f4123f31 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -396,7 +396,7 @@ sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
 		printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
 		       startsg->dma_address, startsg->dma_length,
 		       sba_sg_address(startsg));
-		startsg++;
+		startsg = sg_next(startsg);
 	}
 }
 
@@ -409,7 +409,7 @@ sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
 	while (the_nents-- > 0) {
 		if (sba_sg_address(the_sg) == 0x0UL)
 			sba_dump_sg(NULL, startsg, nents);
-		the_sg++;
+		the_sg = sg_next(the_sg);
 	}
 }
 
@@ -1201,7 +1201,7 @@ sba_fill_pdir(
 			u32 pide = startsg->dma_address & ~PIDE_FLAG;
 			dma_offset = (unsigned long) pide & ~iovp_mask;
 			startsg->dma_address = 0;
-			dma_sg++;
+			dma_sg = sg_next(dma_sg);
 			dma_sg->dma_address = pide | ioc->ibase;
 			pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
 			n_mappings++;
@@ -1228,7 +1228,7 @@ sba_fill_pdir(
 				pdirp++;
 			} while (cnt > 0);
 		}
-		startsg++;
+		startsg = sg_next(startsg);
 	}
 	/* force pdir update */
 	wmb();
@@ -1297,7 +1297,7 @@ sba_coalesce_chunks( struct ioc *ioc,
 		while (--nents > 0) {
 			unsigned long vaddr;	/* tmp */
 
-			startsg++;
+			startsg = sg_next(startsg);
 
 			/* PARANOID */
 			startsg->dma_address = startsg->dma_length = 0;
@@ -1407,7 +1407,7 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 #ifdef ALLOW_IOV_BYPASS_SG
 	ASSERT(to_pci_dev(dev)->dma_mask);
 	if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
-		for (sg = sglist ; filled < nents ; filled++, sg++){
+		for_each_sg(sglist, sg, nents, filled) {
 			sg->dma_length = sg->length;
 			sg->dma_address = virt_to_phys(sba_sg_address(sg));
 		}
@@ -1501,7 +1501,7 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in
 	while (nents && sglist->dma_length) {
 
 		sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir);
-		sglist++;
+		sglist = sg_next(sglist);
 		nents--;
 	}
 
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index d79ddacfba2d..ecd8a52b9b9e 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -218,16 +218,17 @@ EXPORT_SYMBOL(sn_dma_unmap_single);
  *
  * Unmap a set of streaming mode DMA translations.
  */
-void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
 		     int nhwentries, int direction)
 {
 	int i;
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
+	struct scatterlist *sg;
 
 	BUG_ON(dev->bus != &pci_bus_type);
 
-	for (i = 0; i < nhwentries; i++, sg++) {
+	for_each_sg(sgl, sg, nhwentries, i) {
 		provider->dma_unmap(pdev, sg->dma_address, direction);
 		sg->dma_address = (dma_addr_t) NULL;
 		sg->dma_length = 0;
@@ -244,11 +245,11 @@ EXPORT_SYMBOL(sn_dma_unmap_sg);
  *
  * Maps each entry of @sg for DMA.
  */
-int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
 		  int direction)
 {
 	unsigned long phys_addr;
-	struct scatterlist *saved_sg = sg;
+	struct scatterlist *saved_sg = sgl, *sg;
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
 	int i;
@@ -258,7 +259,7 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
 	/*
 	 * Setup a DMA address for each entry in the scatterlist.
 	 */
-	for (i = 0; i < nhwentries; i++, sg++) {
+	for_each_sg(sgl, sg, nhwentries, i) {
 		phys_addr = SG_ENT_PHYS_ADDRESS(sg);
 		sg->dma_address = provider->dma_map(pdev,
 						    phys_addr, sg->length,