author     Michael Ellerman <michael@ellerman.id.au>    2009-06-22 19:13:48 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>    2009-06-26 00:37:22 -0400
commit     6f0b1c6094b3e8eeeb13f8f16c1b2ef452a6f519 (patch)
tree       37c076363e30101b9c84babce595cfe7956104a7 /arch
parent     987fed3bf6982f2627d4fa242caa9026ef61132a (diff)
powerpc: Swiotlb breaks pseries
Turning on SWIOTLB selects or enables PPC_NEED_DMA_SYNC_OPS, which means
we get the non-empty versions of dma_sync_* in asm/dma-mapping.h.

On my pseries machine the dma_ops have no such routines and we die with
a NULL pointer dereference. This patch gets it booting - is there a more
elegant way to do it?
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
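The fix is the same in every hunk below: keep the BUG_ON(!dma_ops) sanity check, but only dispatch to the sync hook when the backend's dma_mapping_ops actually provides one, so platforms such as pseries that register no sync routines fall through to a no-op instead of calling through a NULL pointer. As a sketch of the result (the parameter lists are not visible in the hunk headers and are reconstructed here from the standard DMA API of that era), dma_sync_single_for_cpu ends up as:

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	/* pseries registers no sync routine; skip rather than crash */
	if (dma_ops->sync_single_range_for_cpu)
		dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
				size, direction);
}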
Diffstat (limited to 'arch')
-rw-r--r--   arch/powerpc/include/asm/dma-mapping.h   24
1 file changed, 18 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 3d9e887c3c0c..b44aaabdd1a6 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -309,7 +309,9 @@ static inline void dma_sync_single_for_cpu(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
+
+	if (dma_ops->sync_single_range_for_cpu)
+		dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
 			   size, direction);
 }
 
@@ -320,7 +322,9 @@ static inline void dma_sync_single_for_device(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	dma_ops->sync_single_range_for_device(dev, dma_handle,
+
+	if (dma_ops->sync_single_range_for_device)
+		dma_ops->sync_single_range_for_device(dev, dma_handle,
 			   0, size, direction);
 }
 
@@ -331,7 +335,9 @@ static inline void dma_sync_sg_for_cpu(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
+
+	if (dma_ops->sync_sg_for_cpu)
+		dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
 }
 
 static inline void dma_sync_sg_for_device(struct device *dev,
@@ -341,7 +347,9 @@ static inline void dma_sync_sg_for_device(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
+
+	if (dma_ops->sync_sg_for_device)
+		dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
 }
 
 static inline void dma_sync_single_range_for_cpu(struct device *dev,
@@ -351,7 +359,9 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	dma_ops->sync_single_range_for_cpu(dev, dma_handle,
+
+	if (dma_ops->sync_single_range_for_cpu)
+		dma_ops->sync_single_range_for_cpu(dev, dma_handle,
 			   offset, size, direction);
 }
 
@@ -362,7 +372,9 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
+
+	if (dma_ops->sync_single_range_for_device)
+		dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
 			   size, direction);
 }
 #else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
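The guarded-dispatch pattern the patch adopts can be shown outside the kernel as well. The following is a hypothetical, self-contained C demo (demo_ops, demo_sync_for_cpu and both backends are invented for illustration and are not kernel APIs): an ops table whose callback may be left NULL by one backend, and a caller that checks the pointer before dispatching, exactly as the patched helpers do.

#include <stdio.h>
#include <stddef.h>

/* Invented stand-in for dma_mapping_ops: a table of optional hooks
 * that a backend may leave NULL. */
struct demo_ops {
	void (*sync_for_cpu)(size_t size);
};

static void iommu_sync_for_cpu(size_t size)
{
	printf("sync_for_cpu: %zu bytes\n", size);
}

/* Backend with no sync hook, analogous to pseries before this patch. */
static struct demo_ops direct_ops = { .sync_for_cpu = NULL };

/* Backend that does provide the hook. */
static struct demo_ops iommu_ops = { .sync_for_cpu = iommu_sync_for_cpu };

/* Guarded dispatch: a missing hook becomes a no-op instead of a
 * call through a NULL pointer. */
static void demo_sync_for_cpu(const struct demo_ops *ops, size_t size)
{
	if (ops->sync_for_cpu)
		ops->sync_for_cpu(size);
}

int main(void)
{
	demo_sync_for_cpu(&direct_ops, 4096);	/* silently skipped */
	demo_sync_for_cpu(&iommu_ops, 4096);	/* prints the sync message */
	return 0;
}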