path: root/arch/arm/common/dmabounce.c
author		Kevin Hilman <khilman@deeprooted.net>	2006-06-22 17:27:14 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2006-06-22 17:27:14 -0400
commit		823588c18689ddd49d4643eda7654302f18a275f (patch)
tree		3cd555063265808fdf2b5c3375f27d32148bf822 /arch/arm/common/dmabounce.c
parent		fa3e686a34f4c33de31a128cc36def0b466bfe1a (diff)
[ARM] 3537/1: Rework DMA-bounce locking for finer granularity
Patch from Kevin Hilman.

This time with IRQ versions of the locks. The rework also enables
compatibility with the realtime-preemption patch: with the previous
locking via interrupt disabling, potentially sleeping functions could
be called with interrupts disabled under RT.

Signed-off-by: Kevin Hilman <khilman@mvista.com>
Signed-off-by: Deepak Saxena <dsaxena@plexity.net>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
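For context, a minimal sketch of the locking pattern the patch moves to (the demo_* names are assumed for illustration, not the driver's actual code): a per-device rwlock guards only the safe-buffer list, so lookups can proceed concurrently under the read lock and only list updates take the write lock, rather than disabling interrupts across entire DMA API calls.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_buf {                       /* stand-in for safe_buffer */
        struct list_head node;
        dma_addr_t safe_dma_addr;
};

struct demo_dev_info {                  /* stand-in for dmabounce_device_info */
        struct list_head safe_buffers;
        rwlock_t lock;                  /* protects safe_buffers only */
};

static void demo_init(struct demo_dev_info *info)
{
        INIT_LIST_HEAD(&info->safe_buffers);
        rwlock_init(&info->lock);
}

/* Lookup: readers may overlap, since the list is only traversed here. */
static struct demo_buf *demo_find(struct demo_dev_info *info, dma_addr_t addr)
{
        struct demo_buf *b, *found = NULL;
        unsigned long flags;

        read_lock_irqsave(&info->lock, flags);
        list_for_each_entry(b, &info->safe_buffers, node) {
                if (b->safe_dma_addr == addr) {
                        found = b;
                        break;
                }
        }
        read_unlock_irqrestore(&info->lock, flags);
        return found;
}

/* Insert: writers get exclusive access, but only for the brief list update. */
static void demo_add(struct demo_dev_info *info, struct demo_buf *buf)
{
        unsigned long flags;

        write_lock_irqsave(&info->lock, flags);
        list_add(&buf->node, &info->safe_buffers);
        write_unlock_irqrestore(&info->lock, flags);
}

With this shape, the dma_map_*/dma_unmap_*/dma_sync_* entry points carry no interrupt-disabled regions of their own, which is what makes the code amenable to RT, where such locks may be substituted by sleeping variants.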
Diffstat (limited to 'arch/arm/common/dmabounce.c')
-rw-r--r--	arch/arm/common/dmabounce.c	| 67
1 file changed, 21 insertions(+), 46 deletions(-)
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 7971d0dc6892..5b7c26395b44 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -77,6 +77,8 @@ struct dmabounce_device_info {
 #endif
        struct dmabounce_pool small;
        struct dmabounce_pool large;
+
+       rwlock_t lock;
 };
 
 static LIST_HEAD(dmabounce_devs);
@@ -116,6 +118,7 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
        struct safe_buffer *buf;
        struct dmabounce_pool *pool;
        struct device *dev = device_info->dev;
+       unsigned long flags;
 
        dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
                __func__, ptr, size, dir);
@@ -163,8 +166,12 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
        print_alloc_stats(device_info);
 #endif
 
+       write_lock_irqsave(&device_info->lock, flags);
+
        list_add(&buf->node, &device_info->safe_buffers);
 
+       write_unlock_irqrestore(&device_info->lock, flags);
+
        return buf;
 }
 
@@ -172,22 +179,32 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
 static inline struct safe_buffer *
 find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
 {
-       struct safe_buffer *b;
+       struct safe_buffer *b = NULL;
+       unsigned long flags;
+
+       read_lock_irqsave(&device_info->lock, flags);
 
        list_for_each_entry(b, &device_info->safe_buffers, node)
                if (b->safe_dma_addr == safe_dma_addr)
-                       return b;
+                       break;
 
-       return NULL;
+       read_unlock_irqrestore(&device_info->lock, flags);
+       return b;
 }
 
 static inline void
 free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
 {
+       unsigned long flags;
+
        dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);
 
+       write_lock_irqsave(&device_info->lock, flags);
+
        list_del(&buf->node);
 
+       write_unlock_irqrestore(&device_info->lock, flags);
+
        if (buf->pool)
                dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
        else
@@ -396,7 +413,6 @@ dma_addr_t
 dma_map_single(struct device *dev, void *ptr, size_t size,
                enum dma_data_direction dir)
 {
-       unsigned long flags;
        dma_addr_t dma_addr;
 
        dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
@@ -404,12 +420,8 @@ dma_map_single(struct device *dev, void *ptr, size_t size,
 
        BUG_ON(dir == DMA_NONE);
 
-       local_irq_save(flags);
-
        dma_addr = map_single(dev, ptr, size, dir);
 
-       local_irq_restore(flags);
-
        return dma_addr;
 }
 
@@ -424,25 +436,18 @@ void
 dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                enum dma_data_direction dir)
 {
-       unsigned long flags;
-
        dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
                __func__, (void *) dma_addr, size, dir);
 
        BUG_ON(dir == DMA_NONE);
 
-       local_irq_save(flags);
-
        unmap_single(dev, dma_addr, size, dir);
-
-       local_irq_restore(flags);
 }
 
 int
 dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir)
 {
-       unsigned long flags;
        int i;
 
        dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
@@ -450,8 +455,6 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 
        BUG_ON(dir == DMA_NONE);
 
-       local_irq_save(flags);
-
        for (i = 0; i < nents; i++, sg++) {
                struct page *page = sg->page;
                unsigned int offset = sg->offset;
@@ -462,8 +465,6 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                        map_single(dev, ptr, length, dir);
        }
 
-       local_irq_restore(flags);
-
        return nents;
 }
 
@@ -471,7 +472,6 @@ void
 dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir)
 {
-       unsigned long flags;
        int i;
 
        dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
@@ -479,55 +479,38 @@ dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 
        BUG_ON(dir == DMA_NONE);
 
-       local_irq_save(flags);
-
        for (i = 0; i < nents; i++, sg++) {
                dma_addr_t dma_addr = sg->dma_address;
                unsigned int length = sg->length;
 
                unmap_single(dev, dma_addr, length, dir);
        }
-
-       local_irq_restore(flags);
 }
 
 void
 dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, size_t size,
                                enum dma_data_direction dir)
 {
-       unsigned long flags;
-
        dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
                __func__, (void *) dma_addr, size, dir);
 
-       local_irq_save(flags);
-
        sync_single(dev, dma_addr, size, dir);
-
-       local_irq_restore(flags);
 }
 
 void
 dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr, size_t size,
                                enum dma_data_direction dir)
 {
-       unsigned long flags;
-
        dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
                __func__, (void *) dma_addr, size, dir);
 
-       local_irq_save(flags);
-
        sync_single(dev, dma_addr, size, dir);
-
-       local_irq_restore(flags);
 }
 
 void
 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir)
 {
-       unsigned long flags;
        int i;
 
        dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
@@ -535,23 +518,18 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
 
        BUG_ON(dir == DMA_NONE);
 
-       local_irq_save(flags);
-
        for (i = 0; i < nents; i++, sg++) {
                dma_addr_t dma_addr = sg->dma_address;
                unsigned int length = sg->length;
 
                sync_single(dev, dma_addr, length, dir);
        }
-
-       local_irq_restore(flags);
 }
 
 void
 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir)
 {
-       unsigned long flags;
        int i;
 
        dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
@@ -559,16 +537,12 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
 
        BUG_ON(dir == DMA_NONE);
 
-       local_irq_save(flags);
-
        for (i = 0; i < nents; i++, sg++) {
                dma_addr_t dma_addr = sg->dma_address;
                unsigned int length = sg->length;
 
                sync_single(dev, dma_addr, length, dir);
        }
-
-       local_irq_restore(flags);
 }
 
 static int
@@ -622,6 +596,7 @@ dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
 
        device_info->dev = dev;
        INIT_LIST_HEAD(&device_info->safe_buffers);
+       rwlock_init(&device_info->lock);
 
 #ifdef STATS
        device_info->total_allocs = 0;