path: root/block
author		Andi Kleen <ak@suse.de>	2006-03-08 20:57:26 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-08 21:10:31 -0500
commit		5ee1af9f519e6dc5a7d7912e87a1aaec857c8818 (patch)
tree		736a82a84beb422b4b48d9fba6d0e9129ab1e1fd /block
parent		f9262c12c0084ddba445a9a42e98994018e51400 (diff)
[PATCH] block: disable block layer bouncing for most memory on 64bit systems
The low level PCI DMA mapping functions should handle it in most cases.

This should fix problems with depleting the DMA zone early. The old code
used precious GFP_DMA memory in many cases where it was not needed.

Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Jens Axboe <axboe@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
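For context, a minimal sketch of where the bounce limit typically comes from (not part of this patch; the driver, function name my_setup_queue and variable names are hypothetical): a low level driver usually derives the argument to blk_queue_bounce_limit() from the DMA mask it negotiates with the PCI layer. Before this patch a 64-bit kernel would bounce everything above low memory through the small GFP_DMA zone; after it, devices that can address at least 4GB are left to the PCI DMA mapping layer (IOMMU/swiotlb) instead.

#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/dma-mapping.h>

/* Hypothetical driver setup: the device can only DMA below 4GB, so the
 * queue's bounce limit is set to that boundary. */
static int my_setup_queue(struct pci_dev *pdev, request_queue_t *q)
{
	u64 mask = DMA_32BIT_MASK;	/* device addressing limit: 4GB */

	if (pci_set_dma_mask(pdev, mask))
		return -EIO;		/* platform cannot satisfy the mask */

	/* Pages above this limit are bounced or remapped for the driver. */
	blk_queue_bounce_limit(q, mask);
	return 0;
}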
Diffstat (limited to 'block')
-rw-r--r--	block/ll_rw_blk.c	33
1 file changed, 19 insertions, 14 deletions
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 03d9c82b0fe7..0ef2971a9e82 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -625,26 +625,31 @@ static inline int ordered_bio_endio(struct request *rq, struct bio *bio,
  * Different hardware can have different requirements as to what pages
  * it can do I/O directly to. A low level driver can call
  * blk_queue_bounce_limit to have lower memory pages allocated as bounce
- * buffers for doing I/O to pages residing above @page. By default
- * the block layer sets this to the highest numbered "low" memory page.
+ * buffers for doing I/O to pages residing above @page.
  **/
 void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
 {
 	unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
+	int dma = 0;
 
-	/*
-	 * set appropriate bounce gfp mask -- unfortunately we don't have a
-	 * full 4GB zone, so we have to resort to low memory for any bounces.
-	 * ISA has its own < 16MB zone.
-	 */
-	if (bounce_pfn < blk_max_low_pfn) {
-		BUG_ON(dma_addr < BLK_BOUNCE_ISA);
+	q->bounce_gfp = GFP_NOIO;
+#if BITS_PER_LONG == 64
+	/* Assume anything <= 4GB can be handled by IOMMU.
+	   Actually some IOMMUs can handle everything, but I don't
+	   know of a way to test this here. */
+	if (bounce_pfn < (0xffffffff>>PAGE_SHIFT))
+		dma = 1;
+	q->bounce_pfn = max_low_pfn;
+#else
+	if (bounce_pfn < blk_max_low_pfn)
+		dma = 1;
+	q->bounce_pfn = bounce_pfn;
+#endif
+	if (dma) {
 		init_emergency_isa_pool();
 		q->bounce_gfp = GFP_NOIO | GFP_DMA;
-	} else
-		q->bounce_gfp = GFP_NOIO;
-
-	q->bounce_pfn = bounce_pfn;
+		q->bounce_pfn = bounce_pfn;
+	}
 }
 
 EXPORT_SYMBOL(blk_queue_bounce_limit);
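For readability, this is how blk_queue_bounce_limit() reads after the change, reconstructed from the hunk above (the authoritative source is block/ll_rw_blk.c at this commit):

void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
{
	unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/* Assume anything <= 4GB can be handled by IOMMU.
	   Actually some IOMMUs can handle everything, but I don't
	   know of a way to test this here. */
	if (bounce_pfn < (0xffffffff>>PAGE_SHIFT))
		dma = 1;
	q->bounce_pfn = max_low_pfn;
#else
	if (bounce_pfn < blk_max_low_pfn)
		dma = 1;
	q->bounce_pfn = bounce_pfn;
#endif
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->bounce_pfn = bounce_pfn;
	}
}

On 64-bit kernels the GFP_DMA bounce path is now taken only when the device's limit is below 4GB; otherwise bouncing is capped at max_low_pfn and anything the device cannot reach is left to the DMA mapping layer.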