path: root/block/blk-settings.c
author    Tejun Heo <tj@kernel.org>    2009-04-15 09:10:25 -0400
committer Jens Axboe <jens.axboe@oracle.com>    2009-04-22 02:35:09 -0400
commit    cd0aca2d550f238d80ba58e7dcade4ea3d0a3aa7 (patch)
tree      9581a77ce54247a18963c9d827063923a667add7 /block/blk-settings.c
parent    25636e282fe95508cae96bb27f86407aef935817 (diff)
block: fix queue bounce limit setting
Impact: don't set GFP_DMA in q->bounce_gfp unnecessarily

All DMA address limits are expressed in terms of the last addressable
unit (byte or page) instead of one plus that. However, when determining
bounce_gfp for 64bit machines in blk_queue_bounce_limit(), it compares
the specified limit against 0x100000000UL to determine whether it's
below 4G, ending up falsely setting GFP_DMA in q->bounce_gfp.

As the DMA zone is very small on x86_64, this makes larger SG_IO
transfers very eager to trigger the OOM killer. Fix it.

While at it, rename the parameter to @dma_mask for clarity and convert
the comment to proper winged style.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
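For illustration only (this sketch is not part of the commit): a device
with a full 32-bit DMA mask reports its limit as the last addressable
byte, 0xffffffff. The old code compared the derived pfn against
0x100000000UL >> PAGE_SHIFT, i.e. one past the limit, so such a device
was falsely classified as needing GFP_DMA bounce buffers; the fixed
comparison against 0xffffffffUL is a last-addressable-byte value like
the mask itself. A minimal userspace demonstration of the arithmetic,
assuming PAGE_SHIFT == 12 (4K pages):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* assumed: 4K pages */

int main(void)
{
	/* Full 32-bit DMA mask: the last byte the device can address. */
	uint64_t dma_mask = 0xffffffffULL;
	unsigned long b_pfn = dma_mask >> PAGE_SHIFT;	/* 0xfffff */

	/* Old check: threshold derived from one past the limit (4G). */
	int old_dma = b_pfn < (0x100000000ULL >> PAGE_SHIFT);	/* 1: wrong */

	/* Fixed check: threshold is itself a last-addressable-byte value. */
	int new_dma = b_pfn < (0xffffffffULL >> PAGE_SHIFT);	/* 0: right */

	printf("old_dma=%d new_dma=%d\n", old_dma, new_dma);
	return 0;
}

Compiled standalone, this prints old_dma=1 new_dma=0: under the old
comparison the device would have been handed GFP_DMA bounce buffers it
does not need.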
Diffstat (limited to 'block/blk-settings.c')
-rw-r--r--  block/blk-settings.c  20
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 69c42adde52b..57af728d94bb 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -156,26 +156,28 @@ EXPORT_SYMBOL(blk_queue_make_request);
 
 /**
  * blk_queue_bounce_limit - set bounce buffer limit for queue
  * @q: the request queue for the device
- * @dma_addr: bus address limit
+ * @dma_mask: the maximum address the device can handle
  *
  * Description:
  *    Different hardware can have different requirements as to what pages
  *    it can do I/O directly to. A low level driver can call
  *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
- *    buffers for doing I/O to pages residing above @dma_addr.
+ *    buffers for doing I/O to pages residing above @dma_mask.
  **/
-void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
+void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
 {
-	unsigned long b_pfn = dma_addr >> PAGE_SHIFT;
+	unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
 	int dma = 0;
 
 	q->bounce_gfp = GFP_NOIO;
 #if BITS_PER_LONG == 64
-	/* Assume anything <= 4GB can be handled by IOMMU.
-	   Actually some IOMMUs can handle everything, but I don't
-	   know of a way to test this here. */
-	if (b_pfn < (min_t(u64, 0x100000000UL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
+	/*
+	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
+	 * some IOMMUs can handle everything, but I don't know of a
+	 * way to test this here.
+	 */
+	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
 		dma = 1;
 	q->bounce_pfn = max_low_pfn;
 #else