diff options (diffstat)
 -rw-r--r--  block/blk-settings.c   | 17 +++++++++++++++++
 -rw-r--r--  drivers/md/dm-table.c  |  2 +-
 -rw-r--r--  include/linux/blkdev.h |  1 +
 3 files changed, 19 insertions(+), 1 deletion(-)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 8d3393492891..9acd0b7e802a 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -194,6 +194,23 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
 /**
+ * blk_queue_bounce_pfn - set the bounce buffer limit for queue
+ * @q: the request queue for the device
+ * @pfn: max address
+ *
+ * Description:
+ *    This function is similar to blk_queue_bounce_limit except it
+ *    neither changes allocation flags, nor does it set up the ISA DMA
+ *    pool. This function should only be used by stacking drivers.
+ *    Hardware drivers should use blk_queue_bounce_limit instead.
+ */
+void blk_queue_bounce_pfn(struct request_queue *q, u64 pfn)
+{
+	q->limits.bounce_pfn = pfn;
+}
+EXPORT_SYMBOL(blk_queue_bounce_pfn);
+
+/**
  * blk_queue_max_sectors - set max sectors for a request for this queue
  * @q: the request queue for the device
  * @max_sectors: max sectors in the usual 512b unit
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index e9a73bb242b0..3ca1604ddd5c 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -920,7 +920,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
 	blk_queue_max_segment_size(q, t->limits.max_segment_size);
 	blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors);
 	blk_queue_segment_boundary(q, t->limits.seg_boundary_mask);
-	blk_queue_bounce_limit(q, t->limits.bounce_pfn);
+	blk_queue_bounce_pfn(q, t->limits.bounce_pfn);
 
 	if (t->limits.no_cluster)
 		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5e740a135e73..989aa1790f48 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -910,6 +910,7 @@ extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
 extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
+extern void blk_queue_bounce_pfn(struct request_queue *, u64);
 extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);