 block/blk-settings.c   | 17 -----------------
 drivers/md/dm-table.c  |  2 +-
 include/linux/blkdev.h |  1 -
 3 files changed, 1 insertion(+), 19 deletions(-)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 9acd0b7e802a..8d3393492891 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -194,23 +194,6 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
 /**
- * blk_queue_bounce_pfn - set the bounce buffer limit for queue
- * @q: the request queue for the device
- * @pfn: max address
- *
- * Description:
- *    This function is similar to blk_queue_bounce_limit except it
- *    neither changes allocation flags, nor does it set up the ISA DMA
- *    pool. This function should only be used by stacking drivers.
- *    Hardware drivers should use blk_queue_bounce_limit instead.
- */
-void blk_queue_bounce_pfn(struct request_queue *q, u64 pfn)
-{
-	q->limits.bounce_pfn = pfn;
-}
-EXPORT_SYMBOL(blk_queue_bounce_pfn);
-
-/**
  * blk_queue_max_sectors - set max sectors for a request for this queue
  * @q:  the request queue for the device
  * @max_sectors:  max sectors in the usual 512b unit
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 3ca1604ddd5c..e9a73bb242b0 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -920,7 +920,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
 	blk_queue_max_segment_size(q, t->limits.max_segment_size);
 	blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors);
 	blk_queue_segment_boundary(q, t->limits.seg_boundary_mask);
-	blk_queue_bounce_pfn(q, t->limits.bounce_pfn);
+	blk_queue_bounce_limit(q, t->limits.bounce_pfn);
 
 	if (t->limits.no_cluster)
 		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 989aa1790f48..5e740a135e73 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -910,7 +910,6 @@ extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
 extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
-extern void blk_queue_bounce_pfn(struct request_queue *, u64);
 extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);