Diffstat (limited to 'block')
-rw-r--r--  block/blk-barrier.c    9
-rw-r--r--  block/blk-core.c       7
-rw-r--r--  block/blk-map.c       27
-rw-r--r--  block/blk-merge.c      2
-rw-r--r--  block/blk-settings.c  22
-rw-r--r--  block/blk-tag.c        2
-rw-r--r--  block/blk.h            2
-rw-r--r--  block/bsg.c            8
-rw-r--r--  block/genhd.c         14
-rw-r--r--  block/scsi_ioctl.c     4
10 files changed, 64 insertions, 33 deletions
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 6901eedeffce..55c5f1fc4f1f 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -259,8 +259,11 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
 
 static void bio_end_empty_barrier(struct bio *bio, int err)
 {
-	if (err)
+	if (err) {
+		if (err == -EOPNOTSUPP)
+			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
 		clear_bit(BIO_UPTODATE, &bio->bi_flags);
+	}
 
 	complete(bio->bi_private);
 }
@@ -309,7 +312,9 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 		*error_sector = bio->bi_sector;
 
 	ret = 0;
-	if (!bio_flagged(bio, BIO_UPTODATE))
+	if (bio_flagged(bio, BIO_EOPNOTSUPP))
+		ret = -EOPNOTSUPP;
+	else if (!bio_flagged(bio, BIO_UPTODATE))
 		ret = -EIO;
 
 	bio_put(bio);
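
With the two hunks above, blkdev_issue_flush() can now report -EOPNOTSUPP
separately from -EIO. A minimal, hypothetical caller sketch (not part of this
patch; the function name example_issue_flush is made up) that treats a missing
flush capability as non-fatal might look like:

	#include <linux/blkdev.h>	/* blkdev_issue_flush() */

	static int example_issue_flush(struct block_device *bdev)
	{
		sector_t error_sector;
		int err = blkdev_issue_flush(bdev, &error_sector);

		/* the device simply has no cache flush; not a real I/O error */
		if (err == -EOPNOTSUPP)
			err = 0;
		return err;
	}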
diff --git a/block/blk-core.c b/block/blk-core.c
index 775c8516abf5..2a438a93f723 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -127,7 +127,6 @@ void rq_init(struct request_queue *q, struct request *rq)
 	rq->nr_hw_segments = 0;
 	rq->ioprio = 0;
 	rq->special = NULL;
-	rq->raw_data_len = 0;
 	rq->buffer = NULL;
 	rq->tag = -1;
 	rq->errors = 0;
@@ -135,6 +134,7 @@ void rq_init(struct request_queue *q, struct request *rq)
 	rq->cmd_len = 0;
 	memset(rq->cmd, 0, sizeof(rq->cmd));
 	rq->data_len = 0;
+	rq->extra_len = 0;
 	rq->sense_len = 0;
 	rq->data = NULL;
 	rq->sense = NULL;
@@ -424,7 +424,6 @@ void blk_put_queue(struct request_queue *q)
 {
 	kobject_put(&q->kobj);
 }
-EXPORT_SYMBOL(blk_put_queue);
 
 void blk_cleanup_queue(struct request_queue *q)
 {
@@ -592,7 +591,6 @@ int blk_get_queue(struct request_queue *q)
 
 	return 1;
 }
-EXPORT_SYMBOL(blk_get_queue);
 
 static inline void blk_free_request(struct request_queue *q, struct request *rq)
 {
@@ -1768,6 +1766,7 @@ static inline void __end_request(struct request *rq, int uptodate,
 
 /**
  * blk_rq_bytes - Returns bytes left to complete in the entire request
+ * @rq: the request being processed
  **/
 unsigned int blk_rq_bytes(struct request *rq)
 {
@@ -1780,6 +1779,7 @@ EXPORT_SYMBOL_GPL(blk_rq_bytes);
 
 /**
  * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
+ * @rq: the request being processed
  **/
 unsigned int blk_rq_cur_bytes(struct request *rq)
 {
@@ -2016,7 +2016,6 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 	rq->hard_cur_sectors = rq->current_nr_sectors;
 	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
 	rq->buffer = bio_data(bio);
-	rq->raw_data_len = bio->bi_size;
 	rq->data_len = bio->bi_size;
 
 	rq->bio = rq->biotail = bio;
diff --git a/block/blk-map.c b/block/blk-map.c
index 09f7fd0bcb73..c07d9c8317f4 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -19,7 +19,6 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 		rq->biotail->bi_next = bio;
 		rq->biotail = bio;
 
-		rq->raw_data_len += bio->bi_size;
 		rq->data_len += bio->bi_size;
 	}
 	return 0;
@@ -44,6 +43,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 			     void __user *ubuf, unsigned int len)
 {
 	unsigned long uaddr;
+	unsigned int alignment;
 	struct bio *bio, *orig_bio;
 	int reading, ret;
 
@@ -54,8 +54,8 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 	 * direct dma. else, set up kernel bounce buffers
 	 */
 	uaddr = (unsigned long) ubuf;
-	if (!(uaddr & queue_dma_alignment(q)) &&
-	    !(len & queue_dma_alignment(q)))
+	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+	if (!(uaddr & alignment) && !(len & alignment))
 		bio = bio_map_user(q, NULL, uaddr, len, reading);
 	else
 		bio = bio_copy_user(q, uaddr, len, reading);
@@ -142,20 +142,22 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 
 	/*
 	 * __blk_rq_map_user() copies the buffers if starting address
-	 * or length isn't aligned.  As the copied buffer is always
-	 * page aligned, we know that there's enough room for padding.
-	 * Extend the last bio and update rq->data_len accordingly.
+	 * or length isn't aligned to dma_pad_mask.  As the copied
+	 * buffer is always page aligned, we know that there's enough
+	 * room for padding.  Extend the last bio and update
+	 * rq->data_len accordingly.
 	 *
 	 * On unmap, bio_uncopy_user() will use unmodified
 	 * bio_map_data pointed to by bio->bi_private.
 	 */
-	if (len & queue_dma_alignment(q)) {
-		unsigned int pad_len = (queue_dma_alignment(q) & ~len) + 1;
-		struct bio *bio = rq->biotail;
+	if (len & q->dma_pad_mask) {
+		unsigned int pad_len = (q->dma_pad_mask & ~len) + 1;
+		struct bio *tail = rq->biotail;
 
-		bio->bi_io_vec[bio->bi_vcnt - 1].bv_len += pad_len;
-		bio->bi_size += pad_len;
-		rq->data_len += pad_len;
+		tail->bi_io_vec[tail->bi_vcnt - 1].bv_len += pad_len;
+		tail->bi_size += pad_len;
+
+		rq->extra_len += pad_len;
 	}
 
 	rq->buffer = rq->data = NULL;
@@ -215,7 +217,6 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	rq->buffer = rq->data = NULL;
 	return 0;
 }
-EXPORT_SYMBOL(blk_rq_map_user_iov);
 
 /**
  * blk_rq_unmap_user - unmap a request with user data
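
To illustrate the padding arithmetic in the blk_rq_map_user() hunk above with
assumed numbers (not taken from the patch): with q->dma_pad_mask = 3 (pad to a
multiple of 4 bytes) and a user buffer of len = 13,

	pad_len = (q->dma_pad_mask & ~len) + 1
	        = (0x3 & ~0xd) + 1
	        = 0x2 + 1
	        = 3

so the tail bio grows from 13 to 16 bytes, the 3 pad bytes are accounted in
rq->extra_len, and rq->data_len continues to report only the 13 bytes the
caller actually requested.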
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 7506c4fe0264..0f58616bcd7f 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -231,7 +231,7 @@ new_segment:
 			    ((unsigned long)q->dma_drain_buffer) &
 			    (PAGE_SIZE - 1));
 		nsegs++;
-		rq->data_len += q->dma_drain_size;
+		rq->extra_len += q->dma_drain_size;
 	}
 
 	if (sg)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 9a8ffdd0ce3d..1344a0ea5cc6 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -140,7 +140,7 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
 	/* Assume anything <= 4GB can be handled by IOMMU.
 	   Actually some IOMMUs can handle everything, but I don't
 	   know of a way to test this here. */
-	if (b_pfn < (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
+	if (b_pfn <= (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
 		dma = 1;
 	q->bounce_pfn = max_low_pfn;
 #else
@@ -293,8 +293,24 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
 /**
- * blk_queue_dma_drain - Set up a drain buffer for excess dma.
+ * blk_queue_dma_pad - set pad mask
+ * @q:     the request queue for the device
+ * @mask:  pad mask
+ *
+ * Set pad mask.  Direct IO requests are padded to the mask specified.
  *
+ * Appending pad buffer to a request modifies ->data_len such that it
+ * includes the pad buffer.  The original requested data length can be
+ * obtained using blk_rq_raw_data_len().
+ **/
+void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
+{
+	q->dma_pad_mask = mask;
+}
+EXPORT_SYMBOL(blk_queue_dma_pad);
+
+/**
+ * blk_queue_dma_drain - Set up a drain buffer for excess dma.
  * @q:  the request queue for the device
  * @dma_drain_needed: fn which returns non-zero if drain is necessary
  * @buf:	physically contiguous buffer
@@ -316,7 +332,7 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
  * device can support otherwise there won't be room for the drain
  * buffer.
  */
-extern int blk_queue_dma_drain(struct request_queue *q,
+int blk_queue_dma_drain(struct request_queue *q,
 			       dma_drain_needed_fn *dma_drain_needed,
 			       void *buf, unsigned int size)
 {
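
A usage sketch for the new helper (hypothetical driver code, not part of this
patch): a driver whose DMA engine requires transfer lengths that are a multiple
of 4 bytes would register the pad mask once while configuring its queue. Like
queue_dma_alignment(), the mask is the required alignment minus one.

	#include <linux/blkdev.h>	/* blk_queue_dma_pad() */

	static void example_configure_queue(struct request_queue *q)
	{
		/* pad direct-IO transfer lengths to a multiple of 4 bytes */
		blk_queue_dma_pad(q, 0x3);
	}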
diff --git a/block/blk-tag.c b/block/blk-tag.c
index a8c37d4bbb32..4780a46ce234 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -6,6 +6,8 @@
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 
+#include "blk.h"
+
 /**
  * blk_queue_find_tag - find a request by its tag and queue
  * @q: The request queue for the device
diff --git a/block/blk.h b/block/blk.h
index ec898dd0c65c..ec9120fb789a 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -32,6 +32,8 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect);
 
 void blk_queue_congestion_threshold(struct request_queue *q);
 
+int blk_dev_init(void);
+
 /*
  * Return the threshold (number of used requests) at which the queue is
  * considered to be congested.  It include a little hysteresis to keep the
diff --git a/block/bsg.c b/block/bsg.c
index 7f3c09549e4b..8917c5174dc2 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -437,14 +437,14 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
 	}
 
 	if (rq->next_rq) {
-		hdr->dout_resid = rq->raw_data_len;
-		hdr->din_resid = rq->next_rq->raw_data_len;
+		hdr->dout_resid = rq->data_len;
+		hdr->din_resid = rq->next_rq->data_len;
 		blk_rq_unmap_user(bidi_bio);
 		blk_put_request(rq->next_rq);
 	} else if (rq_data_dir(rq) == READ)
-		hdr->din_resid = rq->raw_data_len;
+		hdr->din_resid = rq->data_len;
 	else
-		hdr->dout_resid = rq->raw_data_len;
+		hdr->dout_resid = rq->data_len;
 
 	/*
 	 * If the request generated a negative error number, return it
diff --git a/block/genhd.c b/block/genhd.c
index 53f2238e69c8..00da5219ee37 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -17,11 +17,15 @@
 #include <linux/buffer_head.h>
 #include <linux/mutex.h>
 
+#include "blk.h"
+
 static DEFINE_MUTEX(block_class_lock);
 #ifndef CONFIG_SYSFS_DEPRECATED
 struct kobject *block_depr;
 #endif
 
+static struct device_type disk_type;
+
 /*
  * Can be deleted altogether. Later.
  *
@@ -346,8 +350,6 @@ const struct seq_operations partitions_op = {
 #endif
 
 
-extern int blk_dev_init(void);
-
 static struct kobject *base_probe(dev_t devt, int *part, void *data)
 {
 	if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0)
@@ -358,7 +360,9 @@ static struct kobject *base_probe(dev_t devt, int *part, void *data)
 
 static int __init genhd_device_init(void)
 {
-	class_register(&block_class);
+	int error = class_register(&block_class);
+	if (unlikely(error))
+		return error;
 	bdev_map = kobj_map_init(base_probe, &block_class_lock);
 	blk_dev_init();
 
@@ -502,7 +506,7 @@ struct class block_class = {
 	.name		= "block",
 };
 
-struct device_type disk_type = {
+static struct device_type disk_type = {
 	.name		= "disk",
 	.groups		= disk_attr_groups,
 	.release	= disk_release,
@@ -632,12 +636,14 @@ static void media_change_notify_thread(struct work_struct *work)
 	put_device(gd->driverfs_dev);
 }
 
+#if 0
 void genhd_media_change_notify(struct gendisk *disk)
 {
 	get_device(disk->driverfs_dev);
 	schedule_work(&disk->async_notify);
 }
 EXPORT_SYMBOL_GPL(genhd_media_change_notify);
+#endif /* 0 */
 
 dev_t blk_lookup_devt(const char *name)
 {
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index e993cac4911d..a2c3a936ebf9 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -266,7 +266,7 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
 	hdr->info = 0;
 	if (hdr->masked_status || hdr->host_status || hdr->driver_status)
 		hdr->info |= SG_INFO_CHECK;
-	hdr->resid = rq->raw_data_len;
+	hdr->resid = rq->data_len;
 	hdr->sb_len_wr = 0;
 
 	if (rq->sense_len && hdr->sbp) {
@@ -528,8 +528,8 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
 	rq = blk_get_request(q, WRITE, __GFP_WAIT);
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 	rq->data = NULL;
-	rq->raw_data_len = 0;
 	rq->data_len = 0;
+	rq->extra_len = 0;
 	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
 	memset(rq->cmd, 0, sizeof(rq->cmd));
 	rq->cmd[0] = cmd;