author	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-14 20:55:15 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-14 20:55:15 -0400
commit	355bbd8cb82e60a592f6cd86ce6dbe5677615cf4 (patch)
tree	23678e50ad4687f1656edc972388ee8014e7b89d /drivers
parent	39695224bd84dc4be29abad93a0ec232a16fc519 (diff)
parent	746cd1e7e4a555ddaee53b19a46e05c9c61eaf09 (diff)
Merge branch 'for-2.6.32' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.32' of git://git.kernel.dk/linux-2.6-block: (29 commits)
  block: use blkdev_issue_discard in blk_ioctl_discard
  Make DISCARD_BARRIER and DISCARD_NOBARRIER writes instead of reads
  block: don't assume device has a request list backing in nr_requests store
  block: Optimal I/O limit wrapper
  cfq: choose a new next_req when a request is dispatched
  Seperate read and write statistics of in_flight requests
  aoe: end barrier bios with EOPNOTSUPP
  block: trace bio queueing trial only when it occurs
  block: enable rq CPU completion affinity by default
  cfq: fix the log message after dispatched a request
  block: use printk_once
  cciss: memory leak in cciss_init_one()
  splice: update mtime and atime on files
  block: make blk_iopoll_prep_sched() follow normal 0/1 return convention
  cfq-iosched: get rid of must_alloc flag
  block: use interrupts disabled version of raise_softirq_irqoff()
  block: fix comment in blk-iopoll.c
  block: adjust default budget for blk-iopoll
  block: fix long lines in block/blk-iopoll.c
  block: add blk-iopoll, a NAPI like approach for block devices
  ...
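Most of the driver hunks in this merge are mechanical conversions: the old bio_barrier()/bio_sync()/bio_rw_ahead() helpers become bio_rw_flagged(bio, BIO_RW_*), and hand-rolled "warn once" flags become printk_once(). As a rough illustration only (example_make_request and its message are made up for this summary, not part of the merge; the API shown is the 2.6.32-era block layer used in these hunks), the pattern looks like this:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/kernel.h>

static int example_make_request(struct request_queue *q, struct bio *bio)
{
	/* Drivers that cannot honour barrier requests now test the flag
	 * with bio_rw_flagged() and fail the bio up front. */
	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	/* printk_once() replaces the old "static int warned" idiom. */
	printk_once(KERN_INFO "example: first request seen\n");

	/* ... normal submission path of the driver continues here ... */
	return 0;
}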
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/block/aoe/aoeblk.c	3
-rw-r--r--	drivers/block/cciss.c	4
-rw-r--r--	drivers/block/loop.c	2
-rw-r--r--	drivers/block/paride/pcd.c	12
-rw-r--r--	drivers/block/sx8.c	4
-rw-r--r--	drivers/block/viodasd.c	12
-rw-r--r--	drivers/md/dm-raid1.c	2
-rw-r--r--	drivers/md/dm-stripe.c	4
-rw-r--r--	drivers/md/dm.c	28
-rw-r--r--	drivers/md/linear.c	2
-rw-r--r--	drivers/md/multipath.c	4
-rw-r--r--	drivers/md/raid0.c	2
-rw-r--r--	drivers/md/raid1.c	14
-rw-r--r--	drivers/md/raid10.c	6
-rw-r--r--	drivers/md/raid5.c	2
-rw-r--r--	drivers/scsi/scsi_lib.c	6
-rw-r--r--	drivers/staging/dst/dcore.c	5
17 files changed, 56 insertions(+), 56 deletions(-)
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 95d344971eda..b6cd571adbf2 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -172,6 +172,9 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
 		BUG();
 		bio_endio(bio, -ENXIO);
 		return 0;
+	} else if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
+		bio_endio(bio, -EOPNOTSUPP);
+		return 0;
 	} else if (bio->bi_io_vec == NULL) {
 		printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
 		BUG();
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index a52cc7fe45ea..0589dfbbd7db 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -3889,7 +3889,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 	int j = 0;
 	int rc;
 	int dac, return_code;
-	InquiryData_struct *inq_buff = NULL;
+	InquiryData_struct *inq_buff;
 
 	if (reset_devices) {
 		/* Reset the controller with a PCI power-cycle */
@@ -4029,6 +4029,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 		printk(KERN_WARNING "cciss: unable to determine firmware"
 			" version of controller\n");
 	}
+	kfree(inq_buff);
 
 	cciss_procinit(i);
 
@@ -4045,7 +4046,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 	return 1;
 
 clean4:
-	kfree(inq_buff);
 	kfree(hba[i]->cmd_pool_bits);
 	if (hba[i]->cmd_pool)
 		pci_free_consistent(hba[i]->pdev,
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 5757188cd1fb..bbb79441d895 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -475,7 +475,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
 	pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
 
 	if (bio_rw(bio) == WRITE) {
-		int barrier = bio_barrier(bio);
+		bool barrier = bio_rw_flagged(bio, BIO_RW_BARRIER);
 		struct file *file = lo->lo_backing_file;
 
 		if (barrier) {
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 911dfd98d813..9f3518c515a1 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -219,8 +219,6 @@ static int pcd_sector; /* address of next requested sector */
 static int pcd_count;		/* number of blocks still to do */
 static char *pcd_buf;		/* buffer for request in progress */
 
-static int pcd_warned;		/* Have we logged a phase warning ? */
-
 /* kernel glue structures */
 
 static int pcd_block_open(struct block_device *bdev, fmode_t mode)
@@ -417,12 +415,10 @@ static int pcd_completion(struct pcd_unit *cd, char *buf, char *fun)
 			printk
 			    ("%s: %s: Unexpected phase %d, d=%d, k=%d\n",
 			     cd->name, fun, p, d, k);
-			if ((verbose < 2) && !pcd_warned) {
-				pcd_warned = 1;
-				printk
-				    ("%s: WARNING: ATAPI phase errors\n",
-				     cd->name);
-			}
+			if (verbose < 2)
+				printk_once(
+					"%s: WARNING: ATAPI phase errors\n",
+					cd->name);
 			mdelay(1);
 		}
 		if (k++ > PCD_TMO) {
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index da403b6a7f43..f5cd2e83ebcc 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -1564,15 +1564,13 @@ static int carm_init_shm(struct carm_host *host)
 
 static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 {
-	static unsigned int printed_version;
 	struct carm_host *host;
 	unsigned int pci_dac;
 	int rc;
 	struct request_queue *q;
 	unsigned int i;
 
-	if (!printed_version++)
-		printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
+	printk_once(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
 
 	rc = pci_enable_device(pdev);
 	if (rc)
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index 390d69bb7c48..b441ce3832e9 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -416,15 +416,9 @@ retry:
 		goto retry;
 	}
 	if (we.max_disk > (MAX_DISKNO - 1)) {
-		static int warned;
-
-		if (warned == 0) {
-			warned++;
-			printk(VIOD_KERN_INFO
-			       "Only examining the first %d "
-			       "of %d disks connected\n",
-			       MAX_DISKNO, we.max_disk + 1);
-		}
+		printk_once(VIOD_KERN_INFO
+			"Only examining the first %d of %d disks connected\n",
+			MAX_DISKNO, we.max_disk + 1);
 	}
 
 	/* Send the close event to OS/400. We DON'T expect a response */
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 33f179e66bf5..cc9dc79b0784 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1129,7 +1129,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
 	if (error == -EOPNOTSUPP)
 		goto out;
 
-	if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
+	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
 		goto out;
 
 	if (unlikely(error)) {
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 3e563d251733..e0efc1adcaff 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -285,7 +285,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
 	if (!error)
 		return 0; /* I/O complete */
 
-	if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
+	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
 		return error;
 
 	if (error == -EOPNOTSUPP)
@@ -336,7 +336,7 @@ static void stripe_io_hints(struct dm_target *ti,
 	unsigned chunk_size = (sc->chunk_mask + 1) << 9;
 
 	blk_limits_io_min(limits, chunk_size);
-	limits->io_opt = chunk_size * sc->stripes;
+	blk_limits_io_opt(limits, chunk_size * sc->stripes);
 }
 
 static struct target_type stripe_target = {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index b4845b14740d..eee28fac210c 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -130,7 +130,7 @@ struct mapped_device {
 	/*
 	 * A list of ios that arrived while we were suspended.
 	 */
-	atomic_t pending;
+	atomic_t pending[2];
 	wait_queue_head_t wait;
 	struct work_struct work;
 	struct bio_list deferred;
@@ -453,13 +453,14 @@ static void start_io_acct(struct dm_io *io)
 {
 	struct mapped_device *md = io->md;
 	int cpu;
+	int rw = bio_data_dir(io->bio);
 
 	io->start_time = jiffies;
 
 	cpu = part_stat_lock();
 	part_round_stats(cpu, &dm_disk(md)->part0);
 	part_stat_unlock();
-	dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
+	dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
 }
 
 static void end_io_acct(struct dm_io *io)
@@ -479,8 +480,9 @@ static void end_io_acct(struct dm_io *io)
 	 * After this is decremented the bio must not be touched if it is
 	 * a barrier.
 	 */
-	dm_disk(md)->part0.in_flight = pending =
-		atomic_dec_return(&md->pending);
+	dm_disk(md)->part0.in_flight[rw] = pending =
+		atomic_dec_return(&md->pending[rw]);
+	pending += atomic_read(&md->pending[rw^0x1]);
 
 	/* nudge anyone waiting on suspend queue */
 	if (!pending)
@@ -586,7 +588,7 @@ static void dec_pending(struct dm_io *io, int error)
 			 */
 			spin_lock_irqsave(&md->deferred_lock, flags);
 			if (__noflush_suspending(md)) {
-				if (!bio_barrier(io->bio))
+				if (!bio_rw_flagged(io->bio, BIO_RW_BARRIER))
 					bio_list_add_head(&md->deferred,
 							  io->bio);
 			} else
@@ -598,7 +600,7 @@ static void dec_pending(struct dm_io *io, int error)
 		io_error = io->error;
 		bio = io->bio;
 
-		if (bio_barrier(bio)) {
+		if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
 			/*
 			 * There can be just one barrier request so we use
 			 * a per-device variable for error reporting.
@@ -1209,7 +1211,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
 
 	ci.map = dm_get_table(md);
 	if (unlikely(!ci.map)) {
-		if (!bio_barrier(bio))
+		if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
 			bio_io_error(bio);
 		else
 			if (!md->barrier_error)
@@ -1321,7 +1323,7 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
 	 * we have to queue this io for later.
 	 */
 	if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
-	    unlikely(bio_barrier(bio))) {
+	    unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		up_read(&md->io_lock);
 
 		if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
@@ -1344,7 +1346,7 @@ static int dm_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct mapped_device *md = q->queuedata;
 
-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
@@ -1785,7 +1787,8 @@ static struct mapped_device *alloc_dev(int minor)
 	if (!md->disk)
 		goto bad_disk;
 
-	atomic_set(&md->pending, 0);
+	atomic_set(&md->pending[0], 0);
+	atomic_set(&md->pending[1], 0);
 	init_waitqueue_head(&md->wait);
 	INIT_WORK(&md->work, dm_wq_work);
 	init_waitqueue_head(&md->eventq);
@@ -2088,7 +2091,8 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
 				break;
 			}
 			spin_unlock_irqrestore(q->queue_lock, flags);
-		} else if (!atomic_read(&md->pending))
+		} else if (!atomic_read(&md->pending[0]) &&
+			   !atomic_read(&md->pending[1]))
 			break;
 
 		if (interruptible == TASK_INTERRUPTIBLE &&
@@ -2164,7 +2168,7 @@ static void dm_wq_work(struct work_struct *work)
 		if (dm_request_based(md))
 			generic_make_request(c);
 		else {
-			if (bio_barrier(c))
+			if (bio_rw_flagged(c, BIO_RW_BARRIER))
 				process_barrier(md, c);
 			else
 				__split_and_process_bio(md, c);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 5fe39c2a3d2b..ea4842905444 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -288,7 +288,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
 	sector_t start_sector;
 	int cpu;
 
-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 7140909f6662..89e76819f61f 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -90,7 +90,7 @@ static void multipath_end_request(struct bio *bio, int error)
 
 	if (uptodate)
 		multipath_end_bh_io(mp_bh, 0);
-	else if (!bio_rw_ahead(bio)) {
+	else if (!bio_rw_flagged(bio, BIO_RW_AHEAD)) {
 		/*
 		 * oops, IO error:
 		 */
@@ -144,7 +144,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
 	const int rw = bio_data_dir(bio);
 	int cpu;
 
-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 898e2bdfee47..f845ed98fec9 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -448,7 +448,7 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio)
 	const int rw = bio_data_dir(bio);
 	int cpu;
 
-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 8726fd7ebce5..ff7ed3335995 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -782,8 +782,9 @@ static int make_request(struct request_queue *q, struct bio * bio)
 	struct bio_list bl;
 	struct page **behind_pages = NULL;
 	const int rw = bio_data_dir(bio);
-	const int do_sync = bio_sync(bio);
-	int cpu, do_barriers;
+	const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+	int cpu;
+	bool do_barriers;
 	mdk_rdev_t *blocked_rdev;
 
 	/*
@@ -797,7 +798,8 @@ static int make_request(struct request_queue *q, struct bio * bio)
 
 	md_write_start(mddev, bio); /* wait on superblock update early */
 
-	if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
+	if (unlikely(!mddev->barriers_work &&
+		     bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		if (rw == WRITE)
 			md_write_end(mddev);
 		bio_endio(bio, -EOPNOTSUPP);
@@ -925,7 +927,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
 	atomic_set(&r1_bio->remaining, 0);
 	atomic_set(&r1_bio->behind_remaining, 0);
 
-	do_barriers = bio_barrier(bio);
+	do_barriers = bio_rw_flagged(bio, BIO_RW_BARRIER);
 	if (do_barriers)
 		set_bit(R1BIO_Barrier, &r1_bio->state);
 
@@ -1600,7 +1602,7 @@ static void raid1d(mddev_t *mddev)
 			 * We already have a nr_pending reference on these rdevs.
 			 */
 			int i;
-			const int do_sync = bio_sync(r1_bio->master_bio);
+			const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
 			clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
 			clear_bit(R1BIO_Barrier, &r1_bio->state);
 			for (i=0; i < conf->raid_disks; i++)
@@ -1654,7 +1656,7 @@ static void raid1d(mddev_t *mddev)
 				       (unsigned long long)r1_bio->sector);
 				raid_end_bio_io(r1_bio);
 			} else {
-				const int do_sync = bio_sync(r1_bio->master_bio);
+				const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
 				r1_bio->bios[r1_bio->read_disk] =
 					mddev->ro ? IO_BLOCKED : NULL;
 				r1_bio->read_disk = disk;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 3d9020cf6f6e..d0a2152e064f 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -796,12 +796,12 @@ static int make_request(struct request_queue *q, struct bio * bio)
 	int i;
 	int chunk_sects = conf->chunk_mask + 1;
 	const int rw = bio_data_dir(bio);
-	const int do_sync = bio_sync(bio);
+	const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
 	struct bio_list bl;
 	unsigned long flags;
 	mdk_rdev_t *blocked_rdev;
 
-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
@@ -1610,7 +1610,7 @@ static void raid10d(mddev_t *mddev)
 				raid_end_bio_io(r10_bio);
 				bio_put(bio);
 			} else {
-				const int do_sync = bio_sync(r10_bio->master_bio);
+				const bool do_sync = bio_rw_flagged(r10_bio->master_bio, BIO_RW_SYNCIO);
 				bio_put(bio);
 				rdev = conf->mirrors[mirror].rdev;
 				if (printk_ratelimit())
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index b8a2c5dc67ba..826eb3467357 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3606,7 +3606,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
 	const int rw = bio_data_dir(bi);
 	int cpu, remaining;
 
-	if (unlikely(bio_barrier(bi))) {
+	if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
 		bio_endio(bi, -EOPNOTSUPP);
 		return 0;
 	}
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 662024d86949..5987da857103 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -898,8 +898,10 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 				scsi_print_sense("", cmd);
 			scsi_print_command(cmd);
 		}
-		blk_end_request_all(req, -EIO);
-		scsi_next_command(cmd);
+		if (blk_end_request_err(req, -EIO))
+			scsi_requeue_command(q, cmd);
+		else
+			scsi_next_command(cmd);
 		break;
 	case ACTION_REPREP:
 		/* Unprep the request and put it back at the head of the queue.
diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
index 84724187ec3e..ac8577358ba0 100644
--- a/drivers/staging/dst/dcore.c
+++ b/drivers/staging/dst/dcore.c
@@ -112,8 +112,9 @@ static int dst_request(struct request_queue *q, struct bio *bio)
 	 * I worked with.
 	 *
 	 * Empty barriers are not allowed anyway, see 51fd77bd9f512
-	 * for example, although later it was changed to bio_discard()
-	 * only, which does not work in this case.
+	 * for example, although later it was changed to
+	 * bio_rw_flagged(bio, BIO_RW_DISCARD) only, which does not
+	 * work in this case.
 	 */
 	//err = -EOPNOTSUPP;
 	err = 0;