author		Linus Torvalds <torvalds@linux-foundation.org>	2014-01-30 14:40:10 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-30 14:40:10 -0500
commit		53d8ab29f8f6d67e37857b68189b38fa3d87dd8e (patch)
tree		3c770b58f0404c67b1b084f626dcafa8464c7512 /drivers/md/bcache/request.c
parent		f568849edac8611d603e00bd6cbbcfea09395ae6 (diff)
parent		14424be4dbfa127001ad623869f7ee4c7635e991 (diff)
Merge branch 'for-3.14/drivers' of git://git.kernel.dk/linux-block
Pull block IO driver changes from Jens Axboe:

 - bcache update from Kent Overstreet

 - two bcache fixes from Nicholas Swenson

 - cciss pci init error fix from Andrew

 - underflow fix in the parallel IDE pg_write code from Dan Carpenter.
   I'm sure the 1 (or 0) users of that are now happy.

 - two PCI related fixes for sx8 from Jingoo Han

 - floppy init fix for first block read from Jiri Kosina

 - pktcdvd error return miss fix from Julia Lawall

 - removal of IRQF_SHARED from the SEGA Dreamcast CD-ROM code from
   Michael Opdenacker

 - comment typo fix for the loop driver from Olaf Hering

 - potential oops fix for null_blk from Raghavendra K T

 - two fixes from Sam Bradshaw (Micron) for the mtip32xx driver, fixing
   an OOM problem and a problem with handling security locked conditions

* 'for-3.14/drivers' of git://git.kernel.dk/linux-block: (47 commits)
  mg_disk: Spelling s/finised/finished/
  null_blk: Null pointer deference problem in alloc_page_buffers
  mtip32xx: Correctly handle security locked condition
  mtip32xx: Make SGL container per-command to eliminate high order dma allocation
  drivers/block/loop.c: fix comment typo in loop_config_discard
  drivers/block/cciss.c:cciss_init_one(): use proper errnos
  drivers/block/paride/pg.c: underflow bug in pg_write()
  drivers/block/sx8.c: remove unnecessary pci_set_drvdata()
  drivers/block/sx8.c: use module_pci_driver()
  floppy: bail out in open() if drive is not responding to block0 read
  bcache: Fix auxiliary search trees for key size > cacheline size
  bcache: Don't return -EINTR when insert finished
  bcache: Improve bucket_prio() calculation
  bcache: Add bch_bkey_equal_header()
  bcache: update bch_bkey_try_merge
  bcache: Move insert_fixup() to btree_keys_ops
  bcache: Convert sorting to btree_keys
  bcache: Convert debug code to btree_keys
  bcache: Convert btree_iter to struct btree_keys
  bcache: Refactor bset_tree sysfs stats
  ...
Diffstat (limited to 'drivers/md/bcache/request.c')
-rw-r--r--	drivers/md/bcache/request.c	72
1 file changed, 49 insertions(+), 23 deletions(-)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index c906571997d7..72cd213f213f 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -254,6 +254,24 @@ static void bch_data_insert_keys(struct closure *cl)
 	closure_return(cl);
 }
 
+static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
+			       struct cache_set *c)
+{
+	size_t oldsize = bch_keylist_nkeys(l);
+	size_t newsize = oldsize + u64s;
+
+	/*
+	 * The journalling code doesn't handle the case where the keys to insert
+	 * is bigger than an empty write: If we just return -ENOMEM here,
+	 * bio_insert() and bio_invalidate() will insert the keys created so far
+	 * and finish the rest when the keylist is empty.
+	 */
+	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
+		return -ENOMEM;
+
+	return __bch_keylist_realloc(l, u64s);
+}
+
 static void bch_data_invalidate(struct closure *cl)
 {
 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
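The new wrapper caps a single batch of keys at what one journal write can hold: a cache-set block minus the struct jset header. A minimal sketch of that bound, assuming block_bytes() returns the block size in bytes (the helper name here is invented for illustration, not part of the patch):

	/* Sketch only: callers that hit this limit flush the keys gathered
	 * so far and resume once the keylist drains.
	 */
	static inline bool fits_in_journal_write(size_t old_u64s, unsigned u64s,
						 unsigned block_bytes)
	{
		return (old_u64s + u64s) * sizeof(uint64_t) <=
		       block_bytes - sizeof(struct jset);
	}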
@@ -266,7 +284,7 @@ static void bch_data_invalidate(struct closure *cl)
 		unsigned sectors = min(bio_sectors(bio),
 				       1U << (KEY_SIZE_BITS - 1));
 
-		if (bch_keylist_realloc(&op->insert_keys, 0, op->c))
+		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
 			goto out;
 
 		bio->bi_iter.bi_sector	+= sectors;
@@ -356,7 +374,7 @@ static void bch_data_insert_start(struct closure *cl)
 
 		/* 1 for the device pointer and 1 for the chksum */
 		if (bch_keylist_realloc(&op->insert_keys,
-					1 + (op->csum ? 1 : 0),
+					3 + (op->csum ? 1 : 0),
 					op->c))
 			continue_at(cl, bch_data_insert_keys, bcache_wq);
 
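Both reservations grow by two (0 becomes 2 in bch_data_invalidate(), 1 + csum becomes 3 + csum here) because bch_keylist_realloc() now counts whole u64s rather than pointers, so the bkey header has to be included. A worked sketch, assuming bcache's usual layout of a two-u64 key header plus one u64 per pointer and one for an inline checksum:

	/* Sketch only: u64s needed for one key under the assumed layout. */
	static inline unsigned key_u64s(unsigned nptrs, bool csum)
	{
		return 2 + nptrs + (csum ? 1 : 0);
	}
	/* bch_data_invalidate():   key_u64s(0, false)    == 2 (header only)
	 * bch_data_insert_start(): key_u64s(1, op->csum) == 3 or 4
	 */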
@@ -596,14 +614,12 @@ struct search {
 	/* Stack frame for bio_complete */
 	struct closure		cl;
 
-	struct bcache_device	*d;
-
 	struct bbio		bio;
 	struct bio		*orig_bio;
 	struct bio		*cache_miss;
+	struct bcache_device	*d;
 
 	unsigned		insert_bio_sectors;
-
 	unsigned		recoverable:1;
 	unsigned		write:1;
 	unsigned		read_dirty_data:1;
@@ -629,7 +645,8 @@ static void bch_cache_read_endio(struct bio *bio, int error)
 
 	if (error)
 		s->iop.error = error;
-	else if (ptr_stale(s->iop.c, &b->key, 0)) {
+	else if (!KEY_DIRTY(&b->key) &&
+		 ptr_stale(s->iop.c, &b->key, 0)) {
 		atomic_long_inc(&s->iop.c->cache_read_races);
 		s->iop.error = -EINTR;
 	}
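After this change a stale-looking pointer under a dirty key is no longer treated as a read race: dirty data has not been written back, so its bucket cannot have been reused while the read was in flight. The condition being evaluated, pulled out for clarity:

	/* Sketch only: retry with -EINTR just when the key is clean AND the
	 * bucket generation says the pointer went stale mid-read.
	 */
	bool read_race = !KEY_DIRTY(&b->key) &&
			 ptr_stale(s->iop.c, &b->key, 0);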
@@ -710,10 +727,13 @@ static void cache_lookup(struct closure *cl)
 {
 	struct search *s = container_of(cl, struct search, iop.cl);
 	struct bio *bio = &s->bio.bio;
+	int ret;
+
+	bch_btree_op_init(&s->op, -1);
 
-	int ret = bch_btree_map_keys(&s->op, s->iop.c,
-				     &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
-				     cache_lookup_fn, MAP_END_KEY);
+	ret = bch_btree_map_keys(&s->op, s->iop.c,
+				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
+				 cache_lookup_fn, MAP_END_KEY);
 	if (ret == -EAGAIN)
 		continue_at(cl, cache_lookup, bcache_wq);
 
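This pairs with the search_alloc() hunk below, where the bare s->op.lock = -1 assignment disappears: the op is now set up by bch_btree_op_init(&s->op, -1) on every pass through cache_lookup(), which matters because an -EAGAIN result requeues the closure and re-enters the function from the top. The retry shape, sketched with placeholder names:

	/* Sketch only: per-pass state must be reset before each walk. */
	for (;;) {
		bch_btree_op_init(&s->op, -1);
		if (walk_btree() != -EAGAIN)	/* bch_btree_map_keys(...) */
			break;
		/* requeued via continue_at(cl, cache_lookup, bcache_wq) */
	}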
@@ -754,12 +774,12 @@ static void bio_complete(struct search *s)
 	}
 }
 
-static void do_bio_hook(struct search *s)
+static void do_bio_hook(struct search *s, struct bio *orig_bio)
 {
 	struct bio *bio = &s->bio.bio;
 
 	bio_init(bio);
-	__bio_clone_fast(bio, s->orig_bio);
+	__bio_clone_fast(bio, orig_bio);
 	bio->bi_end_io		= request_endio;
 	bio->bi_private		= &s->cl;
 
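Taking the bio to clone as an explicit argument lets search_alloc() call do_bio_hook() before s->orig_bio is assigned, while the read-retry path keeps its old behaviour by passing s->orig_bio. Both call sites as they appear after this patch:

	do_bio_hook(s, bio);		/* search_alloc(): orig_bio not yet set */
	do_bio_hook(s, s->orig_bio);	/* cached_dev_read_error(): retry clone */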
@@ -778,26 +798,32 @@ static void search_free(struct closure *cl)
 	mempool_free(s, s->d->c->search);
 }
 
-static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
+static inline struct search *search_alloc(struct bio *bio,
+					  struct bcache_device *d)
 {
 	struct search *s;
 
 	s = mempool_alloc(d->c->search, GFP_NOIO);
-	memset(s, 0, offsetof(struct search, iop.insert_keys));
 
-	__closure_init(&s->cl, NULL);
+	closure_init(&s->cl, NULL);
+	do_bio_hook(s, bio);
 
-	s->iop.inode		= d->id;
-	s->iop.c		= d->c;
-	s->d			= d;
-	s->op.lock		= -1;
-	s->iop.write_point	= hash_long((unsigned long) current, 16);
 	s->orig_bio		= bio;
-	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
-	s->iop.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
+	s->cache_miss		= NULL;
+	s->d			= d;
 	s->recoverable		= 1;
+	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
+	s->read_dirty_data	= 0;
 	s->start_time		= jiffies;
-	do_bio_hook(s);
+
+	s->iop.c		= d->c;
+	s->iop.bio		= NULL;
+	s->iop.inode		= d->id;
+	s->iop.write_point	= hash_long((unsigned long) current, 16);
+	s->iop.write_prio	= 0;
+	s->iop.error		= 0;
+	s->iop.flags		= 0;
+	s->iop.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
 
 	return s;
 }
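search_alloc() also drops the partial memset() in favour of assigning every live field explicitly, so nothing depends on which members happen to precede iop.insert_keys in struct search (note the *d member moving in the struct search hunk above). A minimal illustration of the hazard in the old pattern, using a made-up struct:

	/* Sketch only: with memset-up-to-offsetof, reordering members
	 * silently changes what gets zeroed; explicit assignment doesn't.
	 */
	struct example { int a; int b; char tail[64]; };
	struct example e;
	memset(&e, 0, offsetof(struct example, tail));	/* zeroes a and b only */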
@@ -843,7 +869,7 @@ static void cached_dev_read_error(struct closure *cl)
 		trace_bcache_read_retry(s->orig_bio);
 
 		s->iop.error = 0;
-		do_bio_hook(s);
+		do_bio_hook(s, s->orig_bio);
 
 		/* XXX: invalidate cache */
 