about summary refs log tree commit diff stats
path: root/drivers/md/bcache/request.c
diff options
context:
space:
mode:
authorKent Overstreet <kmo@daterainc.com>2013-09-10 22:16:31 -0400
committerKent Overstreet <kmo@daterainc.com>2014-01-08 16:05:08 -0500
commita5ae4300c15c778722c139953c825cd24d6ff517 (patch)
treee9a633c4a4c5dde8e44b1226a0fbecf0f7c41b3c /drivers/md/bcache/request.c
parentd56d000a1f424aa77538bd5aad18b43037ed20cc (diff)
bcache: Zero less memory
Another minor performance optimization.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Diffstat (limited to 'drivers/md/bcache/request.c')
-rw-r--r--drivers/md/bcache/request.c47
1 files changed, 27 insertions, 20 deletions
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 59b3d6df100b..cce02f19e6c7 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -597,14 +597,12 @@ struct search {
597 /* Stack frame for bio_complete */ 597 /* Stack frame for bio_complete */
598 struct closure cl; 598 struct closure cl;
599 599
600 struct bcache_device *d;
601
602 struct bbio bio; 600 struct bbio bio;
603 struct bio *orig_bio; 601 struct bio *orig_bio;
604 struct bio *cache_miss; 602 struct bio *cache_miss;
603 struct bcache_device *d;
605 604
606 unsigned insert_bio_sectors; 605 unsigned insert_bio_sectors;
607
608 unsigned recoverable:1; 606 unsigned recoverable:1;
609 unsigned write:1; 607 unsigned write:1;
610 unsigned read_dirty_data:1; 608 unsigned read_dirty_data:1;
@@ -712,10 +710,13 @@ static void cache_lookup(struct closure *cl)
712{ 710{
713 struct search *s = container_of(cl, struct search, iop.cl); 711 struct search *s = container_of(cl, struct search, iop.cl);
714 struct bio *bio = &s->bio.bio; 712 struct bio *bio = &s->bio.bio;
713 int ret;
715 714
716 int ret = bch_btree_map_keys(&s->op, s->iop.c, 715 bch_btree_op_init(&s->op, -1);
717 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0), 716
718 cache_lookup_fn, MAP_END_KEY); 717 ret = bch_btree_map_keys(&s->op, s->iop.c,
718 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
719 cache_lookup_fn, MAP_END_KEY);
719 if (ret == -EAGAIN) 720 if (ret == -EAGAIN)
720 continue_at(cl, cache_lookup, bcache_wq); 721 continue_at(cl, cache_lookup, bcache_wq);
721 722
@@ -756,12 +757,12 @@ static void bio_complete(struct search *s)
756 } 757 }
757} 758}
758 759
759static void do_bio_hook(struct search *s) 760static void do_bio_hook(struct search *s, struct bio *orig_bio)
760{ 761{
761 struct bio *bio = &s->bio.bio; 762 struct bio *bio = &s->bio.bio;
762 763
763 bio_init(bio); 764 bio_init(bio);
764 __bio_clone_fast(bio, s->orig_bio); 765 __bio_clone_fast(bio, orig_bio);
765 bio->bi_end_io = request_endio; 766 bio->bi_end_io = request_endio;
766 bio->bi_private = &s->cl; 767 bio->bi_private = &s->cl;
767 768
@@ -780,26 +781,32 @@ static void search_free(struct closure *cl)
780 mempool_free(s, s->d->c->search); 781 mempool_free(s, s->d->c->search);
781} 782}
782 783
783static struct search *search_alloc(struct bio *bio, struct bcache_device *d) 784static inline struct search *search_alloc(struct bio *bio,
785 struct bcache_device *d)
784{ 786{
785 struct search *s; 787 struct search *s;
786 788
787 s = mempool_alloc(d->c->search, GFP_NOIO); 789 s = mempool_alloc(d->c->search, GFP_NOIO);
788 memset(s, 0, offsetof(struct search, iop.insert_keys));
789 790
790 __closure_init(&s->cl, NULL); 791 closure_init(&s->cl, NULL);
792 do_bio_hook(s, bio);
791 793
792 s->iop.inode = d->id;
793 s->iop.c = d->c;
794 s->d = d;
795 s->op.lock = -1;
796 s->iop.write_point = hash_long((unsigned long) current, 16);
797 s->orig_bio = bio; 794 s->orig_bio = bio;
798 s->write = (bio->bi_rw & REQ_WRITE) != 0; 795 s->cache_miss = NULL;
799 s->iop.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0; 796 s->d = d;
800 s->recoverable = 1; 797 s->recoverable = 1;
798 s->write = (bio->bi_rw & REQ_WRITE) != 0;
799 s->read_dirty_data = 0;
801 s->start_time = jiffies; 800 s->start_time = jiffies;
802 do_bio_hook(s); 801
802 s->iop.c = d->c;
803 s->iop.bio = NULL;
804 s->iop.inode = d->id;
805 s->iop.write_point = hash_long((unsigned long) current, 16);
806 s->iop.write_prio = 0;
807 s->iop.error = 0;
808 s->iop.flags = 0;
809 s->iop.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
803 810
804 return s; 811 return s;
805} 812}
@@ -845,7 +852,7 @@ static void cached_dev_read_error(struct closure *cl)
845 trace_bcache_read_retry(s->orig_bio); 852 trace_bcache_read_retry(s->orig_bio);
846 853
847 s->iop.error = 0; 854 s->iop.error = 0;
848 do_bio_hook(s); 855 do_bio_hook(s, s->orig_bio);
849 856
850 /* XXX: invalidate cache */ 857 /* XXX: invalidate cache */
851 858