author     Jens Axboe <jens.axboe@oracle.com>   2009-02-23 03:03:10 -0500
committer  Jens Axboe <jens.axboe@oracle.com>   2009-02-26 04:45:48 -0500
commit     1e42807918d17e8c93bf14fbb74be84b141334c1 (patch)
tree       55b15f0f6b2c666efeb938d064f24fdd268faf40 /block/blk-merge.c
parent     5e4c91c84b194b26cf592779e451f4b5be777cba (diff)
block: reduce stack footprint of blk_recount_segments()
blk_recalc_rq_segments() requires a request structure to be passed in, which
we don't have in blk_recount_segments(). So the latter allocates one on the
stack, using more than 400 bytes of stack just for that temporary. This can
push us over one page of stack, from ext4 at least (in the trace below, the
first number is the total stack consumed at that point in the call chain and
the second is that function's own frame size):
0) 4560 400 blk_recount_segments+0x43/0x62
1) 4160 32 bio_phys_segments+0x1c/0x24
2) 4128 32 blk_rq_bio_prep+0x2a/0xf9
3) 4096 32 init_request_from_bio+0xf9/0xfe
4) 4064 112 __make_request+0x33c/0x3f6
5) 3952 144 generic_make_request+0x2d1/0x321
6) 3808 64 submit_bio+0xb9/0xc3
7) 3744 48 submit_bh+0xea/0x10e
8) 3696 368 ext4_mb_init_cache+0x257/0xa6a [ext4]
9) 3328 288 ext4_mb_regular_allocator+0x421/0xcd9 [ext4]
10) 3040 160 ext4_mb_new_blocks+0x211/0x4b4 [ext4]
11) 2880 336 ext4_ext_get_blocks+0xb61/0xd45 [ext4]
12) 2544 96 ext4_get_blocks_wrap+0xf2/0x200 [ext4]
13) 2448 80 ext4_da_get_block_write+0x6e/0x16b [ext4]
14) 2368 352 mpage_da_map_blocks+0x7e/0x4b3 [ext4]
15) 2016 352 ext4_da_writepages+0x2ce/0x43c [ext4]
16) 1664 32 do_writepages+0x2d/0x3c
17) 1632 144 __writeback_single_inode+0x162/0x2cd
18) 1488 96 generic_sync_sb_inodes+0x1e3/0x32b
19) 1392 16 sync_sb_inodes+0xe/0x10
20) 1376 48 writeback_inodes+0x69/0xb3
21) 1328 208 balance_dirty_pages_ratelimited_nr+0x187/0x2f9
22) 1120 224 generic_file_buffered_write+0x1d4/0x2c4
23) 896 176 __generic_file_aio_write_nolock+0x35f/0x393
24) 720 80 generic_file_aio_write+0x6c/0xc8
25) 640 80 ext4_file_write+0xa9/0x137 [ext4]
26) 560 320 do_sync_write+0xf0/0x137
27) 240 48 vfs_write+0xb3/0x13c
28) 192 64 sys_write+0x4c/0x74
29) 128 128 system_call_fastpath+0x16/0x1b
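To see why that one frame is so heavy: blk_recount_segments() builds a whole
struct request on its own stack purely to satisfy the callee's parameter
type. Below is a minimal standalone sketch of that pattern (plain userspace
C, not kernel code; fake_request and both function names are illustrative
stand-ins, with the array sized to model struct request's roughly 400-byte
footprint):

#include <stdio.h>
#include <string.h>

struct fake_request {
        char payload[400];              /* models struct request's bulk */
        int nr_phys_segments;
};

/* callee insists on a full request, even though it only needs the bio */
static void recalc_segments(struct fake_request *rq)
{
        rq->nr_phys_segments = 1;       /* pretend to count segments */
}

static int recount_segments(void)
{
        struct fake_request rq;         /* ~400 bytes of stack, per call */

        memset(&rq, 0, sizeof(rq));
        recalc_segments(&rq);
        return rq.nr_phys_segments;
}

int main(void)
{
        printf("segments: %d, on-stack temporary: %zu bytes\n",
               recount_segments(), sizeof(struct fake_request));
        return 0;
}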
Split the segment counting out into a __blk_recalc_rq_segments() helper so
that we avoid allocating an on-stack request just to check the physical
segment count.
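The shape of that fix, sketched with the same kind of stand-in types
(fake_queue and fake_bio are simplified illustrations, not the kernel
structures, and the merge rule here is deliberately crude): the walk moves
into a helper that takes only a queue and a bio chain, so a bio-only caller
no longer fabricates a request on the stack. A NULL seg_size_ptr lets
callers that don't need the trailing segment size skip it, mirroring the
patch's optional out-parameter.

#include <stdio.h>

struct fake_queue {
        unsigned int max_segment_size;
};

struct fake_bio {
        struct fake_bio *bi_next;       /* bios chain through bi_next */
        unsigned int bi_size;
        unsigned int bi_phys_segments;
};

/* the extracted helper: no struct request in sight */
static unsigned int __recalc_segments(struct fake_queue *q,
                                      struct fake_bio *bio,
                                      unsigned int *seg_size_ptr)
{
        unsigned int nr_phys_segs = 0, seg_size = 0;
        int new_seg = 1;                /* first chunk always opens a segment */

        for (; bio; bio = bio->bi_next) {
                /* real code walks bio_vecs and applies the merge rules here */
                if (new_seg || seg_size + bio->bi_size > q->max_segment_size) {
                        nr_phys_segs++;
                        seg_size = bio->bi_size;
                        new_seg = 0;
                } else {
                        seg_size += bio->bi_size;
                }
        }
        if (seg_size_ptr)               /* optional out-parameter */
                *seg_size_ptr = seg_size;
        return nr_phys_segs;
}

/* bio-only caller: isolate one bio, count it, restore the chain */
static void recount_segments(struct fake_queue *q, struct fake_bio *bio)
{
        struct fake_bio *nxt = bio->bi_next;

        bio->bi_next = NULL;
        bio->bi_phys_segments = __recalc_segments(q, bio, NULL);
        bio->bi_next = nxt;
}

int main(void)
{
        struct fake_queue q = { .max_segment_size = 4096 };
        struct fake_bio b = { .bi_next = NULL, .bi_size = 512 };

        recount_segments(&q, &b);
        printf("phys segments: %u\n", b.bi_phys_segments);     /* 1 */
        return 0;
}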
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block/blk-merge.c')

-rw-r--r--   block/blk-merge.c   94
1 file changed, 53 insertions(+), 41 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index b92f5b0866b0..a104593e70c3 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -38,72 +38,84 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
         }
 }
 
-void blk_recalc_rq_segments(struct request *rq)
+static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
+                                             struct bio *bio,
+                                             unsigned int *seg_size_ptr)
 {
-        int nr_phys_segs;
         unsigned int phys_size;
         struct bio_vec *bv, *bvprv = NULL;
-        int seg_size;
-        int cluster;
-        struct req_iterator iter;
-        int high, highprv = 1;
-        struct request_queue *q = rq->q;
+        int cluster, i, high, highprv = 1;
+        unsigned int seg_size, nr_phys_segs;
+        struct bio *fbio;
 
-        if (!rq->bio)
-                return;
+        if (!bio)
+                return 0;
 
+        fbio = bio;
         cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
         seg_size = 0;
         phys_size = nr_phys_segs = 0;
-        rq_for_each_segment(bv, rq, iter) {
-                /*
-                 * the trick here is making sure that a high page is never
-                 * considered part of another segment, since that might
-                 * change with the bounce page.
-                 */
-                high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
-                if (high || highprv)
-                        goto new_segment;
-                if (cluster) {
-                        if (seg_size + bv->bv_len > q->max_segment_size)
-                                goto new_segment;
-                        if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
-                                goto new_segment;
-                        if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+        for_each_bio(bio) {
+                bio_for_each_segment(bv, bio, i) {
+                        /*
+                         * the trick here is making sure that a high page is
+                         * never considered part of another segment, since that
+                         * might change with the bounce page.
+                         */
+                        high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
+                        if (high || highprv)
                                 goto new_segment;
+                        if (cluster) {
+                                if (seg_size + bv->bv_len > q->max_segment_size)
+                                        goto new_segment;
+                                if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
+                                        goto new_segment;
+                                if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+                                        goto new_segment;
+
+                                seg_size += bv->bv_len;
+                                bvprv = bv;
+                                continue;
+                        }
+new_segment:
+                        if (nr_phys_segs == 1 && seg_size >
+                            fbio->bi_seg_front_size)
+                                fbio->bi_seg_front_size = seg_size;
 
-                        seg_size += bv->bv_len;
+                        nr_phys_segs++;
                        bvprv = bv;
-                        continue;
+                        seg_size = bv->bv_len;
+                        highprv = high;
                 }
-new_segment:
-                if (nr_phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
-                        rq->bio->bi_seg_front_size = seg_size;
-
-                nr_phys_segs++;
-                bvprv = bv;
-                seg_size = bv->bv_len;
-                highprv = high;
         }
 
-        if (nr_phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
+        if (seg_size_ptr)
+                *seg_size_ptr = seg_size;
+
+        return nr_phys_segs;
+}
+
+void blk_recalc_rq_segments(struct request *rq)
+{
+        unsigned int seg_size = 0, phys_segs;
+
+        phys_segs = __blk_recalc_rq_segments(rq->q, rq->bio, &seg_size);
+
+        if (phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
                 rq->bio->bi_seg_front_size = seg_size;
         if (seg_size > rq->biotail->bi_seg_back_size)
                 rq->biotail->bi_seg_back_size = seg_size;
 
-        rq->nr_phys_segments = nr_phys_segs;
+        rq->nr_phys_segments = phys_segs;
 }
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
-        struct request rq;
         struct bio *nxt = bio->bi_next;
-        rq.q = q;
-        rq.bio = rq.biotail = bio;
+
         bio->bi_next = NULL;
-        blk_recalc_rq_segments(&rq);
+        bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, NULL);
         bio->bi_next = nxt;
-        bio->bi_phys_segments = rq.nr_phys_segments;
         bio->bi_flags |= (1 << BIO_SEG_VALID);
 }
 EXPORT_SYMBOL(blk_recount_segments);