Diffstat (limited to 'block')

-rw-r--r--  block/blk-barrier.c    |   2
-rw-r--r--  block/blk-core.c       | 100
-rw-r--r--  block/blk-integrity.c  |  25
-rw-r--r--  block/blk-merge.c      |  94
-rw-r--r--  block/blk-sysfs.c      |  58
-rw-r--r--  block/blk-timeout.c    |   9
-rw-r--r--  block/blk.h            |   8
-rw-r--r--  block/blktrace.c       |  74
-rw-r--r--  block/bsg.c            |  17
-rw-r--r--  block/cfq-iosched.c    |  39
-rw-r--r--  block/genhd.c          |  24

11 files changed, 298 insertions(+), 152 deletions(-)
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 8eba4e43bb0c..f7dae57e6cab 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -302,7 +302,7 @@ static void bio_end_empty_barrier(struct bio *bio, int err)
  * Description:
  *    Issue a flush for the block device in question. Caller can supply
  *    room for storing the error offset in case of a flush error, if they
- *    wish to. Caller must run wait_for_completion() on its own.
+ *    wish to.
  */
 int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 {
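The doc-comment change above reflects that blkdev_issue_flush() now does the waiting itself, so callers no longer pair it with wait_for_completion(). A minimal hedged sketch of a caller; the function name and the -EOPNOTSUPP handling are illustrative, not part of this patch:

/* Illustrative sketch: flush a device's volatile write cache using the
 * interface documented above; 'bdev' is assumed to be an already-opened
 * struct block_device. */
static int example_flush(struct block_device *bdev)
{
	sector_t error_sector;
	int err;

	err = blkdev_issue_flush(bdev, &error_sector);
	if (err == -EOPNOTSUPP)	/* flush/barriers not supported: not fatal */
		err = 0;
	return err;
}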
diff --git a/block/blk-core.c b/block/blk-core.c
index a824e49c0d0a..29bcfac6c688 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -64,11 +64,12 @@ static struct workqueue_struct *kblockd_workqueue;
 
 static void drive_stat_acct(struct request *rq, int new_io)
 {
+	struct gendisk *disk = rq->rq_disk;
 	struct hd_struct *part;
 	int rw = rq_data_dir(rq);
 	int cpu;
 
-	if (!blk_fs_request(rq) || !rq->rq_disk)
+	if (!blk_fs_request(rq) || !disk || !blk_do_io_stat(disk->queue))
 		return;
 
 	cpu = part_stat_lock();
@@ -599,8 +600,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 	q->request_fn = rfn;
 	q->prep_rq_fn = NULL;
 	q->unplug_fn = generic_unplug_device;
-	q->queue_flags = (1 << QUEUE_FLAG_CLUSTER |
-			  1 << QUEUE_FLAG_STACKABLE);
+	q->queue_flags = QUEUE_FLAG_DEFAULT;
 	q->queue_lock = lock;
 
 	blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
@@ -1125,6 +1125,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 
 	if (bio_sync(bio))
 		req->cmd_flags |= REQ_RW_SYNC;
+	if (bio_unplug(bio))
+		req->cmd_flags |= REQ_UNPLUG;
 	if (bio_rw_meta(bio))
 		req->cmd_flags |= REQ_RW_META;
 
@@ -1141,6 +1143,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	int el_ret, nr_sectors;
 	const unsigned short prio = bio_prio(bio);
 	const int sync = bio_sync(bio);
+	const int unplug = bio_unplug(bio);
 	int rw_flags;
 
 	nr_sectors = bio_sectors(bio);
@@ -1244,7 +1247,7 @@ get_rq:
 		blk_plug_device(q);
 	add_request(q, req);
 out:
-	if (sync || blk_queue_nonrot(q))
+	if (unplug || blk_queue_nonrot(q))
 		__generic_unplug_device(q);
 	spin_unlock_irq(q->queue_lock);
 	return 0;
@@ -1448,6 +1451,11 @@ static inline void __generic_make_request(struct bio *bio)
 			err = -EOPNOTSUPP;
 			goto end_io;
 		}
+		if (bio_barrier(bio) && bio_has_data(bio) &&
+		    (q->next_ordered == QUEUE_ORDERED_NONE)) {
+			err = -EOPNOTSUPP;
+			goto end_io;
+		}
 
 		ret = q->make_request_fn(q, bio);
 	} while (ret);
@@ -1655,6 +1663,55 @@ void blkdev_dequeue_request(struct request *req)
 }
 EXPORT_SYMBOL(blkdev_dequeue_request);
 
+static void blk_account_io_completion(struct request *req, unsigned int bytes)
+{
+	struct gendisk *disk = req->rq_disk;
+
+	if (!disk || !blk_do_io_stat(disk->queue))
+		return;
+
+	if (blk_fs_request(req)) {
+		const int rw = rq_data_dir(req);
+		struct hd_struct *part;
+		int cpu;
+
+		cpu = part_stat_lock();
+		part = disk_map_sector_rcu(req->rq_disk, req->sector);
+		part_stat_add(cpu, part, sectors[rw], bytes >> 9);
+		part_stat_unlock();
+	}
+}
+
+static void blk_account_io_done(struct request *req)
+{
+	struct gendisk *disk = req->rq_disk;
+
+	if (!disk || !blk_do_io_stat(disk->queue))
+		return;
+
+	/*
+	 * Account IO completion. bar_rq isn't accounted as a normal
+	 * IO on queueing nor completion. Accounting the containing
+	 * request is enough.
+	 */
+	if (blk_fs_request(req) && req != &req->q->bar_rq) {
+		unsigned long duration = jiffies - req->start_time;
+		const int rw = rq_data_dir(req);
+		struct hd_struct *part;
+		int cpu;
+
+		cpu = part_stat_lock();
+		part = disk_map_sector_rcu(disk, req->sector);
+
+		part_stat_inc(cpu, part, ios[rw]);
+		part_stat_add(cpu, part, ticks[rw], duration);
+		part_round_stats(cpu, part);
+		part_dec_in_flight(part);
+
+		part_stat_unlock();
+	}
+}
+
 /**
  * __end_that_request_first - end I/O on a request
  * @req:      the request being processed
@@ -1690,16 +1747,7 @@ static int __end_that_request_first(struct request *req, int error,
 				(unsigned long long)req->sector);
 	}
 
-	if (blk_fs_request(req) && req->rq_disk) {
-		const int rw = rq_data_dir(req);
-		struct hd_struct *part;
-		int cpu;
-
-		cpu = part_stat_lock();
-		part = disk_map_sector_rcu(req->rq_disk, req->sector);
-		part_stat_add(cpu, part, sectors[rw], nr_bytes >> 9);
-		part_stat_unlock();
-	}
+	blk_account_io_completion(req, nr_bytes);
 
 	total_bytes = bio_nbytes = 0;
 	while ((bio = req->bio) != NULL) {
@@ -1779,8 +1827,6 @@ static int __end_that_request_first(struct request *req, int error,
  */
 static void end_that_request_last(struct request *req, int error)
 {
-	struct gendisk *disk = req->rq_disk;
-
 	if (blk_rq_tagged(req))
 		blk_queue_end_tag(req->q, req);
 
@@ -1792,27 +1838,7 @@ static void end_that_request_last(struct request *req, int error)
 
 	blk_delete_timer(req);
 
-	/*
-	 * Account IO completion. bar_rq isn't accounted as a normal
-	 * IO on queueing nor completion. Accounting the containing
-	 * request is enough.
-	 */
-	if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
-		unsigned long duration = jiffies - req->start_time;
-		const int rw = rq_data_dir(req);
-		struct hd_struct *part;
-		int cpu;
-
-		cpu = part_stat_lock();
-		part = disk_map_sector_rcu(disk, req->sector);
-
-		part_stat_inc(cpu, part, ios[rw]);
-		part_stat_add(cpu, part, ticks[rw], duration);
-		part_round_stats(cpu, part);
-		part_dec_in_flight(part);
-
-		part_stat_unlock();
-	}
+	blk_account_io_done(req);
 
 	if (req->end_io)
 		req->end_io(req, error);
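The accounting paths above are now gated by blk_do_io_stat(), i.e. by QUEUE_FLAG_IO_STAT on the queue. A hedged sketch of how a driver could toggle that flag itself, using the same queue_flag helpers that queue_iostats_store() in blk-sysfs.c (further down) uses; the function name is illustrative only:

/* Illustrative sketch: enable or disable per-queue I/O accounting from a
 * driver. queue_flag_set()/queue_flag_clear() expect q->queue_lock held,
 * matching how the sysfs store handlers below use them. */
static void example_set_iostats(struct request_queue *q, bool enable)
{
	spin_lock_irq(q->queue_lock);
	if (enable)
		queue_flag_set(QUEUE_FLAG_IO_STAT, q);
	else
		queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
	spin_unlock_irq(q->queue_lock);
}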
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 61a8e2f8fdd0..91fa8e06b6a5 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -309,24 +309,24 @@ static struct kobj_type integrity_ktype = {
 /**
  * blk_integrity_register - Register a gendisk as being integrity-capable
  * @disk:	struct gendisk pointer to make integrity-aware
- * @template:	integrity profile
+ * @template:	optional integrity profile to register
  *
  * Description: When a device needs to advertise itself as being able
  * to send/receive integrity metadata it must use this function to
  * register the capability with the block layer. The template is a
  * blk_integrity struct with values appropriate for the underlying
- * hardware. See Documentation/block/data-integrity.txt.
+ * hardware. If template is NULL the new profile is allocated but
+ * not filled out. See Documentation/block/data-integrity.txt.
  */
 int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
 {
 	struct blk_integrity *bi;
 
 	BUG_ON(disk == NULL);
-	BUG_ON(template == NULL);
 
 	if (disk->integrity == NULL) {
 		bi = kmem_cache_alloc(integrity_cachep,
 				      GFP_KERNEL | __GFP_ZERO);
 		if (!bi)
 			return -1;
 
@@ -346,13 +346,16 @@ int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
 	bi = disk->integrity;
 
 	/* Use the provided profile as template */
-	bi->name = template->name;
-	bi->generate_fn = template->generate_fn;
-	bi->verify_fn = template->verify_fn;
-	bi->tuple_size = template->tuple_size;
-	bi->set_tag_fn = template->set_tag_fn;
-	bi->get_tag_fn = template->get_tag_fn;
-	bi->tag_size = template->tag_size;
+	if (template != NULL) {
+		bi->name = template->name;
+		bi->generate_fn = template->generate_fn;
+		bi->verify_fn = template->verify_fn;
+		bi->tuple_size = template->tuple_size;
+		bi->set_tag_fn = template->set_tag_fn;
+		bi->get_tag_fn = template->get_tag_fn;
+		bi->tag_size = template->tag_size;
+	} else
+		bi->name = "unsupported";
 
 	return 0;
 }
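With the NULL-template case above, a stacking driver can reserve an integrity profile first and copy the real callbacks in later. A hedged caller sketch; the error handling and the later fill-in step are assumptions, not part of this patch:

/* Illustrative sketch: allocate an empty ("unsupported") integrity profile
 * now, fill it in later once the member devices' profiles are known. */
static int example_reserve_integrity(struct gendisk *disk)
{
	if (blk_integrity_register(disk, NULL))
		return -ENOMEM;	/* profile allocation failed */
	/* ... later: copy tuple_size/generate_fn/verify_fn from a member ... */
	return 0;
}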
diff --git a/block/blk-merge.c b/block/blk-merge.c
index b92f5b0866b0..a104593e70c3 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -38,72 +38,84 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
 	}
 }
 
-void blk_recalc_rq_segments(struct request *rq)
+static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
+					     struct bio *bio,
+					     unsigned int *seg_size_ptr)
 {
-	int nr_phys_segs;
 	unsigned int phys_size;
 	struct bio_vec *bv, *bvprv = NULL;
-	int seg_size;
-	int cluster;
-	struct req_iterator iter;
-	int high, highprv = 1;
-	struct request_queue *q = rq->q;
+	int cluster, i, high, highprv = 1;
+	unsigned int seg_size, nr_phys_segs;
+	struct bio *fbio;
 
-	if (!rq->bio)
-		return;
+	if (!bio)
+		return 0;
 
+	fbio = bio;
 	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 	seg_size = 0;
 	phys_size = nr_phys_segs = 0;
-	rq_for_each_segment(bv, rq, iter) {
-		/*
-		 * the trick here is making sure that a high page is never
-		 * considered part of another segment, since that might
-		 * change with the bounce page.
-		 */
-		high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
-		if (high || highprv)
-			goto new_segment;
-		if (cluster) {
-			if (seg_size + bv->bv_len > q->max_segment_size)
-				goto new_segment;
-			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
-				goto new_segment;
-			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
-				goto new_segment;
+	for_each_bio(bio) {
+		bio_for_each_segment(bv, bio, i) {
+			/*
+			 * the trick here is making sure that a high page is
+			 * never considered part of another segment, since that
+			 * might change with the bounce page.
+			 */
+			high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
+			if (high || highprv)
+				goto new_segment;
+			if (cluster) {
+				if (seg_size + bv->bv_len > q->max_segment_size)
+					goto new_segment;
+				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
+					goto new_segment;
+				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+					goto new_segment;
 
-			seg_size += bv->bv_len;
-			bvprv = bv;
-			continue;
-		}
+				seg_size += bv->bv_len;
+				bvprv = bv;
+				continue;
+			}
 new_segment:
-		if (nr_phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
-			rq->bio->bi_seg_front_size = seg_size;
-
-		nr_phys_segs++;
-		bvprv = bv;
-		seg_size = bv->bv_len;
-		highprv = high;
+			if (nr_phys_segs == 1 && seg_size >
+			    fbio->bi_seg_front_size)
+				fbio->bi_seg_front_size = seg_size;
+
+			nr_phys_segs++;
+			bvprv = bv;
+			seg_size = bv->bv_len;
+			highprv = high;
+		}
 	}
 
-	if (nr_phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
+	if (seg_size_ptr)
+		*seg_size_ptr = seg_size;
+
+	return nr_phys_segs;
+}
+
+void blk_recalc_rq_segments(struct request *rq)
+{
+	unsigned int seg_size = 0, phys_segs;
+
+	phys_segs = __blk_recalc_rq_segments(rq->q, rq->bio, &seg_size);
+
+	if (phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
 		rq->bio->bi_seg_front_size = seg_size;
 	if (seg_size > rq->biotail->bi_seg_back_size)
 		rq->biotail->bi_seg_back_size = seg_size;
 
-	rq->nr_phys_segments = nr_phys_segs;
+	rq->nr_phys_segments = phys_segs;
 }
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
-	struct request rq;
 	struct bio *nxt = bio->bi_next;
-	rq.q = q;
-	rq.bio = rq.biotail = bio;
+
 	bio->bi_next = NULL;
-	blk_recalc_rq_segments(&rq);
+	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, NULL);
 	bio->bi_next = nxt;
-	bio->bi_phys_segments = rq.nr_phys_segments;
 	bio->bi_flags |= (1 << BIO_SEG_VALID);
 }
 EXPORT_SYMBOL(blk_recount_segments);
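The rewrite above lets blk_recount_segments() stop building a throwaway struct request on the stack just to reuse the counting loop; the loop now walks the bio chain directly. A hedged sketch of the same for_each_bio()/bio_for_each_segment() pattern against this era's bio_vec-pointer API; count_bvecs() is illustrative only:

/* Illustrative sketch: count every bio_vec in a chained bio with the
 * same iteration macros __blk_recalc_rq_segments() uses above. */
static unsigned int count_bvecs(struct bio *bio)
{
	struct bio_vec *bv;
	unsigned int nr = 0;
	int i;

	for_each_bio(bio)
		bio_for_each_segment(bv, bio, i)
			nr++;

	return nr;
}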
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index a29cb788e408..e29ddfc73cf4 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -130,6 +130,27 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
 	return queue_var_show(max_hw_sectors_kb, (page));
 }
 
+static ssize_t queue_nonrot_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(!blk_queue_nonrot(q), page);
+}
+
+static ssize_t queue_nonrot_store(struct request_queue *q, const char *page,
+				  size_t count)
+{
+	unsigned long nm;
+	ssize_t ret = queue_var_store(&nm, page, count);
+
+	spin_lock_irq(q->queue_lock);
+	if (nm)
+		queue_flag_clear(QUEUE_FLAG_NONROT, q);
+	else
+		queue_flag_set(QUEUE_FLAG_NONROT, q);
+	spin_unlock_irq(q->queue_lock);
+
+	return ret;
+}
+
 static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
 {
 	return queue_var_show(blk_queue_nomerges(q), page);
@@ -146,8 +167,8 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
 		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 	else
 		queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
-
 	spin_unlock_irq(q->queue_lock);
+
 	return ret;
 }
 
@@ -176,6 +197,27 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
 	return ret;
 }
 
+static ssize_t queue_iostats_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(blk_queue_io_stat(q), page);
+}
+
+static ssize_t queue_iostats_store(struct request_queue *q, const char *page,
+				   size_t count)
+{
+	unsigned long stats;
+	ssize_t ret = queue_var_store(&stats, page, count);
+
+	spin_lock_irq(q->queue_lock);
+	if (stats)
+		queue_flag_set(QUEUE_FLAG_IO_STAT, q);
+	else
+		queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
+	spin_unlock_irq(q->queue_lock);
+
+	return ret;
+}
+
 static struct queue_sysfs_entry queue_requests_entry = {
 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_requests_show,
@@ -210,6 +252,12 @@ static struct queue_sysfs_entry queue_hw_sector_size_entry = {
 	.show = queue_hw_sector_size_show,
 };
 
+static struct queue_sysfs_entry queue_nonrot_entry = {
+	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_nonrot_show,
+	.store = queue_nonrot_store,
+};
+
 static struct queue_sysfs_entry queue_nomerges_entry = {
 	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_nomerges_show,
@@ -222,6 +270,12 @@ static struct queue_sysfs_entry queue_rq_affinity_entry = {
 	.store = queue_rq_affinity_store,
 };
 
+static struct queue_sysfs_entry queue_iostats_entry = {
+	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_iostats_show,
+	.store = queue_iostats_store,
+};
+
 static struct attribute *default_attrs[] = {
 	&queue_requests_entry.attr,
 	&queue_ra_entry.attr,
@@ -229,8 +283,10 @@ static struct attribute *default_attrs[] = {
 	&queue_max_sectors_entry.attr,
 	&queue_iosched_entry.attr,
 	&queue_hw_sector_size_entry.attr,
+	&queue_nonrot_entry.attr,
 	&queue_nomerges_entry.attr,
 	&queue_rq_affinity_entry.attr,
+	&queue_iostats_entry.attr,
 	NULL,
 };
 
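The two new attributes show up as /sys/block/<dev>/queue/rotational and /sys/block/<dev>/queue/iostats; note that the show/store pair above inverts QUEUE_FLAG_NONROT, so the file reads 1 for rotating media and writing "0" marks the device non-rotational. A hedged userspace sketch; the device name sda is an assumption:

/* Illustrative userspace sketch: mark /dev/sda non-rotational and keep
 * per-partition accounting on, via the sysfs files added above. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/block/sda/queue/rotational", "w");
	if (f) {
		fputs("0\n", f);	/* 0 = non-rotational (e.g. SSD) */
		fclose(f);
	}
	f = fopen("/sys/block/sda/queue/iostats", "w");
	if (f) {
		fputs("1\n", f);	/* keep I/O accounting enabled */
		fclose(f);
	}
	return 0;
}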
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index a09535377a94..bbbdc4b8ccf2 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -209,12 +209,19 @@ void blk_abort_queue(struct request_queue *q)
 {
 	unsigned long flags;
 	struct request *rq, *tmp;
+	LIST_HEAD(list);
 
 	spin_lock_irqsave(q->queue_lock, flags);
 
 	elv_abort_queue(q);
 
-	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
+	/*
+	 * Splice entries to local list, to avoid deadlocking if entries
+	 * get readded to the timeout list by error handling
+	 */
+	list_splice_init(&q->timeout_list, &list);
+
+	list_for_each_entry_safe(rq, tmp, &list, timeout_list)
 		blk_abort_request(rq);
 
 	spin_unlock_irqrestore(q->queue_lock, flags);
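The splice-to-a-private-list idiom above is the standard way to walk a list whose entries may be re-added by the callbacks invoked during the walk. A hedged, generic sketch of the same pattern; the struct, handle_item() and drain_list() names are illustrative:

/* Illustrative sketch: move the whole list aside, then walk the private
 * copy so callbacks can safely re-add entries to the shared list without
 * the iteration ever seeing them again. */
struct work_item {
	struct list_head node;
	/* ... payload ... */
};

static void drain_list(spinlock_t *lock, struct list_head *shared)
{
	struct work_item *item, *tmp;
	unsigned long flags;
	LIST_HEAD(local);

	spin_lock_irqsave(lock, flags);
	list_splice_init(shared, &local);
	list_for_each_entry_safe(item, tmp, &local, node)
		handle_item(item);	/* may re-add 'item' to 'shared' */
	spin_unlock_irqrestore(lock, flags);
}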
diff --git a/block/blk.h b/block/blk.h
index 6e1ed40534e9..0dce92c37496 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -108,4 +108,12 @@ static inline int blk_cpu_to_group(int cpu)
 #endif
 }
 
+static inline int blk_do_io_stat(struct request_queue *q)
+{
+	if (q)
+		return blk_queue_io_stat(q);
+
+	return 0;
+}
+
 #endif
diff --git a/block/blktrace.c b/block/blktrace.c
index b0a2cae886db..7cf9d1ff45a0 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -142,7 +142,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 
 	what |= ddir_act[rw & WRITE];
 	what |= MASK_TC_BIT(rw, BARRIER);
-	what |= MASK_TC_BIT(rw, SYNC);
+	what |= MASK_TC_BIT(rw, SYNCIO);
 	what |= MASK_TC_BIT(rw, AHEAD);
 	what |= MASK_TC_BIT(rw, META);
 	what |= MASK_TC_BIT(rw, DISCARD);
@@ -187,59 +187,12 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 
 static struct dentry *blk_tree_root;
 static DEFINE_MUTEX(blk_tree_mutex);
-static unsigned int root_users;
-
-static inline void blk_remove_root(void)
-{
-	if (blk_tree_root) {
-		debugfs_remove(blk_tree_root);
-		blk_tree_root = NULL;
-	}
-}
-
-static void blk_remove_tree(struct dentry *dir)
-{
-	mutex_lock(&blk_tree_mutex);
-	debugfs_remove(dir);
-	if (--root_users == 0)
-		blk_remove_root();
-	mutex_unlock(&blk_tree_mutex);
-}
-
-static struct dentry *blk_create_tree(const char *blk_name)
-{
-	struct dentry *dir = NULL;
-	int created = 0;
-
-	mutex_lock(&blk_tree_mutex);
-
-	if (!blk_tree_root) {
-		blk_tree_root = debugfs_create_dir("block", NULL);
-		if (!blk_tree_root)
-			goto err;
-		created = 1;
-	}
-
-	dir = debugfs_create_dir(blk_name, blk_tree_root);
-	if (dir)
-		root_users++;
-	else {
-		/* Delete root only if we created it */
-		if (created)
-			blk_remove_root();
-	}
-
-err:
-	mutex_unlock(&blk_tree_mutex);
-	return dir;
-}
 
 static void blk_trace_cleanup(struct blk_trace *bt)
 {
-	relay_close(bt->rchan);
 	debugfs_remove(bt->msg_file);
 	debugfs_remove(bt->dropped_file);
-	blk_remove_tree(bt->dir);
+	relay_close(bt->rchan);
 	free_percpu(bt->sequence);
 	free_percpu(bt->msg_data);
 	kfree(bt);
@@ -346,7 +299,18 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
 
 static int blk_remove_buf_file_callback(struct dentry *dentry)
 {
+	struct dentry *parent = dentry->d_parent;
 	debugfs_remove(dentry);
+
+	/*
+	 * this will fail for all but the last file, but that is ok. what we
+	 * care about is the top level buts->name directory going away, when
+	 * the last trace file is gone. Then we don't have to rmdir() that
+	 * manually on trace stop, so it nicely solves the issue with
+	 * force killing of running traces.
+	 */
+
+	debugfs_remove(parent);
 	return 0;
 }
 
@@ -404,7 +368,15 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 		goto err;
 
 	ret = -ENOENT;
-	dir = blk_create_tree(buts->name);
+
+	if (!blk_tree_root) {
+		blk_tree_root = debugfs_create_dir("block", NULL);
+		if (!blk_tree_root)
+			return -ENOMEM;
+	}
+
+	dir = debugfs_create_dir(buts->name, blk_tree_root);
+
 	if (!dir)
 		goto err;
 
@@ -458,8 +430,6 @@ probe_err:
 	atomic_dec(&blk_probes_ref);
 	mutex_unlock(&blk_probe_mutex);
 err:
-	if (dir)
-		blk_remove_tree(dir);
 	if (bt) {
 		if (bt->msg_file)
 			debugfs_remove(bt->msg_file);
diff --git a/block/bsg.c b/block/bsg.c
index d414bb5607e8..0ce8806dd0c1 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -244,7 +244,8 @@ bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
  * map sg_io_v4 to a request.
  */
 static struct request *
-bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm)
+bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
+	    u8 *sense)
 {
 	struct request_queue *q = bd->queue;
 	struct request *rq, *next_rq = NULL;
@@ -306,6 +307,10 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm)
 		if (ret)
 			goto out;
 	}
+
+	rq->sense = sense;
+	rq->sense_len = 0;
+
 	return rq;
 out:
 	if (rq->cmd != rq->__cmd)
@@ -348,9 +353,6 @@ static void bsg_rq_end_io(struct request *rq, int uptodate)
 static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
 			    struct bsg_command *bc, struct request *rq)
 {
-	rq->sense = bc->sense;
-	rq->sense_len = 0;
-
 	/*
 	 * add bc command to busy queue and submit rq for io
 	 */
@@ -419,7 +421,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
 {
 	int ret = 0;
 
-	dprintk("rq %p bio %p %u\n", rq, bio, rq->errors);
+	dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors);
 	/*
 	 * fill in all the output members
 	 */
@@ -635,7 +637,7 @@ static int __bsg_write(struct bsg_device *bd, const char __user *buf,
 		/*
 		 * get a request, fill in the blanks, and add to request queue
 		 */
-		rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm);
+		rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense);
 		if (IS_ERR(rq)) {
 			ret = PTR_ERR(rq);
 			rq = NULL;
@@ -922,11 +924,12 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		struct request *rq;
 		struct bio *bio, *bidi_bio = NULL;
 		struct sg_io_v4 hdr;
+		u8 sense[SCSI_SENSE_BUFFERSIZE];
 
 		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
 			return -EFAULT;
 
-		rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE);
+		rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense);
 		if (IS_ERR(rq))
 			return PTR_ERR(rq);
 
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e8525fa72823..664ebfd092ec 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -84,6 +84,11 @@ struct cfq_data {
 	 */
 	struct cfq_rb_root service_tree;
 	unsigned int busy_queues;
+	/*
+	 * Used to track any pending rt requests so we can pre-empt current
+	 * non-RT cfqq in service when this value is non-zero.
+	 */
+	unsigned int busy_rt_queues;
 
 	int rq_in_driver;
 	int sync_flight;
@@ -562,6 +567,8 @@ static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	BUG_ON(cfq_cfqq_on_rr(cfqq));
 	cfq_mark_cfqq_on_rr(cfqq);
 	cfqd->busy_queues++;
+	if (cfq_class_rt(cfqq))
+		cfqd->busy_rt_queues++;
 
 	cfq_resort_rr_list(cfqd, cfqq);
 }
@@ -581,6 +588,8 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 
 	BUG_ON(!cfqd->busy_queues);
 	cfqd->busy_queues--;
+	if (cfq_class_rt(cfqq))
+		cfqd->busy_rt_queues--;
 }
 
 /*
@@ -1005,6 +1014,20 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 		goto expire;
 
 	/*
+	 * If we have a RT cfqq waiting, then we pre-empt the current non-rt
+	 * cfqq.
+	 */
+	if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues) {
+		/*
+		 * We simulate this as cfqq timed out so that it gets to bank
+		 * the remaining of its time slice.
+		 */
+		cfq_log_cfqq(cfqd, cfqq, "preempt");
+		cfq_slice_expired(cfqd, 1);
+		goto new_queue;
+	}
+
+	/*
 	 * The active queue has requests and isn't expired, allow it to
 	 * dispatch.
 	 */
@@ -1067,6 +1090,13 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		if (RB_EMPTY_ROOT(&cfqq->sort_list))
 			break;
 
+		/*
+		 * If there is a non-empty RT cfqq waiting for current
+		 * cfqq's timeslice to complete, pre-empt this cfqq
+		 */
+		if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues)
+			break;
+
 	} while (dispatched < max_dispatch);
 
 	/*
@@ -1801,6 +1831,12 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 	if (rq_is_meta(rq) && !cfqq->meta_pending)
 		return 1;
 
+	/*
+	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
+	 */
+	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
+		return 1;
+
 	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
 		return 0;
 
@@ -1870,7 +1906,8 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		/*
 		 * not the active queue - expire current slice if it is
 		 * idle and has expired it's mean thinktime or this new queue
-		 * has some old slice time left and is of higher priority
+		 * has some old slice time left and is of higher priority or
+		 * this new queue is RT and the current one is BE
 		 */
 		cfq_preempt_queue(cfqd, cfqq);
 		cfq_mark_cfqq_must_dispatch(cfqq);
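The busy_rt_queues counter above only becomes non-zero when a task actually issues I/O in the real-time scheduling class, which a process normally requests through the ioprio_set() system call. A hedged userspace sketch; the constant values mirror include/linux/ioprio.h and are stated here as assumptions (there is no glibc wrapper, hence the raw syscall):

/* Illustrative userspace sketch: put the calling process into the CFQ
 * real-time I/O class so its requests exercise the busy_rt_queues
 * preemption path added above. */
#include <sys/syscall.h>
#include <unistd.h>

#define IOPRIO_WHO_PROCESS	1
#define IOPRIO_CLASS_RT		1
#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_PRIO_VALUE(cls, data)	(((cls) << IOPRIO_CLASS_SHIFT) | (data))

int main(void)
{
	/* who == 0 means "the calling process" for IOPRIO_WHO_PROCESS */
	return syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
		       IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 4));
}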
diff --git a/block/genhd.c b/block/genhd.c
index 397960cf26af..a9ec910974c1 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -256,6 +256,22 @@ void blkdev_show(struct seq_file *seqf, off_t offset)
 }
 #endif /* CONFIG_PROC_FS */
 
+/**
+ * register_blkdev - register a new block device
+ *
+ * @major: the requested major device number [1..255]. If @major=0, try to
+ *         allocate any unused major number.
+ * @name: the name of the new block device as a zero terminated string
+ *
+ * The @name must be unique within the system.
+ *
+ * The return value depends on the @major input parameter.
+ *  - if a major device number was requested in range [1..255] then the
+ *    function returns zero on success, or a negative error code
+ *  - if any unused major number was requested with @major=0 parameter
+ *    then the return value is the allocated major number in range
+ *    [1..255] or a negative error code otherwise
+ */
 int register_blkdev(unsigned int major, const char *name)
 {
 	struct blk_major_name **n, *p;
@@ -1087,6 +1103,14 @@ dev_t blk_lookup_devt(const char *name, int partno)
 		if (strcmp(dev_name(dev), name))
 			continue;
 
+		if (partno < disk->minors) {
+			/* We need to return the right devno, even
+			 * if the partition doesn't exist yet.
+			 */
+			devt = MKDEV(MAJOR(dev->devt),
+				     MINOR(dev->devt) + partno);
+			break;
+		}
 		part = disk_get_part(disk, partno);
 		if (part) {
 			devt = part_devt(part);
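Following the register_blkdev() kernel-doc added above, a module that passes major == 0 gets a dynamically allocated major number back and must hand the same name to unregister_blkdev() on teardown. A minimal hedged sketch; the "example" name is illustrative:

/* Illustrative sketch: dynamic major allocation per the documentation
 * added above. */
#include <linux/module.h>
#include <linux/fs.h>

static int example_major;

static int __init example_init(void)
{
	example_major = register_blkdev(0, "example");
	if (example_major < 0)
		return example_major;	/* negative error code */
	return 0;
}

static void __exit example_exit(void)
{
	unregister_blkdev(example_major, "example");
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");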