Diffstat (limited to 'block')
 block/blk-barrier.c  | 45
 block/blk-core.c     | 21
 block/blk-merge.c    |  2
 block/blk-settings.c | 34
 block/blk-sysfs.c    | 11
 block/cfq-iosched.c  | 63
 block/compat_ioctl.c | 13
 block/genhd.c        |  4
 block/ioctl.c        | 17
 9 files changed, 149 insertions(+), 61 deletions(-)
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 6593ab39cfe9..8873b9b439ff 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -350,6 +350,7 @@ static void blkdev_discard_end_io(struct bio *bio, int err)
 
 	if (bio->bi_private)
 		complete(bio->bi_private);
+	__free_page(bio_page(bio));
 
 	bio_put(bio);
 }
@@ -372,30 +373,50 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	struct request_queue *q = bdev_get_queue(bdev);
 	int type = flags & DISCARD_FL_BARRIER ?
 		DISCARD_BARRIER : DISCARD_NOBARRIER;
+	struct bio *bio;
+	struct page *page;
 	int ret = 0;
 
 	if (!q)
 		return -ENXIO;
 
-	if (!q->prepare_discard_fn)
+	if (!blk_queue_discard(q))
 		return -EOPNOTSUPP;
 
 	while (nr_sects && !ret) {
-		struct bio *bio = bio_alloc(gfp_mask, 0);
-		if (!bio)
-			return -ENOMEM;
+		unsigned int sector_size = q->limits.logical_block_size;
+		unsigned int max_discard_sectors =
+			min(q->limits.max_discard_sectors, UINT_MAX >> 9);
 
+		bio = bio_alloc(gfp_mask, 1);
+		if (!bio)
+			goto out;
+		bio->bi_sector = sector;
 		bio->bi_end_io = blkdev_discard_end_io;
 		bio->bi_bdev = bdev;
 		if (flags & DISCARD_FL_WAIT)
 			bio->bi_private = &wait;
 
-		bio->bi_sector = sector;
+		/*
+		 * Add a zeroed one-sector payload as that's what
+		 * our current implementations need. If we'll ever need
+		 * more the interface will need revisiting.
+		 */
+		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+		if (!page)
+			goto out_free_bio;
+		if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
+			goto out_free_page;
 
-		if (nr_sects > queue_max_hw_sectors(q)) {
-			bio->bi_size = queue_max_hw_sectors(q) << 9;
-			nr_sects -= queue_max_hw_sectors(q);
-			sector += queue_max_hw_sectors(q);
+		/*
+		 * And override the bio size - the way discard works we
+		 * touch many more blocks on disk than the actual payload
+		 * length.
+		 */
+		if (nr_sects > max_discard_sectors) {
+			bio->bi_size = max_discard_sectors << 9;
+			nr_sects -= max_discard_sectors;
+			sector += max_discard_sectors;
 		} else {
 			bio->bi_size = nr_sects << 9;
 			nr_sects = 0;
@@ -414,5 +435,11 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		bio_put(bio);
 	}
 	return ret;
+out_free_page:
+	__free_page(page);
+out_free_bio:
+	bio_put(bio);
+out:
+	return -ENOMEM;
 }
 EXPORT_SYMBOL(blkdev_issue_discard);
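
The rework above gives every discard bio a zeroed one-sector payload and splits long ranges on max_discard_sectors instead of max_hw_sectors. A minimal caller-side sketch, assuming the 2.6.32-era signature blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags) and the DISCARD_FL_* flags used in the hunk (the helper below is hypothetical, not part of the patch):

/* Discard a freed extent, wait for completion, ordered as a barrier. */
static int example_discard_extent(struct block_device *bdev,
				  sector_t start, sector_t nr_sects)
{
	return blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL,
				    DISCARD_FL_WAIT | DISCARD_FL_BARRIER);
}

Error behaviour follows the hunk: -EOPNOTSUPP if the queue does not advertise discard support, -ENOMEM if the bio or its payload page cannot be allocated.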
diff --git a/block/blk-core.c b/block/blk-core.c
index 8135228e4b29..81f34311659a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -34,6 +34,7 @@
 #include "blk.h"
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap);
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
 
 static int __make_request(struct request_queue *q, struct bio *bio);
@@ -69,7 +70,7 @@ static void drive_stat_acct(struct request *rq, int new_io)
 		part_stat_inc(cpu, part, merges[rw]);
 	else {
 		part_round_stats(cpu, part);
-		part_inc_in_flight(part, rw);
+		part_inc_in_flight(part);
 	}
 
 	part_stat_unlock();
@@ -1031,7 +1032,7 @@ static void part_round_stats_single(int cpu, struct hd_struct *part,
 
 	if (part->in_flight) {
 		__part_stat_add(cpu, part, time_in_queue,
-				part_in_flight(part) * (now - part->stamp));
+				part->in_flight * (now - part->stamp));
 		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
 	}
 	part->stamp = now;
@@ -1124,7 +1125,6 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 		req->cmd_flags |= REQ_DISCARD;
 		if (bio_rw_flagged(bio, BIO_RW_BARRIER))
 			req->cmd_flags |= REQ_SOFTBARRIER;
-		req->q->prepare_discard_fn(req->q, req);
 	} else if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)))
 		req->cmd_flags |= REQ_HARDBARRIER;
 
@@ -1437,7 +1437,8 @@ static inline void __generic_make_request(struct bio *bio)
 			goto end_io;
 	}
 
-	if (unlikely(nr_sectors > queue_max_hw_sectors(q))) {
+	if (unlikely(!bio_rw_flagged(bio, BIO_RW_DISCARD) &&
+		     nr_sectors > queue_max_hw_sectors(q))) {
 		printk(KERN_ERR "bio too big device %s (%u > %u)\n",
 		       bdevname(bio->bi_bdev, b),
 		       bio_sectors(bio),
@@ -1470,7 +1471,7 @@ static inline void __generic_make_request(struct bio *bio)
 			goto end_io;
 
 		if (bio_rw_flagged(bio, BIO_RW_DISCARD) &&
-		    !q->prepare_discard_fn) {
+		    !blk_queue_discard(q)) {
 			err = -EOPNOTSUPP;
 			goto end_io;
 		}
@@ -1738,7 +1739,7 @@ static void blk_account_io_done(struct request *req)
 		part_stat_inc(cpu, part, ios[rw]);
 		part_stat_add(cpu, part, ticks[rw], duration);
 		part_round_stats(cpu, part);
-		part_dec_in_flight(part, rw);
+		part_dec_in_flight(part);
 
 		part_stat_unlock();
 	}
@@ -2491,6 +2492,14 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
+int kblockd_schedule_delayed_work(struct request_queue *q,
+				  struct delayed_work *work,
+				  unsigned long delay)
+{
+	return queue_delayed_work(kblockd_workqueue, work, delay);
+}
+EXPORT_SYMBOL(kblockd_schedule_delayed_work);
+
 int __init blk_dev_init(void)
 {
 	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
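
blk-core.c now exports kblockd_schedule_delayed_work() alongside kblockd_schedule_work(). A hedged sketch of how a consumer might defer a dispatch kick with it, mirroring the CFQ conversion later in this diff (the context struct, handler, and 10 ms delay are illustrative, not from the patch):

struct example_ctx {
	struct request_queue *queue;
	struct delayed_work dwork;
};

static void example_work_fn(struct work_struct *work)
{
	struct example_ctx *ctx =
		container_of(work, struct example_ctx, dwork.work);
	/* ... restart dispatch on ctx->queue ... */
}

static void example_init(struct example_ctx *ctx, struct request_queue *q)
{
	ctx->queue = q;
	INIT_DELAYED_WORK(&ctx->dwork, example_work_fn);
}

static void example_kick_later(struct example_ctx *ctx)
{
	/* Run on the kblockd workqueue after roughly 10 ms. */
	kblockd_schedule_delayed_work(ctx->queue, &ctx->dwork,
				      msecs_to_jiffies(10));
}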
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 99cb5cf1f447..b0de8574fdc8 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -351,7 +351,7 @@ static void blk_account_io_merge(struct request *req)
 		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 
 		part_round_stats(cpu, part);
-		part_dec_in_flight(part, rq_data_dir(req));
+		part_dec_in_flight(part);
 
 		part_stat_unlock();
 	}
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 83413ff83739..e0695bca7027 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -34,23 +34,6 @@ void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
 EXPORT_SYMBOL(blk_queue_prep_rq);
 
 /**
- * blk_queue_set_discard - set a discard_sectors function for queue
- * @q:  queue
- * @dfn: prepare_discard function
- *
- * It's possible for a queue to register a discard callback which is used
- * to transform a discard request into the appropriate type for the
- * hardware. If none is registered, then discard requests are failed
- * with %EOPNOTSUPP.
- *
- */
-void blk_queue_set_discard(struct request_queue *q, prepare_discard_fn *dfn)
-{
-	q->prepare_discard_fn = dfn;
-}
-EXPORT_SYMBOL(blk_queue_set_discard);
-
-/**
  * blk_queue_merge_bvec - set a merge_bvec function for queue
  * @q:  queue
  * @mbfn: merge_bvec_fn
@@ -111,7 +94,9 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->max_hw_segments = MAX_HW_SEGMENTS;
 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 	lim->max_segment_size = MAX_SEGMENT_SIZE;
-	lim->max_sectors = lim->max_hw_sectors = SAFE_MAX_SECTORS;
+	lim->max_sectors = BLK_DEF_MAX_SECTORS;
+	lim->max_hw_sectors = INT_MAX;
+	lim->max_discard_sectors = SAFE_MAX_SECTORS;
 	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
 	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
 	lim->alignment_offset = 0;
@@ -164,6 +149,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	q->unplug_timer.data = (unsigned long)q;
 
 	blk_set_default_limits(&q->limits);
+	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
 
 	/*
 	 * If the caller didn't supply a lock, fall back to our embedded
@@ -254,6 +240,18 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
 /**
+ * blk_queue_max_discard_sectors - set max sectors for a single discard
+ * @q:  the request queue for the device
+ * @max_discard: maximum number of sectors to discard
+ **/
+void blk_queue_max_discard_sectors(struct request_queue *q,
+		unsigned int max_discard_sectors)
+{
+	q->limits.max_discard_sectors = max_discard_sectors;
+}
+EXPORT_SYMBOL(blk_queue_max_discard_sectors);
+
+/**
  * blk_queue_max_phys_segments - set max phys segments for a request for this queue
  * @q:  the request queue for the device
  * @max_segments: max number of segments
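
With prepare_discard_fn gone, a driver that supports discard is expected to publish its limit through the new blk_queue_max_discard_sectors() and mark the queue so that blk_queue_discard() returns true. A hedged sketch of driver init code, assuming the 2.6.32-era QUEUE_FLAG_DISCARD flag and queue_flag_set_unlocked() helper (the 16384-sector limit is arbitrary):

static void example_enable_discard(struct request_queue *q)
{
	/* Largest single discard accepted, in 512-byte sectors (8 MiB here). */
	blk_queue_max_discard_sectors(q, 16384);

	/* Make blk_queue_discard(q) true so discards are not rejected. */
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
}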
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index b78c9c3e2670..8a6d81afb284 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -452,6 +452,7 @@ int blk_register_queue(struct gendisk *disk)
 	if (ret) {
 		kobject_uevent(&q->kobj, KOBJ_REMOVE);
 		kobject_del(&q->kobj);
+		blk_trace_remove_sysfs(disk_to_dev(disk));
 		return ret;
 	}
 
@@ -465,11 +466,11 @@ void blk_unregister_queue(struct gendisk *disk)
 	if (WARN_ON(!q))
 		return;
 
-	if (q->request_fn) {
+	if (q->request_fn)
 		elv_unregister_queue(q);
 
-		kobject_uevent(&q->kobj, KOBJ_REMOVE);
-		kobject_del(&q->kobj);
-		kobject_put(&disk_to_dev(disk)->kobj);
-	}
+	kobject_uevent(&q->kobj, KOBJ_REMOVE);
+	kobject_del(&q->kobj);
+	blk_trace_remove_sysfs(disk_to_dev(disk));
+	kobject_put(&disk_to_dev(disk)->kobj);
 }
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 1ca813b16e78..9c4b679908f4 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -150,7 +150,7 @@ struct cfq_data {
 	 * idle window management
 	 */
 	struct timer_list idle_slice_timer;
-	struct work_struct unplug_work;
+	struct delayed_work unplug_work;
 
 	struct cfq_queue *active_queue;
 	struct cfq_io_context *active_cic;
@@ -173,6 +173,7 @@ struct cfq_data {
 	unsigned int cfq_slice[2];
 	unsigned int cfq_slice_async_rq;
 	unsigned int cfq_slice_idle;
+	unsigned int cfq_latency;
 
 	struct list_head cic_list;
 
@@ -180,6 +181,8 @@ struct cfq_data {
 	 * Fallback dummy cfqq for extreme OOM conditions
 	 */
 	struct cfq_queue oom_cfqq;
+
+	unsigned long last_end_sync_rq;
 };
 
 enum cfqq_state_flags {
@@ -265,11 +268,13 @@ static inline int cfq_bio_sync(struct bio *bio)
  * scheduler run of queue, if there are requests pending and no one in the
  * driver that will restart queueing
  */
-static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
+static inline void cfq_schedule_dispatch(struct cfq_data *cfqd,
+					 unsigned long delay)
 {
 	if (cfqd->busy_queues) {
 		cfq_log(cfqd, "schedule dispatch");
-		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
+		kblockd_schedule_delayed_work(cfqd->queue, &cfqd->unplug_work,
+						delay);
 	}
 }
 
@@ -1326,12 +1331,30 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 			return 0;
 
 		/*
-		 * we are the only queue, allow up to 4 times of 'quantum'
+		 * Sole queue user, allow bigger slice
 		 */
-		if (cfqq->dispatched >= 4 * max_dispatch)
-			return 0;
+		max_dispatch *= 4;
+	}
+
+	/*
+	 * Async queues must wait a bit before being allowed dispatch.
+	 * We also ramp up the dispatch depth gradually for async IO,
+	 * based on the last sync IO we serviced
+	 */
+	if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
+		unsigned long last_sync = jiffies - cfqd->last_end_sync_rq;
+		unsigned int depth;
+
+		depth = last_sync / cfqd->cfq_slice[1];
+		if (!depth && !cfqq->dispatched)
+			depth = 1;
+		if (depth < max_dispatch)
+			max_dispatch = depth;
 	}
 
+	if (cfqq->dispatched >= max_dispatch)
+		return 0;
+
 	/*
 	 * Dispatch a request from this cfqq
 	 */
@@ -1376,7 +1399,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 
 	if (unlikely(cfqd->active_queue == cfqq)) {
 		__cfq_slice_expired(cfqd, cfqq, 0);
-		cfq_schedule_dispatch(cfqd);
+		cfq_schedule_dispatch(cfqd, 0);
 	}
 
 	kmem_cache_free(cfq_pool, cfqq);
@@ -1471,7 +1494,7 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	if (unlikely(cfqq == cfqd->active_queue)) {
 		__cfq_slice_expired(cfqd, cfqq, 0);
-		cfq_schedule_dispatch(cfqd);
+		cfq_schedule_dispatch(cfqd, 0);
 	}
 
 	cfq_put_queue(cfqq);
@@ -1951,7 +1974,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
 
 	if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
-	    (cfqd->hw_tag && CIC_SEEKY(cic)))
+	    (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic)))
 		enable_idle = 0;
 	else if (sample_valid(cic->ttime_samples)) {
 		if (cic->ttime_mean > cfqd->cfq_slice_idle)
@@ -2157,8 +2180,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	if (cfq_cfqq_sync(cfqq))
 		cfqd->sync_flight--;
 
-	if (sync)
+	if (sync) {
 		RQ_CIC(rq)->last_end_request = now;
+		cfqd->last_end_sync_rq = now;
+	}
 
 	/*
 	 * If this is the active queue, check if it needs to be expired,
@@ -2186,7 +2211,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	}
 
 	if (!rq_in_driver(cfqd))
-		cfq_schedule_dispatch(cfqd);
+		cfq_schedule_dispatch(cfqd, 0);
 }
 
 /*
@@ -2316,7 +2341,7 @@ queue_fail:
 	if (cic)
 		put_io_context(cic->ioc);
 
-	cfq_schedule_dispatch(cfqd);
+	cfq_schedule_dispatch(cfqd, 0);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 	cfq_log(cfqd, "set_request fail");
 	return 1;
@@ -2325,7 +2350,7 @@ queue_fail:
 static void cfq_kick_queue(struct work_struct *work)
 {
 	struct cfq_data *cfqd =
-		container_of(work, struct cfq_data, unplug_work);
+		container_of(work, struct cfq_data, unplug_work.work);
 	struct request_queue *q = cfqd->queue;
 
 	spin_lock_irq(q->queue_lock);
@@ -2379,7 +2404,7 @@ static void cfq_idle_slice_timer(unsigned long data)
 expire:
 	cfq_slice_expired(cfqd, timed_out);
 out_kick:
-	cfq_schedule_dispatch(cfqd);
+	cfq_schedule_dispatch(cfqd, 0);
 out_cont:
 	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
 }
@@ -2387,7 +2412,7 @@ out_cont:
 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 {
 	del_timer_sync(&cfqd->idle_slice_timer);
-	cancel_work_sync(&cfqd->unplug_work);
+	cancel_delayed_work_sync(&cfqd->unplug_work);
 }
 
 static void cfq_put_async_queues(struct cfq_data *cfqd)
@@ -2469,7 +2494,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
 	cfqd->idle_slice_timer.data = (unsigned long) cfqd;
 
-	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
+	INIT_DELAYED_WORK(&cfqd->unplug_work, cfq_kick_queue);
 
 	cfqd->cfq_quantum = cfq_quantum;
 	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
@@ -2480,8 +2505,9 @@ static void *cfq_init_queue(struct request_queue *q)
 	cfqd->cfq_slice[1] = cfq_slice_sync;
 	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
 	cfqd->cfq_slice_idle = cfq_slice_idle;
+	cfqd->cfq_latency = 1;
 	cfqd->hw_tag = 1;
-
+	cfqd->last_end_sync_rq = jiffies;
 	return cfqd;
 }
 
@@ -2549,6 +2575,7 @@ SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
+SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
@@ -2580,6 +2607,7 @@ STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
 		UINT_MAX, 0);
+STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
 #undef STORE_FUNCTION
 
 #define CFQ_ATTR(name) \
@@ -2595,6 +2623,7 @@ static struct elv_fs_entry cfq_attrs[] = {
 	CFQ_ATTR(slice_async),
 	CFQ_ATTR(slice_async_rq),
 	CFQ_ATTR(slice_idle),
+	CFQ_ATTR(low_latency),
 	__ATTR_NULL
 };
 
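
Taken together, the cfq-iosched.c changes add a "low_latency" tunable (cfqd->cfq_latency, enabled by default in cfq_init_queue) that ramps up async dispatch depth based on the time since the last completed sync request and keeps idling enabled even for seeky tasks when hw_tag is set. The STORE_FUNCTION bounds restrict it to 0 or 1; it should appear as low_latency under the scheduler's sysfs directory (typically /sys/block/<dev>/queue/iosched/), and writing 0 is expected to restore the previous async-dispatch behaviour.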
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 7865a34e0faa..9bd086c1a4d5 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -21,6 +21,11 @@ static int compat_put_int(unsigned long arg, int val)
 	return put_user(val, (compat_int_t __user *)compat_ptr(arg));
 }
 
+static int compat_put_uint(unsigned long arg, unsigned int val)
+{
+	return put_user(val, (compat_uint_t __user *)compat_ptr(arg));
+}
+
 static int compat_put_long(unsigned long arg, long val)
 {
 	return put_user(val, (compat_long_t __user *)compat_ptr(arg));
@@ -734,6 +739,14 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 	switch (cmd) {
 	case HDIO_GETGEO:
 		return compat_hdio_getgeo(disk, bdev, compat_ptr(arg));
+	case BLKPBSZGET:
+		return compat_put_uint(arg, bdev_physical_block_size(bdev));
+	case BLKIOMIN:
+		return compat_put_uint(arg, bdev_io_min(bdev));
+	case BLKIOOPT:
+		return compat_put_uint(arg, bdev_io_opt(bdev));
+	case BLKALIGNOFF:
+		return compat_put_int(arg, bdev_alignment_offset(bdev));
 	case BLKFLSBUF:
 	case BLKROSET:
 	case BLKDISCARD:
diff --git a/block/genhd.c b/block/genhd.c
index 517e4332cb37..5a0861da324d 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -869,7 +869,6 @@ static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
 static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL);
 static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
 static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
-static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 static struct device_attribute dev_attr_fail =
 	__ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
@@ -889,7 +888,6 @@ static struct attribute *disk_attrs[] = {
 	&dev_attr_alignment_offset.attr,
 	&dev_attr_capability.attr,
 	&dev_attr_stat.attr,
-	&dev_attr_inflight.attr,
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 	&dev_attr_fail.attr,
 #endif
@@ -1055,7 +1053,7 @@ static int diskstats_show(struct seq_file *seqf, void *v)
 			   part_stat_read(hd, merges[1]),
 			   (unsigned long long)part_stat_read(hd, sectors[1]),
 			   jiffies_to_msecs(part_stat_read(hd, ticks[1])),
-			   part_in_flight(hd),
+			   hd->in_flight,
 			   jiffies_to_msecs(part_stat_read(hd, io_ticks)),
 			   jiffies_to_msecs(part_stat_read(hd, time_in_queue))
 			   );
diff --git a/block/ioctl.c b/block/ioctl.c
index d3e6b5827a34..1f4d1de12b09 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -138,6 +138,11 @@ static int put_int(unsigned long arg, int val)
 	return put_user(val, (int __user *)arg);
 }
 
+static int put_uint(unsigned long arg, unsigned int val)
+{
+	return put_user(val, (unsigned int __user *)arg);
+}
+
 static int put_long(unsigned long arg, long val)
 {
 	return put_user(val, (long __user *)arg);
@@ -263,10 +268,18 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 		return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
 	case BLKROGET:
 		return put_int(arg, bdev_read_only(bdev) != 0);
-	case BLKBSZGET: /* get the logical block size (cf. BLKSSZGET) */
+	case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
 		return put_int(arg, block_size(bdev));
-	case BLKSSZGET: /* get block device hardware sector size */
+	case BLKSSZGET: /* get block device logical block size */
 		return put_int(arg, bdev_logical_block_size(bdev));
+	case BLKPBSZGET: /* get block device physical block size */
+		return put_uint(arg, bdev_physical_block_size(bdev));
+	case BLKIOMIN:
+		return put_uint(arg, bdev_io_min(bdev));
+	case BLKIOOPT:
+		return put_uint(arg, bdev_io_opt(bdev));
+	case BLKALIGNOFF:
+		return put_int(arg, bdev_alignment_offset(bdev));
 	case BLKSECTGET:
 		return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev)));
 	case BLKRASET:
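
The new BLKPBSZGET, BLKIOMIN, BLKIOOPT and BLKALIGNOFF ioctls (wired up for both the native and compat paths above) expose the I/O topology to userspace. A hedged userspace sketch, matching the put_uint()/put_int() argument types used in the hunk; it assumes a kernel/headers that define these constants in <linux/fs.h>:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
	unsigned int pbsz, io_min, io_opt;
	int align;
	int fd;

	if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
		return 1;
	/* Query physical block size, minimum/optimal I/O size, alignment. */
	if (ioctl(fd, BLKPBSZGET, &pbsz) == 0 &&
	    ioctl(fd, BLKIOMIN, &io_min) == 0 &&
	    ioctl(fd, BLKIOOPT, &io_opt) == 0 &&
	    ioctl(fd, BLKALIGNOFF, &align) == 0)
		printf("pbsz=%u io_min=%u io_opt=%u align=%d\n",
		       pbsz, io_min, io_opt, align);
	return 0;
}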