author		Jens Axboe <jens.axboe@oracle.com>	2009-10-05 03:30:10 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2009-10-05 03:30:10 -0400
commit		5d13379a4dba717fb75b749acc0f928c2c02db17
tree		ab5bf870206e6ea8e744390cdb37cb14f7e78257 /block
parent		08dc8726d4be85bca793141c827574fd32a681bb
parent		374576a8b6f865022c0fd1ca62396889b23d66dd
Merge branch 'master' into for-2.6.33
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	14
-rw-r--r--	block/blk-merge.c	2
-rw-r--r--	block/cfq-iosched.c	63
-rw-r--r--	block/compat_ioctl.c	13
-rw-r--r--	block/genhd.c	4
-rw-r--r--	block/ioctl.c	17
6 files changed, 87 insertions(+), 26 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index ddaaea4fdffc..81f34311659a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -70,7 +70,7 @@ static void drive_stat_acct(struct request *rq, int new_io)
 		part_stat_inc(cpu, part, merges[rw]);
 	else {
 		part_round_stats(cpu, part);
-		part_inc_in_flight(part, rw);
+		part_inc_in_flight(part);
 	}
 
 	part_stat_unlock();
@@ -1032,7 +1032,7 @@ static void part_round_stats_single(int cpu, struct hd_struct *part,
 
 	if (part->in_flight) {
 		__part_stat_add(cpu, part, time_in_queue,
-				part_in_flight(part) * (now - part->stamp));
+				part->in_flight * (now - part->stamp));
 		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
 	}
 	part->stamp = now;
@@ -1739,7 +1739,7 @@ static void blk_account_io_done(struct request *req)
 		part_stat_inc(cpu, part, ios[rw]);
 		part_stat_add(cpu, part, ticks[rw], duration);
 		part_round_stats(cpu, part);
-		part_dec_in_flight(part, rw);
+		part_dec_in_flight(part);
 
 		part_stat_unlock();
 	}
@@ -2492,6 +2492,14 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
+int kblockd_schedule_delayed_work(struct request_queue *q,
+				  struct delayed_work *work,
+				  unsigned long delay)
+{
+	return queue_delayed_work(kblockd_workqueue, work, delay);
+}
+EXPORT_SYMBOL(kblockd_schedule_delayed_work);
+
 int __init blk_dev_init(void)
 {
 	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
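
The new export is a thin wrapper around queue_delayed_work() on the kblockd workqueue; a delay of 0 queues the work for immediate execution, matching kblockd_schedule_work(). A minimal sketch of the intended usage pattern (struct and function names here are illustrative, not from the patch; CFQ's real conversion follows in the cfq-iosched.c hunks below):

/* Sketch: driving a request queue from delayed work on kblockd.
 * Assumes the declaration added by this merge in <linux/blkdev.h>. */
#include <linux/blkdev.h>
#include <linux/workqueue.h>

struct my_sched_data {				/* hypothetical elevator data */
	struct request_queue *queue;
	struct delayed_work unplug_work;	/* was a plain work_struct */
};

static void my_kick_queue(struct work_struct *work)
{
	/* delayed_work embeds a work_struct, hence the .work member */
	struct my_sched_data *d =
		container_of(work, struct my_sched_data, unplug_work.work);
	/* ... lock d->queue and restart dispatching ... */
}

static void my_init(struct my_sched_data *d)
{
	INIT_DELAYED_WORK(&d->unplug_work, my_kick_queue);
}

static void my_schedule(struct my_sched_data *d, unsigned long delay)
{
	/* delay is in jiffies; 0 behaves like kblockd_schedule_work() */
	kblockd_schedule_delayed_work(d->queue, &d->unplug_work, delay);
}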
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 99cb5cf1f447..b0de8574fdc8 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -351,7 +351,7 @@ static void blk_account_io_merge(struct request *req)
 		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 
 		part_round_stats(cpu, part);
-		part_dec_in_flight(part, rq_data_dir(req));
+		part_dec_in_flight(part);
 
 		part_stat_unlock();
 	}
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 1ca813b16e78..9c4b679908f4 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -150,7 +150,7 @@ struct cfq_data {
 	 * idle window management
 	 */
 	struct timer_list idle_slice_timer;
-	struct work_struct unplug_work;
+	struct delayed_work unplug_work;
 
 	struct cfq_queue *active_queue;
 	struct cfq_io_context *active_cic;
@@ -173,6 +173,7 @@ struct cfq_data {
 	unsigned int cfq_slice[2];
 	unsigned int cfq_slice_async_rq;
 	unsigned int cfq_slice_idle;
+	unsigned int cfq_latency;
 
 	struct list_head cic_list;
 
@@ -180,6 +181,8 @@ struct cfq_data {
 	 * Fallback dummy cfqq for extreme OOM conditions
 	 */
 	struct cfq_queue oom_cfqq;
+
+	unsigned long last_end_sync_rq;
 };
 
 enum cfqq_state_flags {
@@ -265,11 +268,13 @@ static inline int cfq_bio_sync(struct bio *bio)
  * scheduler run of queue, if there are requests pending and no one in the
  * driver that will restart queueing
  */
-static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
+static inline void cfq_schedule_dispatch(struct cfq_data *cfqd,
+					 unsigned long delay)
 {
 	if (cfqd->busy_queues) {
 		cfq_log(cfqd, "schedule dispatch");
-		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
+		kblockd_schedule_delayed_work(cfqd->queue, &cfqd->unplug_work,
+						delay);
 	}
 }
 
@@ -1326,12 +1331,30 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 			return 0;
 
 		/*
-		 * we are the only queue, allow up to 4 times of 'quantum'
+		 * Sole queue user, allow bigger slice
 		 */
-		if (cfqq->dispatched >= 4 * max_dispatch)
-			return 0;
+		max_dispatch *= 4;
+	}
+
+	/*
+	 * Async queues must wait a bit before being allowed dispatch.
+	 * We also ramp up the dispatch depth gradually for async IO,
+	 * based on the last sync IO we serviced
+	 */
+	if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
+		unsigned long last_sync = jiffies - cfqd->last_end_sync_rq;
+		unsigned int depth;
+
+		depth = last_sync / cfqd->cfq_slice[1];
+		if (!depth && !cfqq->dispatched)
+			depth = 1;
+		if (depth < max_dispatch)
+			max_dispatch = depth;
 	}
 
+	if (cfqq->dispatched >= max_dispatch)
+		return 0;
+
 	/*
 	 * Dispatch a request from this cfqq
 	 */
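
For intuition, the ramp above reads as "one async request per elapsed sync slice": depth grows by one for every cfq_slice[1] jiffies since the last sync completion (cfq_slice_sync defaults to HZ/10, i.e. 100 ms), and a drained async queue is always allowed a single request so it cannot starve outright. A standalone model of the rule, with millisecond units assumed in place of jiffies:

/* Userspace sketch of the async dispatch ramp (assumed ms units). */
#include <stdio.h>

static unsigned int async_depth(unsigned long ms_since_last_sync,
				unsigned long slice_sync_ms,
				unsigned int dispatched,
				unsigned int max_dispatch)
{
	unsigned int depth = ms_since_last_sync / slice_sync_ms;

	if (!depth && !dispatched)
		depth = 1;	/* one request so async never starves */
	if (depth < max_dispatch)
		max_dispatch = depth;
	return max_dispatch;
}

int main(void)
{
	/* slice_sync_ms = 100, the default cfq_slice_sync */
	printf("%u\n", async_depth(250, 100, 0, 4));	/* prints 2 */
	printf("%u\n", async_depth(50, 100, 0, 4));	/* prints 1 */
	printf("%u\n", async_depth(50, 100, 1, 4));	/* prints 0 */
	return 0;
}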
@@ -1376,7 +1399,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 
 	if (unlikely(cfqd->active_queue == cfqq)) {
 		__cfq_slice_expired(cfqd, cfqq, 0);
-		cfq_schedule_dispatch(cfqd);
+		cfq_schedule_dispatch(cfqd, 0);
 	}
 
 	kmem_cache_free(cfq_pool, cfqq);
@@ -1471,7 +1494,7 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	if (unlikely(cfqq == cfqd->active_queue)) {
 		__cfq_slice_expired(cfqd, cfqq, 0);
-		cfq_schedule_dispatch(cfqd);
+		cfq_schedule_dispatch(cfqd, 0);
 	}
 
 	cfq_put_queue(cfqq);
@@ -1951,7 +1974,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
 
 	if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
-	    (cfqd->hw_tag && CIC_SEEKY(cic)))
+	    (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic)))
 		enable_idle = 0;
 	else if (sample_valid(cic->ttime_samples)) {
 		if (cic->ttime_mean > cfqd->cfq_slice_idle)
@@ -2157,8 +2180,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	if (cfq_cfqq_sync(cfqq))
 		cfqd->sync_flight--;
 
-	if (sync)
+	if (sync) {
 		RQ_CIC(rq)->last_end_request = now;
+		cfqd->last_end_sync_rq = now;
+	}
 
 	/*
 	 * If this is the active queue, check if it needs to be expired,
@@ -2186,7 +2211,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	}
 
 	if (!rq_in_driver(cfqd))
-		cfq_schedule_dispatch(cfqd);
+		cfq_schedule_dispatch(cfqd, 0);
 }
 
 /*
@@ -2316,7 +2341,7 @@ queue_fail:
 	if (cic)
 		put_io_context(cic->ioc);
 
-	cfq_schedule_dispatch(cfqd);
+	cfq_schedule_dispatch(cfqd, 0);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 	cfq_log(cfqd, "set_request fail");
 	return 1;
@@ -2325,7 +2350,7 @@ queue_fail:
 static void cfq_kick_queue(struct work_struct *work)
 {
 	struct cfq_data *cfqd =
-		container_of(work, struct cfq_data, unplug_work);
+		container_of(work, struct cfq_data, unplug_work.work);
 	struct request_queue *q = cfqd->queue;
 
 	spin_lock_irq(q->queue_lock);
@@ -2379,7 +2404,7 @@ static void cfq_idle_slice_timer(unsigned long data)
 expire:
 	cfq_slice_expired(cfqd, timed_out);
 out_kick:
-	cfq_schedule_dispatch(cfqd);
+	cfq_schedule_dispatch(cfqd, 0);
 out_cont:
 	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
 }
@@ -2387,7 +2412,7 @@ out_cont:
 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 {
 	del_timer_sync(&cfqd->idle_slice_timer);
-	cancel_work_sync(&cfqd->unplug_work);
+	cancel_delayed_work_sync(&cfqd->unplug_work);
 }
 
 static void cfq_put_async_queues(struct cfq_data *cfqd)
@@ -2469,7 +2494,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
 	cfqd->idle_slice_timer.data = (unsigned long) cfqd;
 
-	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
+	INIT_DELAYED_WORK(&cfqd->unplug_work, cfq_kick_queue);
 
 	cfqd->cfq_quantum = cfq_quantum;
 	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
@@ -2480,8 +2505,9 @@ static void *cfq_init_queue(struct request_queue *q)
 	cfqd->cfq_slice[1] = cfq_slice_sync;
 	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
 	cfqd->cfq_slice_idle = cfq_slice_idle;
+	cfqd->cfq_latency = 1;
 	cfqd->hw_tag = 1;
-
+	cfqd->last_end_sync_rq = jiffies;
 	return cfqd;
 }
 
@@ -2549,6 +2575,7 @@ SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
+SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
@@ -2580,6 +2607,7 @@ STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
 		UINT_MAX, 0);
+STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
 #undef STORE_FUNCTION
 
 #define CFQ_ATTR(name) \
@@ -2595,6 +2623,7 @@ static struct elv_fs_entry cfq_attrs[] = {
 	CFQ_ATTR(slice_async),
 	CFQ_ATTR(slice_async_rq),
 	CFQ_ATTR(slice_idle),
+	CFQ_ATTR(low_latency),
 	__ATTR_NULL
 };
 
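
The new tunable goes through the standard elevator sysfs machinery, so it should surface as /sys/block/<disk>/queue/iosched/low_latency (path assumed from that layout), accepting 0 or 1 with a default of 1 per cfq_init_queue above. A small sketch of toggling it from userspace, with an illustrative disk name:

/* Sketch: disabling CFQ's low_latency heuristics for one disk. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/block/sda/queue/iosched/low_latency";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fputs("0\n", f);	/* "1" (the default) re-enables it */
	return fclose(f) ? 1 : 0;
}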
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 7865a34e0faa..9bd086c1a4d5 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -21,6 +21,11 @@ static int compat_put_int(unsigned long arg, int val)
 	return put_user(val, (compat_int_t __user *)compat_ptr(arg));
 }
 
+static int compat_put_uint(unsigned long arg, unsigned int val)
+{
+	return put_user(val, (compat_uint_t __user *)compat_ptr(arg));
+}
+
 static int compat_put_long(unsigned long arg, long val)
 {
 	return put_user(val, (compat_long_t __user *)compat_ptr(arg));
@@ -734,6 +739,14 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 	switch (cmd) {
 	case HDIO_GETGEO:
 		return compat_hdio_getgeo(disk, bdev, compat_ptr(arg));
+	case BLKPBSZGET:
+		return compat_put_uint(arg, bdev_physical_block_size(bdev));
+	case BLKIOMIN:
+		return compat_put_uint(arg, bdev_io_min(bdev));
+	case BLKIOOPT:
+		return compat_put_uint(arg, bdev_io_opt(bdev));
+	case BLKALIGNOFF:
+		return compat_put_int(arg, bdev_alignment_offset(bdev));
 	case BLKFLSBUF:
 	case BLKROSET:
 	case BLKDISCARD:
diff --git a/block/genhd.c b/block/genhd.c
index 517e4332cb37..5a0861da324d 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -869,7 +869,6 @@ static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
 static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL);
 static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
 static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
-static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 static struct device_attribute dev_attr_fail =
 	__ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
@@ -889,7 +888,6 @@ static struct attribute *disk_attrs[] = {
 	&dev_attr_alignment_offset.attr,
 	&dev_attr_capability.attr,
 	&dev_attr_stat.attr,
-	&dev_attr_inflight.attr,
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 	&dev_attr_fail.attr,
 #endif
@@ -1055,7 +1053,7 @@ static int diskstats_show(struct seq_file *seqf, void *v)
 			   part_stat_read(hd, merges[1]),
 			   (unsigned long long)part_stat_read(hd, sectors[1]),
 			   jiffies_to_msecs(part_stat_read(hd, ticks[1])),
-			   part_in_flight(hd),
+			   hd->in_flight,
 			   jiffies_to_msecs(part_stat_read(hd, io_ticks)),
 			   jiffies_to_msecs(part_stat_read(hd, time_in_queue))
 			   );
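
The value printed here lands in the ninth per-device statistics column of /proc/diskstats (I/Os currently in flight). A quick sketch of pulling that column out from userspace:

/* Sketch: printing the in-flight column (field 9 of 11) per device. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/diskstats", "r");
	char line[256], name[32];
	unsigned int major, minor, in_flight;
	unsigned long rd_ios, rd_merges, rd_sec, rd_ms;
	unsigned long wr_ios, wr_merges, wr_sec, wr_ms;

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "%u %u %31s %lu %lu %lu %lu %lu %lu %lu %lu %u",
			   &major, &minor, name, &rd_ios, &rd_merges, &rd_sec,
			   &rd_ms, &wr_ios, &wr_merges, &wr_sec, &wr_ms,
			   &in_flight) == 12)
			printf("%s: %u in flight\n", name, in_flight);
	}
	fclose(f);
	return 0;
}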
diff --git a/block/ioctl.c b/block/ioctl.c
index d3e6b5827a34..1f4d1de12b09 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -138,6 +138,11 @@ static int put_int(unsigned long arg, int val)
 	return put_user(val, (int __user *)arg);
 }
 
+static int put_uint(unsigned long arg, unsigned int val)
+{
+	return put_user(val, (unsigned int __user *)arg);
+}
+
 static int put_long(unsigned long arg, long val)
 {
 	return put_user(val, (long __user *)arg);
@@ -263,10 +268,18 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 		return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
 	case BLKROGET:
 		return put_int(arg, bdev_read_only(bdev) != 0);
-	case BLKBSZGET: /* get the logical block size (cf. BLKSSZGET) */
+	case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
 		return put_int(arg, block_size(bdev));
-	case BLKSSZGET: /* get block device hardware sector size */
+	case BLKSSZGET: /* get block device logical block size */
 		return put_int(arg, bdev_logical_block_size(bdev));
+	case BLKPBSZGET: /* get block device physical block size */
+		return put_uint(arg, bdev_physical_block_size(bdev));
+	case BLKIOMIN:
+		return put_uint(arg, bdev_io_min(bdev));
+	case BLKIOOPT:
+		return put_uint(arg, bdev_io_opt(bdev));
+	case BLKALIGNOFF:
+		return put_int(arg, bdev_alignment_offset(bdev));
 	case BLKSECTGET:
 		return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev)));
 	case BLKRASET:
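
Together with the compat cases above, the new BLKPBSZGET, BLKIOMIN, BLKIOOPT, and BLKALIGNOFF ioctls expose the device's I/O topology to both 32-bit and 64-bit callers. A minimal query program (the device path is illustrative):

/* Sketch: querying the new topology ioctls against a block device. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	int fd = open("/dev/sda", O_RDONLY);
	unsigned int pbsz, io_min, io_opt;
	int align;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (!ioctl(fd, BLKPBSZGET, &pbsz))
		printf("physical block size: %u bytes\n", pbsz);
	if (!ioctl(fd, BLKIOMIN, &io_min))
		printf("minimum I/O size:    %u bytes\n", io_min);
	if (!ioctl(fd, BLKIOOPT, &io_opt))
		printf("optimal I/O size:    %u bytes\n", io_opt);
	if (!ioctl(fd, BLKALIGNOFF, &align))
		printf("alignment offset:    %d bytes\n", align);
	close(fd);
	return 0;
}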