path: root/block
author    Tejun Heo <tj@kernel.org>  2009-08-14 01:41:02 -0400
committer Tejun Heo <tj@kernel.org>  2009-08-14 01:45:31 -0400
commit    384be2b18a5f9475eab9ca2bdfa95cc1a04ef59c (patch)
tree      04c93f391a1b65c8bf8d7ba8643c07d26c26590a /block
parent    a76761b621bcd8336065c4fe3a74f046858bc34c (diff)
parent    142d44b0dd6741a64a7bdbe029110e7c1dcf1d23 (diff)
Merge branch 'percpu-for-linus' into percpu-for-next
Conflicts:
	arch/sparc/kernel/smp_64.c
	arch/x86/kernel/cpu/perf_counter.c
	arch/x86/kernel/setup_percpu.c
	drivers/cpufreq/cpufreq_ondemand.c
	mm/percpu.c

Conflicts in the core and arch percpu code are mostly from commit
ed78e1e078dd44249f88b1dd8c76dafb39567161, which substituted many
num_possible_cpus() calls with nr_cpu_ids.  As the for-next branch has
moved all the first-chunk allocators into mm/percpu.c, those changes
are moved from the arch code to mm/percpu.c.

Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'block')
-rw-r--r--  block/Kconfig          11
-rw-r--r--  block/blk-core.c       19
-rw-r--r--  block/blk-integrity.c   1
-rw-r--r--  block/blk-merge.c       6
-rw-r--r--  block/blk-settings.c   84
-rw-r--r--  block/blk-sysfs.c      11
-rw-r--r--  block/cfq-iosched.c     2
-rw-r--r--  block/elevator.c       13
-rw-r--r--  block/scsi_ioctl.c      1
9 files changed, 94 insertions, 54 deletions
diff --git a/block/Kconfig b/block/Kconfig
index 95a86adc33a1..9be0b56eaee1 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -48,9 +48,9 @@ config LBDAF
 	  If unsure, say Y.
 
 config BLK_DEV_BSG
-	bool "Block layer SG support v4 (EXPERIMENTAL)"
-	depends on EXPERIMENTAL
-	---help---
+	bool "Block layer SG support v4"
+	default y
+	help
 	  Saying Y here will enable generic SG (SCSI generic) v4 support
 	  for any block device.
 
@@ -60,7 +60,10 @@ config BLK_DEV_BSG
 	  protocols (e.g. Task Management Functions and SMP in Serial
 	  Attached SCSI).
 
-	  If unsure, say N.
+	  This option is required by recent UDEV versions to properly
+	  access device serial numbers, etc.
+
+	  If unsure, say Y.
 
 config BLK_DEV_INTEGRITY
 	bool "Block layer data integrity support"
diff --git a/block/blk-core.c b/block/blk-core.c
index 4b45435c6eaf..e3299a77a0d8 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -575,13 +575,6 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 		return NULL;
 	}
 
-	/*
-	 * if caller didn't supply a lock, they get per-queue locking with
-	 * our embedded lock
-	 */
-	if (!lock)
-		lock = &q->__queue_lock;
-
 	q->request_fn = rfn;
 	q->prep_rq_fn = NULL;
 	q->unplug_fn = generic_unplug_device;
@@ -2143,7 +2136,7 @@ bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
 {
 	return blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
-EXPORT_SYMBOL_GPL(blk_end_request);
+EXPORT_SYMBOL(blk_end_request);
 
 /**
  * blk_end_request_all - Helper function for drives to finish the request.
@@ -2164,7 +2157,7 @@ void blk_end_request_all(struct request *rq, int error)
 	pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
 	BUG_ON(pending);
 }
-EXPORT_SYMBOL_GPL(blk_end_request_all);
+EXPORT_SYMBOL(blk_end_request_all);
 
 /**
  * blk_end_request_cur - Helper function to finish the current request chunk.
@@ -2182,7 +2175,7 @@ bool blk_end_request_cur(struct request *rq, int error)
 {
 	return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
 }
-EXPORT_SYMBOL_GPL(blk_end_request_cur);
+EXPORT_SYMBOL(blk_end_request_cur);
 
 /**
  * __blk_end_request - Helper function for drivers to complete the request.
@@ -2201,7 +2194,7 @@ bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
 {
 	return __blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
-EXPORT_SYMBOL_GPL(__blk_end_request);
+EXPORT_SYMBOL(__blk_end_request);
 
 /**
  * __blk_end_request_all - Helper function for drives to finish the request.
@@ -2222,7 +2215,7 @@ void __blk_end_request_all(struct request *rq, int error)
 	pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
 	BUG_ON(pending);
 }
-EXPORT_SYMBOL_GPL(__blk_end_request_all);
+EXPORT_SYMBOL(__blk_end_request_all);
 
 /**
  * __blk_end_request_cur - Helper function to finish the current request chunk.
@@ -2241,7 +2234,7 @@ bool __blk_end_request_cur(struct request *rq, int error)
 {
 	return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
 }
-EXPORT_SYMBOL_GPL(__blk_end_request_cur);
+EXPORT_SYMBOL(__blk_end_request_cur);
 
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		     struct bio *bio)
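
The hunks above relax blk_end_request() and its variants from EXPORT_SYMBOL_GPL to EXPORT_SYMBOL, so the request-completion helpers become callable from non-GPL driver modules as well. A minimal sketch of a driver completion path built on them (hypothetical driver names; illustrative only):

#include <linux/blkdev.h>

/* Finish an entire request in one call.  blk_end_request_all() takes
 * the queue lock itself; the __blk_end_* variants are for callers
 * that already hold it. */
static void example_complete(struct request *rq, int error)
{
	blk_end_request_all(rq, error);
}

/* Partial completion: returns true while the request still has
 * pending bytes, false once it is fully finished. */
static bool example_complete_partial(struct request *rq, unsigned int done)
{
	return blk_end_request(rq, 0, done);
}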
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 73e28d355688..15c630813b1c 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -379,6 +379,7 @@ void blk_integrity_unregister(struct gendisk *disk)
 
 	kobject_uevent(&bi->kobj, KOBJ_REMOVE);
 	kobject_del(&bi->kobj);
+	kobject_put(&bi->kobj);
 	kmem_cache_free(integrity_cachep, bi);
 	disk->integrity = NULL;
 }
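
The added kobject_put() follows the usual kobject teardown rule: kobject_del() only unlinks the object from sysfs and does not drop the reference taken when it was added, so without a final put the ktype's ->release() never runs. A schematic sketch of the pattern (simplified, hypothetical names, error handling omitted):

#include <linux/kobject.h>

static void example_release(struct kobject *kobj)
{
	/* runs once the last reference is dropped */
}

static struct kobj_type example_ktype = {
	.release = example_release,
};

static void example_teardown(struct kobject *kobj)
{
	kobject_uevent(kobj, KOBJ_REMOVE);	/* notify userspace */
	kobject_del(kobj);			/* unlink from sysfs */
	kobject_put(kobj);			/* drop the initial reference */
}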
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 39ce64432ba6..e1999679a4d5 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -350,6 +350,12 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 	if (blk_integrity_rq(req) != blk_integrity_rq(next))
 		return 0;
 
+	/* don't merge requests of different failfast settings */
+	if (blk_failfast_dev(req) != blk_failfast_dev(next) ||
+	    blk_failfast_transport(req) != blk_failfast_transport(next) ||
+	    blk_failfast_driver(req) != blk_failfast_driver(next))
+		return 0;
+
 	/*
 	 * If we are allowed to merge, then append bio list
 	 * from next to rq and release next. merge_requests_fn
diff --git a/block/blk-settings.c b/block/blk-settings.c
index bd582a7f5310..476d87065073 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -7,6 +7,7 @@
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
+#include <linux/gcd.h>
 
 #include "blk.h"
 
@@ -165,6 +166,13 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	blk_set_default_limits(&q->limits);
 
 	/*
+	 * If the caller didn't supply a lock, fall back to our embedded
+	 * per-queue locks
+	 */
+	if (!q->queue_lock)
+		q->queue_lock = &q->__queue_lock;
+
+	/*
 	 * by default assume old behaviour and bounce for any highmem page
 	 */
 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
@@ -377,8 +385,8 @@ void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
 EXPORT_SYMBOL(blk_queue_alignment_offset);
 
 /**
- * blk_queue_io_min - set minimum request size for the queue
- * @q:  the request queue for the device
+ * blk_limits_io_min - set minimum request size for a device
+ * @limits: the queue limits
  * @min:  smallest I/O size in bytes
  *
  * Description:
@@ -387,15 +395,35 @@ EXPORT_SYMBOL(blk_queue_alignment_offset);
  *   smallest I/O the device can perform without incurring a performance
  *   penalty.
  */
-void blk_queue_io_min(struct request_queue *q, unsigned int min)
+void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
 {
-	q->limits.io_min = min;
+	limits->io_min = min;
 
-	if (q->limits.io_min < q->limits.logical_block_size)
-		q->limits.io_min = q->limits.logical_block_size;
+	if (limits->io_min < limits->logical_block_size)
+		limits->io_min = limits->logical_block_size;
 
-	if (q->limits.io_min < q->limits.physical_block_size)
-		q->limits.io_min = q->limits.physical_block_size;
+	if (limits->io_min < limits->physical_block_size)
+		limits->io_min = limits->physical_block_size;
+}
+EXPORT_SYMBOL(blk_limits_io_min);
+
+/**
+ * blk_queue_io_min - set minimum request size for the queue
+ * @q:  the request queue for the device
+ * @min:  smallest I/O size in bytes
+ *
+ * Description:
+ *   Storage devices may report a granularity or preferred minimum I/O
+ *   size which is the smallest request the device can perform without
+ *   incurring a performance penalty.  For disk drives this is often the
+ *   physical block size.  For RAID arrays it is often the stripe chunk
+ *   size.  A properly aligned multiple of minimum_io_size is the
+ *   preferred request size for workloads where a high number of I/O
+ *   operations is desired.
+ */
+void blk_queue_io_min(struct request_queue *q, unsigned int min)
+{
+	blk_limits_io_min(&q->limits, min);
 }
 EXPORT_SYMBOL(blk_queue_io_min);
 
@@ -405,8 +433,12 @@ EXPORT_SYMBOL(blk_queue_io_min);
  * @opt:  optimal request size in bytes
  *
  * Description:
- *   Drivers can call this function to set the preferred I/O request
- *   size for devices that report such a value.
+ *   Storage devices may report an optimal I/O size, which is the
+ *   device's preferred unit for sustained I/O.  This is rarely reported
+ *   for disk drives.  For RAID arrays it is usually the stripe width or
+ *   the internal track size.  A properly aligned multiple of
+ *   optimal_io_size is the preferred request size for workloads where
+ *   sustained throughput is desired.
  */
 void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
 {
@@ -426,27 +458,7 @@ EXPORT_SYMBOL(blk_queue_io_opt);
  **/
 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 {
-	/* zero is "infinity" */
-	t->limits.max_sectors = min_not_zero(queue_max_sectors(t),
-					     queue_max_sectors(b));
-
-	t->limits.max_hw_sectors = min_not_zero(queue_max_hw_sectors(t),
-						queue_max_hw_sectors(b));
-
-	t->limits.seg_boundary_mask = min_not_zero(queue_segment_boundary(t),
-						   queue_segment_boundary(b));
-
-	t->limits.max_phys_segments = min_not_zero(queue_max_phys_segments(t),
-						   queue_max_phys_segments(b));
-
-	t->limits.max_hw_segments = min_not_zero(queue_max_hw_segments(t),
-						 queue_max_hw_segments(b));
-
-	t->limits.max_segment_size = min_not_zero(queue_max_segment_size(t),
-						  queue_max_segment_size(b));
-
-	t->limits.logical_block_size = max(queue_logical_block_size(t),
-					   queue_logical_block_size(b));
+	blk_stack_limits(&t->limits, &b->limits, 0);
 
 	if (!t->queue_lock)
 		WARN_ON_ONCE(1);
@@ -516,6 +528,16 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		return -1;
 	}
 
+	/* Find lcm() of optimal I/O size */
+	if (t->io_opt && b->io_opt)
+		t->io_opt = (t->io_opt * b->io_opt) / gcd(t->io_opt, b->io_opt);
+	else if (b->io_opt)
+		t->io_opt = b->io_opt;
+
+	/* Verify that optimal I/O size is a multiple of io_min */
+	if (t->io_min && t->io_opt % t->io_min)
+		return -1;
+
 	return 0;
 }
 EXPORT_SYMBOL(blk_stack_limits);
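
The io_opt stacking above combines the two layers' optimal sizes with lcm(t, b) = t * b / gcd(t, b): the stacked device's preferred size must be a multiple of both. A standalone sketch of the arithmetic (gcd() reimplemented here in place of the kernel's <linux/gcd.h> helper; the sizes are illustrative):

#include <stdio.h>

/* Euclid's algorithm, standing in for the kernel's gcd(). */
static unsigned int gcd(unsigned int a, unsigned int b)
{
	while (b) {
		unsigned int t = a % b;
		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	/* Hypothetical stack: top layer prefers 64 KiB, bottom 96 KiB. */
	unsigned int t_opt = 64 * 1024, b_opt = 96 * 1024;

	/* lcm(a, b); dividing first avoids intermediate overflow. */
	unsigned int io_opt = t_opt / gcd(t_opt, b_opt) * b_opt;

	printf("stacked io_opt = %u KiB\n", io_opt / 1024);	/* 192 KiB */
	return 0;
}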
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index b1cd04087d6a..418d63619680 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -16,9 +16,9 @@ struct queue_sysfs_entry {
 };
 
 static ssize_t
-queue_var_show(unsigned int var, char *page)
+queue_var_show(unsigned long var, char *page)
 {
-	return sprintf(page, "%d\n", var);
+	return sprintf(page, "%lu\n", var);
 }
 
 static ssize_t
@@ -77,7 +77,8 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 
 static ssize_t queue_ra_show(struct request_queue *q, char *page)
 {
-	int ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
+	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
+					(PAGE_CACHE_SHIFT - 10);
 
 	return queue_var_show(ra_kb, (page));
 }
@@ -189,9 +190,9 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
 
 static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
 {
-	unsigned int set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
+	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
 
-	return queue_var_show(set != 0, page);
+	return queue_var_show(set, page);
 }
 
 static ssize_t
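
The switch to unsigned long in the hunks above avoids truncating large values on their way to userspace: ra_pages << (PAGE_CACHE_SHIFT - 10) can exceed what an int holds. A userspace sketch of the failure mode on a 64-bit (LP64) build, with a deliberately extreme, illustrative value:

#include <stdio.h>

int main(void)
{
	/* With 4 KiB pages, PAGE_CACHE_SHIFT - 10 == 2. */
	unsigned long ra_pages = 1UL << 30;	/* hypothetical, huge */
	unsigned long ra_kb = ra_pages << 2;	/* 2^32 KiB */

	printf("as int:           %d\n", (int)ra_kb);	/* 0 -- truncated */
	printf("as unsigned long: %lu\n", ra_kb);	/* 4294967296 */
	return 0;
}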
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 85208dd1d05b..1b2d12cda43e 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2311,7 +2311,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 		goto queue_fail;
 
 	cfqq = cic_to_cfqq(cic, is_sync);
-	if (!cfqq) {
+	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
 		cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
 		cic_set_cfqq(cic, cfqq, is_sync);
 	}
diff --git a/block/elevator.c b/block/elevator.c
index ca861927ba41..2d511f9105e1 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -100,6 +100,19 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 	if (bio_integrity(bio) != blk_integrity_rq(rq))
 		return 0;
 
+	/*
+	 * Don't merge if failfast settings don't match.
+	 *
+	 * FIXME: The negation in front of each condition is necessary
+	 * because bio and request flags use different bit positions
+	 * and the accessors return those bits directly.  This
+	 * ugliness will soon go away.
+	 */
+	if (!bio_failfast_dev(bio) != !blk_failfast_dev(rq) ||
+	    !bio_failfast_transport(bio) != !blk_failfast_transport(rq) ||
+	    !bio_failfast_driver(bio) != !blk_failfast_driver(rq))
+		return 0;
+
 	if (!elv_iosched_allow_merge(rq, bio))
 		return 0;
 
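
The !a != !b pattern in the new check is a boolean XOR: each negation collapses a flag word, whose set bit may sit at any position, down to 0 or 1 so the two sides become comparable, exactly as the FIXME describes. A standalone sketch with made-up bit positions:

#include <stdio.h>

#define BIO_FF_DEV	(1 << 3)	/* hypothetical bio flag bit */
#define REQ_FF_DEV	(1 << 7)	/* hypothetical request flag bit */

int main(void)
{
	unsigned int bio_flags = BIO_FF_DEV;	/* failfast set on the bio */
	unsigned int rq_flags = REQ_FF_DEV;	/* failfast set on the request */

	/* Raw compare: 0x08 != 0x80 wrongly reports a mismatch. */
	printf("raw:        %s\n",
	       (bio_flags & BIO_FF_DEV) != (rq_flags & REQ_FF_DEV) ?
	       "mismatch" : "match");

	/* Normalized: !0x08 == !0x80 == 0 correctly reports a match. */
	printf("normalized: %s\n",
	       !(bio_flags & BIO_FF_DEV) != !(rq_flags & REQ_FF_DEV) ?
	       "mismatch" : "match");
	return 0;
}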
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index f0e0ce0a607d..e5b10017a50b 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -680,3 +680,4 @@ int __init blk_scsi_ioctl_init(void)
 	blk_set_cmd_filter_defaults(&blk_default_cmd_filter);
 	return 0;
 }
+fs_initcall(blk_scsi_ioctl_init);