diff options
| author | Russell King <rmk@dyn-67.arm.linux.org.uk> | 2009-09-12 07:02:26 -0400 |
|---|---|---|
| committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2009-09-12 07:02:26 -0400 |
| commit | ddd559b13f6d2fe3ad68c4b3f5235fd3c2eae4e3 (patch) | |
| tree | d827bca3fc825a0ac33efbcd493713be40fcc812 /block | |
| parent | cf7a2b4fb6a9b86779930a0a123b0df41aa9208f (diff) | |
| parent | f17a1f06d2fa93f4825be572622eb02c4894db4e (diff) | |
Merge branch 'devel-stable' into devel
Conflicts:
MAINTAINERS
arch/arm/mm/fault.c
Diffstat (limited to 'block')
| -rw-r--r-- | block/blk-core.c | 19 | ||||
| -rw-r--r-- | block/blk-integrity.c | 1 | ||||
| -rw-r--r-- | block/blk-settings.c | 7 | ||||
| -rw-r--r-- | block/blk-sysfs.c | 11 | ||||
| -rw-r--r-- | block/elevator.c | 13 |
5 files changed, 29 insertions(+), 22 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c index 4b45435c6eaf..e3299a77a0d8 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
| @@ -575,13 +575,6 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) | |||
| 575 | return NULL; | 575 | return NULL; |
| 576 | } | 576 | } |
| 577 | 577 | ||
| 578 | /* | ||
| 579 | * if caller didn't supply a lock, they get per-queue locking with | ||
| 580 | * our embedded lock | ||
| 581 | */ | ||
| 582 | if (!lock) | ||
| 583 | lock = &q->__queue_lock; | ||
| 584 | |||
| 585 | q->request_fn = rfn; | 578 | q->request_fn = rfn; |
| 586 | q->prep_rq_fn = NULL; | 579 | q->prep_rq_fn = NULL; |
| 587 | q->unplug_fn = generic_unplug_device; | 580 | q->unplug_fn = generic_unplug_device; |
| @@ -2143,7 +2136,7 @@ bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes) | |||
| 2143 | { | 2136 | { |
| 2144 | return blk_end_bidi_request(rq, error, nr_bytes, 0); | 2137 | return blk_end_bidi_request(rq, error, nr_bytes, 0); |
| 2145 | } | 2138 | } |
| 2146 | EXPORT_SYMBOL_GPL(blk_end_request); | 2139 | EXPORT_SYMBOL(blk_end_request); |
| 2147 | 2140 | ||
| 2148 | /** | 2141 | /** |
| 2149 | * blk_end_request_all - Helper function for drives to finish the request. | 2142 | * blk_end_request_all - Helper function for drives to finish the request. |
| @@ -2164,7 +2157,7 @@ void blk_end_request_all(struct request *rq, int error) | |||
| 2164 | pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); | 2157 | pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); |
| 2165 | BUG_ON(pending); | 2158 | BUG_ON(pending); |
| 2166 | } | 2159 | } |
| 2167 | EXPORT_SYMBOL_GPL(blk_end_request_all); | 2160 | EXPORT_SYMBOL(blk_end_request_all); |
| 2168 | 2161 | ||
| 2169 | /** | 2162 | /** |
| 2170 | * blk_end_request_cur - Helper function to finish the current request chunk. | 2163 | * blk_end_request_cur - Helper function to finish the current request chunk. |
| @@ -2182,7 +2175,7 @@ bool blk_end_request_cur(struct request *rq, int error) | |||
| 2182 | { | 2175 | { |
| 2183 | return blk_end_request(rq, error, blk_rq_cur_bytes(rq)); | 2176 | return blk_end_request(rq, error, blk_rq_cur_bytes(rq)); |
| 2184 | } | 2177 | } |
| 2185 | EXPORT_SYMBOL_GPL(blk_end_request_cur); | 2178 | EXPORT_SYMBOL(blk_end_request_cur); |
| 2186 | 2179 | ||
| 2187 | /** | 2180 | /** |
| 2188 | * __blk_end_request - Helper function for drivers to complete the request. | 2181 | * __blk_end_request - Helper function for drivers to complete the request. |
| @@ -2201,7 +2194,7 @@ bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) | |||
| 2201 | { | 2194 | { |
| 2202 | return __blk_end_bidi_request(rq, error, nr_bytes, 0); | 2195 | return __blk_end_bidi_request(rq, error, nr_bytes, 0); |
| 2203 | } | 2196 | } |
| 2204 | EXPORT_SYMBOL_GPL(__blk_end_request); | 2197 | EXPORT_SYMBOL(__blk_end_request); |
| 2205 | 2198 | ||
| 2206 | /** | 2199 | /** |
| 2207 | * __blk_end_request_all - Helper function for drives to finish the request. | 2200 | * __blk_end_request_all - Helper function for drives to finish the request. |
| @@ -2222,7 +2215,7 @@ void __blk_end_request_all(struct request *rq, int error) | |||
| 2222 | pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); | 2215 | pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); |
| 2223 | BUG_ON(pending); | 2216 | BUG_ON(pending); |
| 2224 | } | 2217 | } |
| 2225 | EXPORT_SYMBOL_GPL(__blk_end_request_all); | 2218 | EXPORT_SYMBOL(__blk_end_request_all); |
| 2226 | 2219 | ||
| 2227 | /** | 2220 | /** |
| 2228 | * __blk_end_request_cur - Helper function to finish the current request chunk. | 2221 | * __blk_end_request_cur - Helper function to finish the current request chunk. |
| @@ -2241,7 +2234,7 @@ bool __blk_end_request_cur(struct request *rq, int error) | |||
| 2241 | { | 2234 | { |
| 2242 | return __blk_end_request(rq, error, blk_rq_cur_bytes(rq)); | 2235 | return __blk_end_request(rq, error, blk_rq_cur_bytes(rq)); |
| 2243 | } | 2236 | } |
| 2244 | EXPORT_SYMBOL_GPL(__blk_end_request_cur); | 2237 | EXPORT_SYMBOL(__blk_end_request_cur); |
| 2245 | 2238 | ||
| 2246 | void blk_rq_bio_prep(struct request_queue *q, struct request *rq, | 2239 | void blk_rq_bio_prep(struct request_queue *q, struct request *rq, |
| 2247 | struct bio *bio) | 2240 | struct bio *bio) |
diff --git a/block/blk-integrity.c b/block/blk-integrity.c index 73e28d355688..15c630813b1c 100644 --- a/block/blk-integrity.c +++ b/block/blk-integrity.c | |||
| @@ -379,6 +379,7 @@ void blk_integrity_unregister(struct gendisk *disk) | |||
| 379 | 379 | ||
| 380 | kobject_uevent(&bi->kobj, KOBJ_REMOVE); | 380 | kobject_uevent(&bi->kobj, KOBJ_REMOVE); |
| 381 | kobject_del(&bi->kobj); | 381 | kobject_del(&bi->kobj); |
| 382 | kobject_put(&bi->kobj); | ||
| 382 | kmem_cache_free(integrity_cachep, bi); | 383 | kmem_cache_free(integrity_cachep, bi); |
| 383 | disk->integrity = NULL; | 384 | disk->integrity = NULL; |
| 384 | } | 385 | } |
diff --git a/block/blk-settings.c b/block/blk-settings.c index bd582a7f5310..8a3ea3bba10d 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c | |||
| @@ -165,6 +165,13 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn) | |||
| 165 | blk_set_default_limits(&q->limits); | 165 | blk_set_default_limits(&q->limits); |
| 166 | 166 | ||
| 167 | /* | 167 | /* |
| 168 | * If the caller didn't supply a lock, fall back to our embedded | ||
| 169 | * per-queue locks | ||
| 170 | */ | ||
| 171 | if (!q->queue_lock) | ||
| 172 | q->queue_lock = &q->__queue_lock; | ||
| 173 | |||
| 174 | /* | ||
| 168 | * by default assume old behaviour and bounce for any highmem page | 175 | * by default assume old behaviour and bounce for any highmem page |
| 169 | */ | 176 | */ |
| 170 | blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); | 177 | blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); |
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index b1cd04087d6a..418d63619680 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c | |||
| @@ -16,9 +16,9 @@ struct queue_sysfs_entry { | |||
| 16 | }; | 16 | }; |
| 17 | 17 | ||
| 18 | static ssize_t | 18 | static ssize_t |
| 19 | queue_var_show(unsigned int var, char *page) | 19 | queue_var_show(unsigned long var, char *page) |
| 20 | { | 20 | { |
| 21 | return sprintf(page, "%d\n", var); | 21 | return sprintf(page, "%lu\n", var); |
| 22 | } | 22 | } |
| 23 | 23 | ||
| 24 | static ssize_t | 24 | static ssize_t |
| @@ -77,7 +77,8 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count) | |||
| 77 | 77 | ||
| 78 | static ssize_t queue_ra_show(struct request_queue *q, char *page) | 78 | static ssize_t queue_ra_show(struct request_queue *q, char *page) |
| 79 | { | 79 | { |
| 80 | int ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10); | 80 | unsigned long ra_kb = q->backing_dev_info.ra_pages << |
| 81 | (PAGE_CACHE_SHIFT - 10); | ||
| 81 | 82 | ||
| 82 | return queue_var_show(ra_kb, (page)); | 83 | return queue_var_show(ra_kb, (page)); |
| 83 | } | 84 | } |
| @@ -189,9 +190,9 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page, | |||
| 189 | 190 | ||
| 190 | static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page) | 191 | static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page) |
| 191 | { | 192 | { |
| 192 | unsigned int set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags); | 193 | bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags); |
| 193 | 194 | ||
| 194 | return queue_var_show(set != 0, page); | 195 | return queue_var_show(set, page); |
| 195 | } | 196 | } |
| 196 | 197 | ||
| 197 | static ssize_t | 198 | static ssize_t |
diff --git a/block/elevator.c b/block/elevator.c index 6f2375339a99..2d511f9105e1 100644 --- a/block/elevator.c +++ b/block/elevator.c | |||
| @@ -101,11 +101,16 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio) | |||
| 101 | return 0; | 101 | return 0; |
| 102 | 102 | ||
| 103 | /* | 103 | /* |
| 104 | * Don't merge if failfast settings don't match | 104 | * Don't merge if failfast settings don't match. |
| 105 | * | ||
| 106 | * FIXME: The negation in front of each condition is necessary | ||
| 107 | * because bio and request flags use different bit positions | ||
| 108 | * and the accessors return those bits directly. This | ||
| 109 | * ugliness will soon go away. | ||
| 105 | */ | 110 | */ |
| 106 | if (bio_failfast_dev(bio) != blk_failfast_dev(rq) || | 111 | if (!bio_failfast_dev(bio) != !blk_failfast_dev(rq) || |
| 107 | bio_failfast_transport(bio) != blk_failfast_transport(rq) || | 112 | !bio_failfast_transport(bio) != !blk_failfast_transport(rq) || |
| 108 | bio_failfast_driver(bio) != blk_failfast_driver(rq)) | 113 | !bio_failfast_driver(bio) != !blk_failfast_driver(rq)) |
| 109 | return 0; | 114 | return 0; |
| 110 | 115 | ||
| 111 | if (!elv_iosched_allow_merge(rq, bio)) | 116 | if (!elv_iosched_allow_merge(rq, bio)) |
