119 files changed, 151 insertions, 1269 deletions
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index b9a83dd24732..2a7b38c832c7 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -963,11 +963,6 @@ elevator_dispatch_fn* fills the dispatch queue with ready requests. | |||
963 | 963 | ||
964 | elevator_add_req_fn* called to add a new request into the scheduler | 964 | elevator_add_req_fn* called to add a new request into the scheduler |
965 | 965 | ||
966 | elevator_queue_empty_fn returns true if the merge queue is empty. | ||
967 | Drivers shouldn't use this, but rather check | ||
968 | if elv_next_request is NULL (without losing the | ||
969 | request if one exists!) | ||
970 | |||
971 | elevator_former_req_fn | 966 | elevator_former_req_fn |
972 | elevator_latter_req_fn These return the request before or after the | 967 | elevator_latter_req_fn These return the request before or after the |
973 | one specified in disk sort order. Used by the | 968 | one specified in disk sort order. Used by the |
diff --git a/block/blk-core.c b/block/blk-core.c
index 6efb55cc5af0..82a45898ba76 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -198,6 +198,19 @@ void blk_dump_rq_flags(struct request *rq, char *msg) | |||
198 | } | 198 | } |
199 | EXPORT_SYMBOL(blk_dump_rq_flags); | 199 | EXPORT_SYMBOL(blk_dump_rq_flags); |
200 | 200 | ||
201 | /* | ||
202 | * Make sure that plugs that were pending when this function was entered, | ||
203 | * are now complete and requests pushed to the queue. | ||
204 | */ | ||
205 | static inline void queue_sync_plugs(struct request_queue *q) | ||
206 | { | ||
207 | /* | ||
208 | * If the current process is plugged and has barriers submitted, | ||
209 | * we will livelock if we don't unplug first. | ||
210 | */ | ||
211 | blk_flush_plug(current); | ||
212 | } | ||
213 | |||
201 | static void blk_delay_work(struct work_struct *work) | 214 | static void blk_delay_work(struct work_struct *work) |
202 | { | 215 | { |
203 | struct request_queue *q; | 216 | struct request_queue *q; |
@@ -224,137 +237,6 @@ void blk_delay_queue(struct request_queue *q, unsigned long msecs) | |||
224 | } | 237 | } |
225 | EXPORT_SYMBOL(blk_delay_queue); | 238 | EXPORT_SYMBOL(blk_delay_queue); |
226 | 239 | ||
227 | /* | ||
228 | * "plug" the device if there are no outstanding requests: this will | ||
229 | * force the transfer to start only after we have put all the requests | ||
230 | * on the list. | ||
231 | * | ||
232 | * This is called with interrupts off and no requests on the queue and | ||
233 | * with the queue lock held. | ||
234 | */ | ||
235 | void blk_plug_device(struct request_queue *q) | ||
236 | { | ||
237 | WARN_ON(!irqs_disabled()); | ||
238 | |||
239 | /* | ||
240 | * don't plug a stopped queue, it must be paired with blk_start_queue() | ||
241 | * which will restart the queueing | ||
242 | */ | ||
243 | if (blk_queue_stopped(q)) | ||
244 | return; | ||
245 | |||
246 | if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) { | ||
247 | mod_timer(&q->unplug_timer, jiffies + q->unplug_delay); | ||
248 | trace_block_plug(q); | ||
249 | } | ||
250 | } | ||
251 | EXPORT_SYMBOL(blk_plug_device); | ||
252 | |||
253 | /** | ||
254 | * blk_plug_device_unlocked - plug a device without queue lock held | ||
255 | * @q: The &struct request_queue to plug | ||
256 | * | ||
257 | * Description: | ||
258 | * Like @blk_plug_device(), but grabs the queue lock and disables | ||
259 | * interrupts. | ||
260 | **/ | ||
261 | void blk_plug_device_unlocked(struct request_queue *q) | ||
262 | { | ||
263 | unsigned long flags; | ||
264 | |||
265 | spin_lock_irqsave(q->queue_lock, flags); | ||
266 | blk_plug_device(q); | ||
267 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
268 | } | ||
269 | EXPORT_SYMBOL(blk_plug_device_unlocked); | ||
270 | |||
271 | /* | ||
272 | * remove the queue from the plugged list, if present. called with | ||
273 | * queue lock held and interrupts disabled. | ||
274 | */ | ||
275 | int blk_remove_plug(struct request_queue *q) | ||
276 | { | ||
277 | WARN_ON(!irqs_disabled()); | ||
278 | |||
279 | if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q)) | ||
280 | return 0; | ||
281 | |||
282 | del_timer(&q->unplug_timer); | ||
283 | return 1; | ||
284 | } | ||
285 | EXPORT_SYMBOL(blk_remove_plug); | ||
286 | |||
287 | /* | ||
288 | * remove the plug and let it rip.. | ||
289 | */ | ||
290 | void __generic_unplug_device(struct request_queue *q) | ||
291 | { | ||
292 | if (unlikely(blk_queue_stopped(q))) | ||
293 | return; | ||
294 | if (!blk_remove_plug(q) && !blk_queue_nonrot(q)) | ||
295 | return; | ||
296 | |||
297 | q->request_fn(q); | ||
298 | } | ||
299 | |||
300 | /** | ||
301 | * generic_unplug_device - fire a request queue | ||
302 | * @q: The &struct request_queue in question | ||
303 | * | ||
304 | * Description: | ||
305 | * Linux uses plugging to build bigger requests queues before letting | ||
306 | * the device have at them. If a queue is plugged, the I/O scheduler | ||
307 | * is still adding and merging requests on the queue. Once the queue | ||
308 | * gets unplugged, the request_fn defined for the queue is invoked and | ||
309 | * transfers started. | ||
310 | **/ | ||
311 | void generic_unplug_device(struct request_queue *q) | ||
312 | { | ||
313 | if (blk_queue_plugged(q)) { | ||
314 | spin_lock_irq(q->queue_lock); | ||
315 | __generic_unplug_device(q); | ||
316 | spin_unlock_irq(q->queue_lock); | ||
317 | } | ||
318 | } | ||
319 | EXPORT_SYMBOL(generic_unplug_device); | ||
320 | |||
321 | static void blk_backing_dev_unplug(struct backing_dev_info *bdi, | ||
322 | struct page *page) | ||
323 | { | ||
324 | struct request_queue *q = bdi->unplug_io_data; | ||
325 | |||
326 | blk_unplug(q); | ||
327 | } | ||
328 | |||
329 | void blk_unplug_work(struct work_struct *work) | ||
330 | { | ||
331 | struct request_queue *q = | ||
332 | container_of(work, struct request_queue, unplug_work); | ||
333 | |||
334 | trace_block_unplug_io(q); | ||
335 | q->unplug_fn(q); | ||
336 | } | ||
337 | |||
338 | void blk_unplug_timeout(unsigned long data) | ||
339 | { | ||
340 | struct request_queue *q = (struct request_queue *)data; | ||
341 | |||
342 | trace_block_unplug_timer(q); | ||
343 | kblockd_schedule_work(q, &q->unplug_work); | ||
344 | } | ||
345 | |||
346 | void blk_unplug(struct request_queue *q) | ||
347 | { | ||
348 | /* | ||
349 | * devices don't necessarily have an ->unplug_fn defined | ||
350 | */ | ||
351 | if (q->unplug_fn) { | ||
352 | trace_block_unplug_io(q); | ||
353 | q->unplug_fn(q); | ||
354 | } | ||
355 | } | ||
356 | EXPORT_SYMBOL(blk_unplug); | ||
357 | |||
358 | /** | 240 | /** |
359 | * blk_start_queue - restart a previously stopped queue | 241 | * blk_start_queue - restart a previously stopped queue |
360 | * @q: The &struct request_queue in question | 242 | * @q: The &struct request_queue in question |
@@ -389,7 +271,6 @@ EXPORT_SYMBOL(blk_start_queue); | |||
389 | **/ | 271 | **/ |
390 | void blk_stop_queue(struct request_queue *q) | 272 | void blk_stop_queue(struct request_queue *q) |
391 | { | 273 | { |
392 | blk_remove_plug(q); | ||
393 | cancel_delayed_work(&q->delay_work); | 274 | cancel_delayed_work(&q->delay_work); |
394 | queue_flag_set(QUEUE_FLAG_STOPPED, q); | 275 | queue_flag_set(QUEUE_FLAG_STOPPED, q); |
395 | } | 276 | } |
@@ -411,11 +292,10 @@ EXPORT_SYMBOL(blk_stop_queue); | |||
411 | */ | 292 | */ |
412 | void blk_sync_queue(struct request_queue *q) | 293 | void blk_sync_queue(struct request_queue *q) |
413 | { | 294 | { |
414 | del_timer_sync(&q->unplug_timer); | ||
415 | del_timer_sync(&q->timeout); | 295 | del_timer_sync(&q->timeout); |
416 | cancel_work_sync(&q->unplug_work); | ||
417 | throtl_shutdown_timer_wq(q); | 296 | throtl_shutdown_timer_wq(q); |
418 | cancel_delayed_work_sync(&q->delay_work); | 297 | cancel_delayed_work_sync(&q->delay_work); |
298 | queue_sync_plugs(q); | ||
419 | } | 299 | } |
420 | EXPORT_SYMBOL(blk_sync_queue); | 300 | EXPORT_SYMBOL(blk_sync_queue); |
421 | 301 | ||
@@ -430,14 +310,9 @@ EXPORT_SYMBOL(blk_sync_queue); | |||
430 | */ | 310 | */ |
431 | void __blk_run_queue(struct request_queue *q) | 311 | void __blk_run_queue(struct request_queue *q) |
432 | { | 312 | { |
433 | blk_remove_plug(q); | ||
434 | |||
435 | if (unlikely(blk_queue_stopped(q))) | 313 | if (unlikely(blk_queue_stopped(q))) |
436 | return; | 314 | return; |
437 | 315 | ||
438 | if (elv_queue_empty(q)) | ||
439 | return; | ||
440 | |||
441 | /* | 316 | /* |
442 | * Only recurse once to avoid overrunning the stack, let the unplug | 317 | * Only recurse once to avoid overrunning the stack, let the unplug |
443 | * handling reinvoke the handler shortly if we already got there. | 318 | * handling reinvoke the handler shortly if we already got there. |
@@ -445,10 +320,8 @@ void __blk_run_queue(struct request_queue *q) | |||
445 | if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { | 320 | if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { |
446 | q->request_fn(q); | 321 | q->request_fn(q); |
447 | queue_flag_clear(QUEUE_FLAG_REENTER, q); | 322 | queue_flag_clear(QUEUE_FLAG_REENTER, q); |
448 | } else { | 323 | } else |
449 | queue_flag_set(QUEUE_FLAG_PLUGGED, q); | 324 | queue_delayed_work(kblockd_workqueue, &q->delay_work, 0); |
450 | kblockd_schedule_work(q, &q->unplug_work); | ||
451 | } | ||
452 | } | 325 | } |
453 | EXPORT_SYMBOL(__blk_run_queue); | 326 | EXPORT_SYMBOL(__blk_run_queue); |
454 | 327 | ||
@@ -535,8 +408,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) | |||
535 | if (!q) | 408 | if (!q) |
536 | return NULL; | 409 | return NULL; |
537 | 410 | ||
538 | q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug; | ||
539 | q->backing_dev_info.unplug_io_data = q; | ||
540 | q->backing_dev_info.ra_pages = | 411 | q->backing_dev_info.ra_pages = |
541 | (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; | 412 | (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; |
542 | q->backing_dev_info.state = 0; | 413 | q->backing_dev_info.state = 0; |
@@ -556,13 +427,11 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) | |||
556 | 427 | ||
557 | setup_timer(&q->backing_dev_info.laptop_mode_wb_timer, | 428 | setup_timer(&q->backing_dev_info.laptop_mode_wb_timer, |
558 | laptop_mode_timer_fn, (unsigned long) q); | 429 | laptop_mode_timer_fn, (unsigned long) q); |
559 | init_timer(&q->unplug_timer); | ||
560 | setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q); | 430 | setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q); |
561 | INIT_LIST_HEAD(&q->timeout_list); | 431 | INIT_LIST_HEAD(&q->timeout_list); |
562 | INIT_LIST_HEAD(&q->flush_queue[0]); | 432 | INIT_LIST_HEAD(&q->flush_queue[0]); |
563 | INIT_LIST_HEAD(&q->flush_queue[1]); | 433 | INIT_LIST_HEAD(&q->flush_queue[1]); |
564 | INIT_LIST_HEAD(&q->flush_data_in_flight); | 434 | INIT_LIST_HEAD(&q->flush_data_in_flight); |
565 | INIT_WORK(&q->unplug_work, blk_unplug_work); | ||
566 | INIT_DELAYED_WORK(&q->delay_work, blk_delay_work); | 435 | INIT_DELAYED_WORK(&q->delay_work, blk_delay_work); |
567 | 436 | ||
568 | kobject_init(&q->kobj, &blk_queue_ktype); | 437 | kobject_init(&q->kobj, &blk_queue_ktype); |
@@ -652,7 +521,6 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn, | |||
652 | q->request_fn = rfn; | 521 | q->request_fn = rfn; |
653 | q->prep_rq_fn = NULL; | 522 | q->prep_rq_fn = NULL; |
654 | q->unprep_rq_fn = NULL; | 523 | q->unprep_rq_fn = NULL; |
655 | q->unplug_fn = generic_unplug_device; | ||
656 | q->queue_flags = QUEUE_FLAG_DEFAULT; | 524 | q->queue_flags = QUEUE_FLAG_DEFAULT; |
657 | q->queue_lock = lock; | 525 | q->queue_lock = lock; |
658 | 526 | ||
@@ -910,8 +778,8 @@ out: | |||
910 | } | 778 | } |
911 | 779 | ||
912 | /* | 780 | /* |
913 | * No available requests for this queue, unplug the device and wait for some | 781 | * No available requests for this queue, wait for some requests to become |
914 | * requests to become available. | 782 | * available. |
915 | * | 783 | * |
916 | * Called with q->queue_lock held, and returns with it unlocked. | 784 | * Called with q->queue_lock held, and returns with it unlocked. |
917 | */ | 785 | */ |
@@ -932,7 +800,6 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags, | |||
932 | 800 | ||
933 | trace_block_sleeprq(q, bio, rw_flags & 1); | 801 | trace_block_sleeprq(q, bio, rw_flags & 1); |
934 | 802 | ||
935 | __generic_unplug_device(q); | ||
936 | spin_unlock_irq(q->queue_lock); | 803 | spin_unlock_irq(q->queue_lock); |
937 | io_schedule(); | 804 | io_schedule(); |
938 | 805 | ||
@@ -1058,7 +925,7 @@ static void add_acct_request(struct request_queue *q, struct request *rq, | |||
1058 | int where) | 925 | int where) |
1059 | { | 926 | { |
1060 | drive_stat_acct(rq, 1); | 927 | drive_stat_acct(rq, 1); |
1061 | __elv_add_request(q, rq, where, 0); | 928 | __elv_add_request(q, rq, where); |
1062 | } | 929 | } |
1063 | 930 | ||
1064 | /** | 931 | /** |
@@ -2798,7 +2665,7 @@ static void flush_plug_list(struct blk_plug *plug) | |||
2798 | /* | 2665 | /* |
2799 | * rq is already accounted, so use raw insert | 2666 | * rq is already accounted, so use raw insert |
2800 | */ | 2667 | */ |
2801 | __elv_add_request(q, rq, ELEVATOR_INSERT_SORT, 0); | 2668 | __elv_add_request(q, rq, ELEVATOR_INSERT_SORT); |
2802 | } | 2669 | } |
2803 | 2670 | ||
2804 | if (q) { | 2671 | if (q) { |
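
Note: with the per-queue plug state removed above, request batching moves to an on-stack plug owned by the submitting task. Below is a minimal sketch of that pattern, assuming the blk_start_plug()/blk_finish_plug() API this series builds on; the submit_batch() helper and the WRITE direction are illustrative, not part of the patch.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/fs.h>

/* Illustrative only: batch a few bios behind an on-stack plug. */
static void submit_batch(struct bio *bios[], int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* requests collect on current->plug */
	for (i = 0; i < nr; i++)
		submit_bio(WRITE, bios[i]);
	blk_finish_plug(&plug);		/* flush the plug list out to the queues */
}

If the submitter goes to sleep while holding a plug, the block layer is expected to flush it via blk_flush_plug(current) (see queue_sync_plugs() above and the io_schedule() conversions later in this patch), so the old unplug timer is no longer needed.
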
diff --git a/block/blk-exec.c b/block/blk-exec.c
index cf1456a02acd..81e31819a597 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -54,8 +54,8 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, | |||
54 | rq->end_io = done; | 54 | rq->end_io = done; |
55 | WARN_ON(irqs_disabled()); | 55 | WARN_ON(irqs_disabled()); |
56 | spin_lock_irq(q->queue_lock); | 56 | spin_lock_irq(q->queue_lock); |
57 | __elv_add_request(q, rq, where, 1); | 57 | __elv_add_request(q, rq, where); |
58 | __generic_unplug_device(q); | 58 | __blk_run_queue(q); |
59 | /* the queue is stopped so it won't be plugged+unplugged */ | 59 | /* the queue is stopped so it won't be plugged+unplugged */ |
60 | if (rq->cmd_type == REQ_TYPE_PM_RESUME) | 60 | if (rq->cmd_type == REQ_TYPE_PM_RESUME) |
61 | q->request_fn(q); | 61 | q->request_fn(q); |
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 1e2aa8a8908c..671fa9da7560 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -194,7 +194,6 @@ static void flush_end_io(struct request *flush_rq, int error) | |||
194 | { | 194 | { |
195 | struct request_queue *q = flush_rq->q; | 195 | struct request_queue *q = flush_rq->q; |
196 | struct list_head *running = &q->flush_queue[q->flush_running_idx]; | 196 | struct list_head *running = &q->flush_queue[q->flush_running_idx]; |
197 | bool was_empty = elv_queue_empty(q); | ||
198 | bool queued = false; | 197 | bool queued = false; |
199 | struct request *rq, *n; | 198 | struct request *rq, *n; |
200 | 199 | ||
@@ -213,7 +212,7 @@ static void flush_end_io(struct request *flush_rq, int error) | |||
213 | } | 212 | } |
214 | 213 | ||
215 | /* after populating an empty queue, kick it to avoid stall */ | 214 | /* after populating an empty queue, kick it to avoid stall */ |
216 | if (queued && was_empty) | 215 | if (queued) |
217 | __blk_run_queue(q); | 216 | __blk_run_queue(q); |
218 | } | 217 | } |
219 | 218 | ||
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 36c8c1f2af18..c8d68921dddb 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -164,14 +164,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn) | |||
164 | blk_queue_congestion_threshold(q); | 164 | blk_queue_congestion_threshold(q); |
165 | q->nr_batching = BLK_BATCH_REQ; | 165 | q->nr_batching = BLK_BATCH_REQ; |
166 | 166 | ||
167 | q->unplug_thresh = 4; /* hmm */ | ||
168 | q->unplug_delay = msecs_to_jiffies(3); /* 3 milliseconds */ | ||
169 | if (q->unplug_delay == 0) | ||
170 | q->unplug_delay = 1; | ||
171 | |||
172 | q->unplug_timer.function = blk_unplug_timeout; | ||
173 | q->unplug_timer.data = (unsigned long)q; | ||
174 | |||
175 | blk_set_default_limits(&q->limits); | 167 | blk_set_default_limits(&q->limits); |
176 | blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS); | 168 | blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS); |
177 | 169 | ||
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a89043a3caa4..b8dcdc2663a1 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -800,7 +800,6 @@ out: | |||
800 | if (nr_disp) { | 800 | if (nr_disp) { |
801 | while((bio = bio_list_pop(&bio_list_on_stack))) | 801 | while((bio = bio_list_pop(&bio_list_on_stack))) |
802 | generic_make_request(bio); | 802 | generic_make_request(bio); |
803 | blk_unplug(q); | ||
804 | } | 803 | } |
805 | return nr_disp; | 804 | return nr_disp; |
806 | } | 805 | } |
diff --git a/block/blk.h b/block/blk.h
index 284b500852bd..49d21af81d07 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -18,8 +18,6 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq, | |||
18 | void blk_dequeue_request(struct request *rq); | 18 | void blk_dequeue_request(struct request *rq); |
19 | void __blk_queue_free_tags(struct request_queue *q); | 19 | void __blk_queue_free_tags(struct request_queue *q); |
20 | 20 | ||
21 | void blk_unplug_work(struct work_struct *work); | ||
22 | void blk_unplug_timeout(unsigned long data); | ||
23 | void blk_rq_timed_out_timer(unsigned long data); | 21 | void blk_rq_timed_out_timer(unsigned long data); |
24 | void blk_delete_timer(struct request *); | 22 | void blk_delete_timer(struct request *); |
25 | void blk_add_timer(struct request *); | 23 | void blk_add_timer(struct request *); |
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 3202c7e87fb3..ef631539dd2a 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -499,13 +499,6 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) | |||
499 | } | 499 | } |
500 | } | 500 | } |
501 | 501 | ||
502 | static int cfq_queue_empty(struct request_queue *q) | ||
503 | { | ||
504 | struct cfq_data *cfqd = q->elevator->elevator_data; | ||
505 | |||
506 | return !cfqd->rq_queued; | ||
507 | } | ||
508 | |||
509 | /* | 502 | /* |
510 | * Scale schedule slice based on io priority. Use the sync time slice only | 503 | * Scale schedule slice based on io priority. Use the sync time slice only |
511 | * if a queue is marked sync and has sync io queued. A sync queue with async | 504 | * if a queue is marked sync and has sync io queued. A sync queue with async |
@@ -4061,7 +4054,6 @@ static struct elevator_type iosched_cfq = { | |||
4061 | .elevator_add_req_fn = cfq_insert_request, | 4054 | .elevator_add_req_fn = cfq_insert_request, |
4062 | .elevator_activate_req_fn = cfq_activate_request, | 4055 | .elevator_activate_req_fn = cfq_activate_request, |
4063 | .elevator_deactivate_req_fn = cfq_deactivate_request, | 4056 | .elevator_deactivate_req_fn = cfq_deactivate_request, |
4064 | .elevator_queue_empty_fn = cfq_queue_empty, | ||
4065 | .elevator_completed_req_fn = cfq_completed_request, | 4057 | .elevator_completed_req_fn = cfq_completed_request, |
4066 | .elevator_former_req_fn = elv_rb_former_request, | 4058 | .elevator_former_req_fn = elv_rb_former_request, |
4067 | .elevator_latter_req_fn = elv_rb_latter_request, | 4059 | .elevator_latter_req_fn = elv_rb_latter_request, |
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index b547cbca7b23..5139c0ea1864 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -326,14 +326,6 @@ dispatch_request: | |||
326 | return 1; | 326 | return 1; |
327 | } | 327 | } |
328 | 328 | ||
329 | static int deadline_queue_empty(struct request_queue *q) | ||
330 | { | ||
331 | struct deadline_data *dd = q->elevator->elevator_data; | ||
332 | |||
333 | return list_empty(&dd->fifo_list[WRITE]) | ||
334 | && list_empty(&dd->fifo_list[READ]); | ||
335 | } | ||
336 | |||
337 | static void deadline_exit_queue(struct elevator_queue *e) | 329 | static void deadline_exit_queue(struct elevator_queue *e) |
338 | { | 330 | { |
339 | struct deadline_data *dd = e->elevator_data; | 331 | struct deadline_data *dd = e->elevator_data; |
@@ -445,7 +437,6 @@ static struct elevator_type iosched_deadline = { | |||
445 | .elevator_merge_req_fn = deadline_merged_requests, | 437 | .elevator_merge_req_fn = deadline_merged_requests, |
446 | .elevator_dispatch_fn = deadline_dispatch_requests, | 438 | .elevator_dispatch_fn = deadline_dispatch_requests, |
447 | .elevator_add_req_fn = deadline_add_request, | 439 | .elevator_add_req_fn = deadline_add_request, |
448 | .elevator_queue_empty_fn = deadline_queue_empty, | ||
449 | .elevator_former_req_fn = elv_rb_former_request, | 440 | .elevator_former_req_fn = elv_rb_former_request, |
450 | .elevator_latter_req_fn = elv_rb_latter_request, | 441 | .elevator_latter_req_fn = elv_rb_latter_request, |
451 | .elevator_init_fn = deadline_init_queue, | 442 | .elevator_init_fn = deadline_init_queue, |
diff --git a/block/elevator.c b/block/elevator.c
index 25713927c0d3..3ea208256e78 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -619,21 +619,12 @@ void elv_quiesce_end(struct request_queue *q) | |||
619 | 619 | ||
620 | void elv_insert(struct request_queue *q, struct request *rq, int where) | 620 | void elv_insert(struct request_queue *q, struct request *rq, int where) |
621 | { | 621 | { |
622 | int unplug_it = 1; | ||
623 | |||
624 | trace_block_rq_insert(q, rq); | 622 | trace_block_rq_insert(q, rq); |
625 | 623 | ||
626 | rq->q = q; | 624 | rq->q = q; |
627 | 625 | ||
628 | switch (where) { | 626 | switch (where) { |
629 | case ELEVATOR_INSERT_REQUEUE: | 627 | case ELEVATOR_INSERT_REQUEUE: |
630 | /* | ||
631 | * Most requeues happen because of a busy condition, | ||
632 | * don't force unplug of the queue for that case. | ||
633 | * Clear unplug_it and fall through. | ||
634 | */ | ||
635 | unplug_it = 0; | ||
636 | |||
637 | case ELEVATOR_INSERT_FRONT: | 628 | case ELEVATOR_INSERT_FRONT: |
638 | rq->cmd_flags |= REQ_SOFTBARRIER; | 629 | rq->cmd_flags |= REQ_SOFTBARRIER; |
639 | list_add(&rq->queuelist, &q->queue_head); | 630 | list_add(&rq->queuelist, &q->queue_head); |
@@ -679,24 +670,14 @@ void elv_insert(struct request_queue *q, struct request *rq, int where) | |||
679 | rq->cmd_flags |= REQ_SOFTBARRIER; | 670 | rq->cmd_flags |= REQ_SOFTBARRIER; |
680 | blk_insert_flush(rq); | 671 | blk_insert_flush(rq); |
681 | break; | 672 | break; |
682 | |||
683 | default: | 673 | default: |
684 | printk(KERN_ERR "%s: bad insertion point %d\n", | 674 | printk(KERN_ERR "%s: bad insertion point %d\n", |
685 | __func__, where); | 675 | __func__, where); |
686 | BUG(); | 676 | BUG(); |
687 | } | 677 | } |
688 | |||
689 | if (unplug_it && blk_queue_plugged(q)) { | ||
690 | int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC] | ||
691 | - queue_in_flight(q); | ||
692 | |||
693 | if (nrq >= q->unplug_thresh) | ||
694 | __generic_unplug_device(q); | ||
695 | } | ||
696 | } | 678 | } |
697 | 679 | ||
698 | void __elv_add_request(struct request_queue *q, struct request *rq, int where, | 680 | void __elv_add_request(struct request_queue *q, struct request *rq, int where) |
699 | int plug) | ||
700 | { | 681 | { |
701 | BUG_ON(rq->cmd_flags & REQ_ON_PLUG); | 682 | BUG_ON(rq->cmd_flags & REQ_ON_PLUG); |
702 | 683 | ||
@@ -711,38 +692,20 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where, | |||
711 | where == ELEVATOR_INSERT_SORT) | 692 | where == ELEVATOR_INSERT_SORT) |
712 | where = ELEVATOR_INSERT_BACK; | 693 | where = ELEVATOR_INSERT_BACK; |
713 | 694 | ||
714 | if (plug) | ||
715 | blk_plug_device(q); | ||
716 | |||
717 | elv_insert(q, rq, where); | 695 | elv_insert(q, rq, where); |
718 | } | 696 | } |
719 | EXPORT_SYMBOL(__elv_add_request); | 697 | EXPORT_SYMBOL(__elv_add_request); |
720 | 698 | ||
721 | void elv_add_request(struct request_queue *q, struct request *rq, int where, | 699 | void elv_add_request(struct request_queue *q, struct request *rq, int where) |
722 | int plug) | ||
723 | { | 700 | { |
724 | unsigned long flags; | 701 | unsigned long flags; |
725 | 702 | ||
726 | spin_lock_irqsave(q->queue_lock, flags); | 703 | spin_lock_irqsave(q->queue_lock, flags); |
727 | __elv_add_request(q, rq, where, plug); | 704 | __elv_add_request(q, rq, where); |
728 | spin_unlock_irqrestore(q->queue_lock, flags); | 705 | spin_unlock_irqrestore(q->queue_lock, flags); |
729 | } | 706 | } |
730 | EXPORT_SYMBOL(elv_add_request); | 707 | EXPORT_SYMBOL(elv_add_request); |
731 | 708 | ||
732 | int elv_queue_empty(struct request_queue *q) | ||
733 | { | ||
734 | struct elevator_queue *e = q->elevator; | ||
735 | |||
736 | if (!list_empty(&q->queue_head)) | ||
737 | return 0; | ||
738 | |||
739 | if (e->ops->elevator_queue_empty_fn) | ||
740 | return e->ops->elevator_queue_empty_fn(q); | ||
741 | |||
742 | return 1; | ||
743 | } | ||
744 | EXPORT_SYMBOL(elv_queue_empty); | ||
745 | |||
746 | struct request *elv_latter_request(struct request_queue *q, struct request *rq) | 709 | struct request *elv_latter_request(struct request_queue *q, struct request *rq) |
747 | { | 710 | { |
748 | struct elevator_queue *e = q->elevator; | 711 | struct elevator_queue *e = q->elevator; |
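
Note: with the 'plug' argument gone from __elv_add_request()/elv_add_request(), callers that used to plug the queue now just run it directly. A minimal sketch of the updated idiom, mirroring the blk-exec.c change above; the insert_and_run() helper name is illustrative only.

#include <linux/blkdev.h>
#include <linux/elevator.h>

/* Illustrative only: insert a prepared request and kick the queue. */
static void insert_and_run(struct request_queue *q, struct request *rq)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, ELEVATOR_INSERT_BACK);	/* no plug flag */
	__blk_run_queue(q);		/* queue_lock held, as required */
	spin_unlock_irqrestore(q->queue_lock, flags);
}
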
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index 232c4b38cd37..06389e9ef96d 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -39,13 +39,6 @@ static void noop_add_request(struct request_queue *q, struct request *rq) | |||
39 | list_add_tail(&rq->queuelist, &nd->queue); | 39 | list_add_tail(&rq->queuelist, &nd->queue); |
40 | } | 40 | } |
41 | 41 | ||
42 | static int noop_queue_empty(struct request_queue *q) | ||
43 | { | ||
44 | struct noop_data *nd = q->elevator->elevator_data; | ||
45 | |||
46 | return list_empty(&nd->queue); | ||
47 | } | ||
48 | |||
49 | static struct request * | 42 | static struct request * |
50 | noop_former_request(struct request_queue *q, struct request *rq) | 43 | noop_former_request(struct request_queue *q, struct request *rq) |
51 | { | 44 | { |
@@ -90,7 +83,6 @@ static struct elevator_type elevator_noop = { | |||
90 | .elevator_merge_req_fn = noop_merged_requests, | 83 | .elevator_merge_req_fn = noop_merged_requests, |
91 | .elevator_dispatch_fn = noop_dispatch, | 84 | .elevator_dispatch_fn = noop_dispatch, |
92 | .elevator_add_req_fn = noop_add_request, | 85 | .elevator_add_req_fn = noop_add_request, |
93 | .elevator_queue_empty_fn = noop_queue_empty, | ||
94 | .elevator_former_req_fn = noop_former_request, | 86 | .elevator_former_req_fn = noop_former_request, |
95 | .elevator_latter_req_fn = noop_latter_request, | 87 | .elevator_latter_req_fn = noop_latter_request, |
96 | .elevator_init_fn = noop_init_queue, | 88 | .elevator_init_fn = noop_init_queue, |
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 9279272b3732..35658f445fca 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -3170,12 +3170,6 @@ static void do_cciss_request(struct request_queue *q) | |||
3170 | int sg_index = 0; | 3170 | int sg_index = 0; |
3171 | int chained = 0; | 3171 | int chained = 0; |
3172 | 3172 | ||
3173 | /* We call start_io here in case there is a command waiting on the | ||
3174 | * queue that has not been sent. | ||
3175 | */ | ||
3176 | if (blk_queue_plugged(q)) | ||
3177 | goto startio; | ||
3178 | |||
3179 | queue: | 3173 | queue: |
3180 | creq = blk_peek_request(q); | 3174 | creq = blk_peek_request(q); |
3181 | if (!creq) | 3175 | if (!creq) |
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 946dad4caef3..b2fceb53e809 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -911,9 +911,6 @@ static void do_ida_request(struct request_queue *q) | |||
911 | struct scatterlist tmp_sg[SG_MAX]; | 911 | struct scatterlist tmp_sg[SG_MAX]; |
912 | int i, dir, seg; | 912 | int i, dir, seg; |
913 | 913 | ||
914 | if (blk_queue_plugged(q)) | ||
915 | goto startio; | ||
916 | |||
917 | queue_next: | 914 | queue_next: |
918 | creq = blk_peek_request(q); | 915 | creq = blk_peek_request(q); |
919 | if (!creq) | 916 | if (!creq) |
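
Note: the blk_queue_plugged() checks removed above (cciss, cpqarray) relied on the plug to revisit the queue later. Without it, a request_fn that finds the hardware busy can ask the block layer to re-run the queue after a short delay instead. A rough sketch under that assumption; controller_busy() is a hypothetical stand-in for a real hardware check and the 3ms value is only an example.

#include <linux/blkdev.h>

/* Hypothetical placeholder for a real hardware-busy check. */
static bool controller_busy(void)
{
	return false;
}

/* Illustrative only: retry later instead of plugging on a busy controller. */
static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_peek_request(q)) != NULL) {
		if (controller_busy()) {
			blk_delay_queue(q, 3);	/* re-run the queue in ~3ms */
			return;
		}
		blk_start_request(rq);
		/* ... hand rq to the hardware ... */
	}
}
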
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index ba95cba192be..2096628d6e65 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -689,8 +689,6 @@ void drbd_al_to_on_disk_bm(struct drbd_conf *mdev) | |||
689 | } | 689 | } |
690 | } | 690 | } |
691 | 691 | ||
692 | drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev)); | ||
693 | |||
694 | /* always (try to) flush bitmap to stable storage */ | 692 | /* always (try to) flush bitmap to stable storage */ |
695 | drbd_md_flush(mdev); | 693 | drbd_md_flush(mdev); |
696 | 694 | ||
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index fd42832f785b..0645ca829a94 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -840,7 +840,6 @@ static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local) | |||
840 | for (i = 0; i < num_pages; i++) | 840 | for (i = 0; i < num_pages; i++) |
841 | bm_page_io_async(mdev, b, i, rw); | 841 | bm_page_io_async(mdev, b, i, rw); |
842 | 842 | ||
843 | drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev)); | ||
844 | wait_event(b->bm_io_wait, atomic_read(&b->bm_async_io) == 0); | 843 | wait_event(b->bm_io_wait, atomic_read(&b->bm_async_io) == 0); |
845 | 844 | ||
846 | if (test_bit(BM_MD_IO_ERROR, &b->bm_flags)) { | 845 | if (test_bit(BM_MD_IO_ERROR, &b->bm_flags)) { |
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 3803a0348937..0b5718e19586 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -2382,20 +2382,6 @@ static inline int drbd_queue_order_type(struct drbd_conf *mdev) | |||
2382 | return QUEUE_ORDERED_NONE; | 2382 | return QUEUE_ORDERED_NONE; |
2383 | } | 2383 | } |
2384 | 2384 | ||
2385 | static inline void drbd_blk_run_queue(struct request_queue *q) | ||
2386 | { | ||
2387 | if (q && q->unplug_fn) | ||
2388 | q->unplug_fn(q); | ||
2389 | } | ||
2390 | |||
2391 | static inline void drbd_kick_lo(struct drbd_conf *mdev) | ||
2392 | { | ||
2393 | if (get_ldev(mdev)) { | ||
2394 | drbd_blk_run_queue(bdev_get_queue(mdev->ldev->backing_bdev)); | ||
2395 | put_ldev(mdev); | ||
2396 | } | ||
2397 | } | ||
2398 | |||
2399 | static inline void drbd_md_flush(struct drbd_conf *mdev) | 2385 | static inline void drbd_md_flush(struct drbd_conf *mdev) |
2400 | { | 2386 | { |
2401 | int r; | 2387 | int r; |
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 29cd0dc9fe4f..6049cb85310d 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2719,35 +2719,6 @@ static int drbd_release(struct gendisk *gd, fmode_t mode) | |||
2719 | return 0; | 2719 | return 0; |
2720 | } | 2720 | } |
2721 | 2721 | ||
2722 | static void drbd_unplug_fn(struct request_queue *q) | ||
2723 | { | ||
2724 | struct drbd_conf *mdev = q->queuedata; | ||
2725 | |||
2726 | /* unplug FIRST */ | ||
2727 | spin_lock_irq(q->queue_lock); | ||
2728 | blk_remove_plug(q); | ||
2729 | spin_unlock_irq(q->queue_lock); | ||
2730 | |||
2731 | /* only if connected */ | ||
2732 | spin_lock_irq(&mdev->req_lock); | ||
2733 | if (mdev->state.pdsk >= D_INCONSISTENT && mdev->state.conn >= C_CONNECTED) { | ||
2734 | D_ASSERT(mdev->state.role == R_PRIMARY); | ||
2735 | if (test_and_clear_bit(UNPLUG_REMOTE, &mdev->flags)) { | ||
2736 | /* add to the data.work queue, | ||
2737 | * unless already queued. | ||
2738 | * XXX this might be a good addition to drbd_queue_work | ||
2739 | * anyways, to detect "double queuing" ... */ | ||
2740 | if (list_empty(&mdev->unplug_work.list)) | ||
2741 | drbd_queue_work(&mdev->data.work, | ||
2742 | &mdev->unplug_work); | ||
2743 | } | ||
2744 | } | ||
2745 | spin_unlock_irq(&mdev->req_lock); | ||
2746 | |||
2747 | if (mdev->state.disk >= D_INCONSISTENT) | ||
2748 | drbd_kick_lo(mdev); | ||
2749 | } | ||
2750 | |||
2751 | static void drbd_set_defaults(struct drbd_conf *mdev) | 2722 | static void drbd_set_defaults(struct drbd_conf *mdev) |
2752 | { | 2723 | { |
2753 | /* This way we get a compile error when sync_conf grows, | 2724 | /* This way we get a compile error when sync_conf grows, |
@@ -3222,9 +3193,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor) | |||
3222 | blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE); | 3193 | blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE); |
3223 | blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); | 3194 | blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); |
3224 | blk_queue_merge_bvec(q, drbd_merge_bvec); | 3195 | blk_queue_merge_bvec(q, drbd_merge_bvec); |
3225 | q->queue_lock = &mdev->req_lock; /* needed since we use */ | 3196 | q->queue_lock = &mdev->req_lock; |
3226 | /* plugging on a queue, that actually has no requests! */ | ||
3227 | q->unplug_fn = drbd_unplug_fn; | ||
3228 | 3197 | ||
3229 | mdev->md_io_page = alloc_page(GFP_KERNEL); | 3198 | mdev->md_io_page = alloc_page(GFP_KERNEL); |
3230 | if (!mdev->md_io_page) | 3199 | if (!mdev->md_io_page) |
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 24487d4fb202..84132f8bf8a4 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -187,15 +187,6 @@ static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int | |||
187 | return NULL; | 187 | return NULL; |
188 | } | 188 | } |
189 | 189 | ||
190 | /* kick lower level device, if we have more than (arbitrary number) | ||
191 | * reference counts on it, which typically are locally submitted io | ||
192 | * requests. don't use unacked_cnt, so we speed up proto A and B, too. */ | ||
193 | static void maybe_kick_lo(struct drbd_conf *mdev) | ||
194 | { | ||
195 | if (atomic_read(&mdev->local_cnt) >= mdev->net_conf->unplug_watermark) | ||
196 | drbd_kick_lo(mdev); | ||
197 | } | ||
198 | |||
199 | static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed) | 190 | static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed) |
200 | { | 191 | { |
201 | struct drbd_epoch_entry *e; | 192 | struct drbd_epoch_entry *e; |
@@ -219,7 +210,6 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev) | |||
219 | LIST_HEAD(reclaimed); | 210 | LIST_HEAD(reclaimed); |
220 | struct drbd_epoch_entry *e, *t; | 211 | struct drbd_epoch_entry *e, *t; |
221 | 212 | ||
222 | maybe_kick_lo(mdev); | ||
223 | spin_lock_irq(&mdev->req_lock); | 213 | spin_lock_irq(&mdev->req_lock); |
224 | reclaim_net_ee(mdev, &reclaimed); | 214 | reclaim_net_ee(mdev, &reclaimed); |
225 | spin_unlock_irq(&mdev->req_lock); | 215 | spin_unlock_irq(&mdev->req_lock); |
@@ -436,8 +426,7 @@ void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head) | |||
436 | while (!list_empty(head)) { | 426 | while (!list_empty(head)) { |
437 | prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE); | 427 | prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE); |
438 | spin_unlock_irq(&mdev->req_lock); | 428 | spin_unlock_irq(&mdev->req_lock); |
439 | drbd_kick_lo(mdev); | 429 | io_schedule(); |
440 | schedule(); | ||
441 | finish_wait(&mdev->ee_wait, &wait); | 430 | finish_wait(&mdev->ee_wait, &wait); |
442 | spin_lock_irq(&mdev->req_lock); | 431 | spin_lock_irq(&mdev->req_lock); |
443 | } | 432 | } |
@@ -1147,7 +1136,6 @@ next_bio: | |||
1147 | 1136 | ||
1148 | drbd_generic_make_request(mdev, fault_type, bio); | 1137 | drbd_generic_make_request(mdev, fault_type, bio); |
1149 | } while (bios); | 1138 | } while (bios); |
1150 | maybe_kick_lo(mdev); | ||
1151 | return 0; | 1139 | return 0; |
1152 | 1140 | ||
1153 | fail: | 1141 | fail: |
@@ -1167,9 +1155,6 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign | |||
1167 | 1155 | ||
1168 | inc_unacked(mdev); | 1156 | inc_unacked(mdev); |
1169 | 1157 | ||
1170 | if (mdev->net_conf->wire_protocol != DRBD_PROT_C) | ||
1171 | drbd_kick_lo(mdev); | ||
1172 | |||
1173 | mdev->current_epoch->barrier_nr = p->barrier; | 1158 | mdev->current_epoch->barrier_nr = p->barrier; |
1174 | rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR); | 1159 | rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR); |
1175 | 1160 | ||
@@ -3556,9 +3541,6 @@ static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
3556 | 3541 | ||
3557 | static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) | 3542 | static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) |
3558 | { | 3543 | { |
3559 | if (mdev->state.disk >= D_INCONSISTENT) | ||
3560 | drbd_kick_lo(mdev); | ||
3561 | |||
3562 | /* Make sure we've acked all the TCP data associated | 3544 | /* Make sure we've acked all the TCP data associated |
3563 | * with the data requests being unplugged */ | 3545 | * with the data requests being unplugged */ |
3564 | drbd_tcp_quickack(mdev->data.socket); | 3546 | drbd_tcp_quickack(mdev->data.socket); |
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 11a75d32a2e2..ad3fc6228f27 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -960,10 +960,6 @@ allocate_barrier: | |||
960 | bio_endio(req->private_bio, -EIO); | 960 | bio_endio(req->private_bio, -EIO); |
961 | } | 961 | } |
962 | 962 | ||
963 | /* we need to plug ALWAYS since we possibly need to kick lo_dev. | ||
964 | * we plug after submit, so we won't miss an unplug event */ | ||
965 | drbd_plug_device(mdev); | ||
966 | |||
967 | return 0; | 963 | return 0; |
968 | 964 | ||
969 | fail_conflicting: | 965 | fail_conflicting: |
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 34f224b018b3..e027446590d3 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -792,7 +792,6 @@ int drbd_resync_finished(struct drbd_conf *mdev) | |||
792 | * queue (or even the read operations for those packets | 792 | * queue (or even the read operations for those packets |
793 | * is not finished by now). Retry in 100ms. */ | 793 | * is not finished by now). Retry in 100ms. */ |
794 | 794 | ||
795 | drbd_kick_lo(mdev); | ||
796 | __set_current_state(TASK_INTERRUPTIBLE); | 795 | __set_current_state(TASK_INTERRUPTIBLE); |
797 | schedule_timeout(HZ / 10); | 796 | schedule_timeout(HZ / 10); |
798 | w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC); | 797 | w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC); |
diff --git a/drivers/block/drbd/drbd_wrappers.h b/drivers/block/drbd/drbd_wrappers.h
index defdb5013ea3..53586fa5ae1b 100644
--- a/drivers/block/drbd/drbd_wrappers.h
+++ b/drivers/block/drbd/drbd_wrappers.h
@@ -45,24 +45,6 @@ static inline void drbd_generic_make_request(struct drbd_conf *mdev, | |||
45 | generic_make_request(bio); | 45 | generic_make_request(bio); |
46 | } | 46 | } |
47 | 47 | ||
48 | static inline void drbd_plug_device(struct drbd_conf *mdev) | ||
49 | { | ||
50 | struct request_queue *q; | ||
51 | q = bdev_get_queue(mdev->this_bdev); | ||
52 | |||
53 | spin_lock_irq(q->queue_lock); | ||
54 | |||
55 | /* XXX the check on !blk_queue_plugged is redundant, | ||
56 | * implicitly checked in blk_plug_device */ | ||
57 | |||
58 | if (!blk_queue_plugged(q)) { | ||
59 | blk_plug_device(q); | ||
60 | del_timer(&q->unplug_timer); | ||
61 | /* unplugging should not happen automatically... */ | ||
62 | } | ||
63 | spin_unlock_irq(q->queue_lock); | ||
64 | } | ||
65 | |||
66 | static inline int drbd_crypto_is_hash(struct crypto_tfm *tfm) | 48 | static inline int drbd_crypto_is_hash(struct crypto_tfm *tfm) |
67 | { | 49 | { |
68 | return (crypto_tfm_alg_type(tfm) & CRYPTO_ALG_TYPE_HASH_MASK) | 50 | return (crypto_tfm_alg_type(tfm) & CRYPTO_ALG_TYPE_HASH_MASK) |
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index b9ba04fc2b34..271142b9e2cd 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3837,7 +3837,6 @@ static int __floppy_read_block_0(struct block_device *bdev) | |||
3837 | bio.bi_end_io = floppy_rb0_complete; | 3837 | bio.bi_end_io = floppy_rb0_complete; |
3838 | 3838 | ||
3839 | submit_bio(READ, &bio); | 3839 | submit_bio(READ, &bio); |
3840 | generic_unplug_device(bdev_get_queue(bdev)); | ||
3841 | process_fd_request(); | 3840 | process_fd_request(); |
3842 | wait_for_completion(&complete); | 3841 | wait_for_completion(&complete); |
3843 | 3842 | ||
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 49e6a545eb63..01b8e4a87c9f 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -541,17 +541,6 @@ out: | |||
541 | return 0; | 541 | return 0; |
542 | } | 542 | } |
543 | 543 | ||
544 | /* | ||
545 | * kick off io on the underlying address space | ||
546 | */ | ||
547 | static void loop_unplug(struct request_queue *q) | ||
548 | { | ||
549 | struct loop_device *lo = q->queuedata; | ||
550 | |||
551 | queue_flag_clear_unlocked(QUEUE_FLAG_PLUGGED, q); | ||
552 | blk_run_address_space(lo->lo_backing_file->f_mapping); | ||
553 | } | ||
554 | |||
555 | struct switch_request { | 544 | struct switch_request { |
556 | struct file *file; | 545 | struct file *file; |
557 | struct completion wait; | 546 | struct completion wait; |
@@ -918,7 +907,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, | |||
918 | */ | 907 | */ |
919 | blk_queue_make_request(lo->lo_queue, loop_make_request); | 908 | blk_queue_make_request(lo->lo_queue, loop_make_request); |
920 | lo->lo_queue->queuedata = lo; | 909 | lo->lo_queue->queuedata = lo; |
921 | lo->lo_queue->unplug_fn = loop_unplug; | ||
922 | 910 | ||
923 | if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) | 911 | if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) |
924 | blk_queue_flush(lo->lo_queue, REQ_FLUSH); | 912 | blk_queue_flush(lo->lo_queue, REQ_FLUSH); |
@@ -1020,7 +1008,6 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev) | |||
1020 | 1008 | ||
1021 | kthread_stop(lo->lo_thread); | 1009 | kthread_stop(lo->lo_thread); |
1022 | 1010 | ||
1023 | lo->lo_queue->unplug_fn = NULL; | ||
1024 | lo->lo_backing_file = NULL; | 1011 | lo->lo_backing_file = NULL; |
1025 | 1012 | ||
1026 | loop_release_xfer(lo); | 1013 | loop_release_xfer(lo); |
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 77d70eebb6b2..d20e13f80001 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1606,8 +1606,6 @@ static int kcdrwd(void *foobar) | |||
1606 | min_sleep_time = pkt->sleep_time; | 1606 | min_sleep_time = pkt->sleep_time; |
1607 | } | 1607 | } |
1608 | 1608 | ||
1609 | generic_unplug_device(bdev_get_queue(pd->bdev)); | ||
1610 | |||
1611 | VPRINTK("kcdrwd: sleeping\n"); | 1609 | VPRINTK("kcdrwd: sleeping\n"); |
1612 | residue = schedule_timeout(min_sleep_time); | 1610 | residue = schedule_timeout(min_sleep_time); |
1613 | VPRINTK("kcdrwd: wake up\n"); | 1611 | VPRINTK("kcdrwd: wake up\n"); |
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 8be57151f5d6..653439faa729 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -241,8 +241,7 @@ static void dump_dmastat(struct cardinfo *card, unsigned int dmastat) | |||
241 | * | 241 | * |
242 | * Whenever IO on the active page completes, the Ready page is activated | 242 | * Whenever IO on the active page completes, the Ready page is activated |
243 | * and the ex-Active page is clean out and made Ready. | 243 | * and the ex-Active page is clean out and made Ready. |
244 | * Otherwise the Ready page is only activated when it becomes full, or | 244 | * Otherwise the Ready page is only activated when it becomes full. |
245 | * when mm_unplug_device is called via the unplug_io_fn. | ||
246 | * | 245 | * |
247 | * If a request arrives while both pages a full, it is queued, and b_rdev is | 246 | * If a request arrives while both pages a full, it is queued, and b_rdev is |
248 | * overloaded to record whether it was a read or a write. | 247 | * overloaded to record whether it was a read or a write. |
@@ -333,17 +332,6 @@ static inline void reset_page(struct mm_page *page) | |||
333 | page->biotail = &page->bio; | 332 | page->biotail = &page->bio; |
334 | } | 333 | } |
335 | 334 | ||
336 | static void mm_unplug_device(struct request_queue *q) | ||
337 | { | ||
338 | struct cardinfo *card = q->queuedata; | ||
339 | unsigned long flags; | ||
340 | |||
341 | spin_lock_irqsave(&card->lock, flags); | ||
342 | if (blk_remove_plug(q)) | ||
343 | activate(card); | ||
344 | spin_unlock_irqrestore(&card->lock, flags); | ||
345 | } | ||
346 | |||
347 | /* | 335 | /* |
348 | * If there is room on Ready page, take | 336 | * If there is room on Ready page, take |
349 | * one bh off list and add it. | 337 | * one bh off list and add it. |
@@ -535,7 +523,6 @@ static int mm_make_request(struct request_queue *q, struct bio *bio) | |||
535 | *card->biotail = bio; | 523 | *card->biotail = bio; |
536 | bio->bi_next = NULL; | 524 | bio->bi_next = NULL; |
537 | card->biotail = &bio->bi_next; | 525 | card->biotail = &bio->bi_next; |
538 | blk_plug_device(q); | ||
539 | spin_unlock_irq(&card->lock); | 526 | spin_unlock_irq(&card->lock); |
540 | 527 | ||
541 | return 0; | 528 | return 0; |
@@ -907,7 +894,6 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, | |||
907 | blk_queue_make_request(card->queue, mm_make_request); | 894 | blk_queue_make_request(card->queue, mm_make_request); |
908 | card->queue->queue_lock = &card->lock; | 895 | card->queue->queue_lock = &card->lock; |
909 | card->queue->queuedata = card; | 896 | card->queue->queuedata = card; |
910 | card->queue->unplug_fn = mm_unplug_device; | ||
911 | 897 | ||
912 | tasklet_init(&card->tasklet, process_page, (unsigned long)card); | 898 | tasklet_init(&card->tasklet, process_page, (unsigned long)card); |
913 | 899 | ||
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index e88a2cf17711..6f218e014e99 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -233,8 +233,7 @@ int ide_queue_sense_rq(ide_drive_t *drive, void *special) | |||
233 | 233 | ||
234 | drive->hwif->rq = NULL; | 234 | drive->hwif->rq = NULL; |
235 | 235 | ||
236 | elv_add_request(drive->queue, &drive->sense_rq, | 236 | elv_add_request(drive->queue, &drive->sense_rq, ELEVATOR_INSERT_FRONT); |
237 | ELEVATOR_INSERT_FRONT, 0); | ||
238 | return 0; | 237 | return 0; |
239 | } | 238 | } |
240 | EXPORT_SYMBOL_GPL(ide_queue_sense_rq); | 239 | EXPORT_SYMBOL_GPL(ide_queue_sense_rq); |
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 999dac054bcc..f4077840d3ab 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -549,8 +549,6 @@ plug_device_2: | |||
549 | 549 | ||
550 | if (rq) | 550 | if (rq) |
551 | blk_requeue_request(q, rq); | 551 | blk_requeue_request(q, rq); |
552 | if (!elv_queue_empty(q)) | ||
553 | blk_plug_device(q); | ||
554 | } | 552 | } |
555 | 553 | ||
556 | void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq) | 554 | void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq) |
@@ -562,8 +560,6 @@ void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq) | |||
562 | 560 | ||
563 | if (rq) | 561 | if (rq) |
564 | blk_requeue_request(q, rq); | 562 | blk_requeue_request(q, rq); |
565 | if (!elv_queue_empty(q)) | ||
566 | blk_plug_device(q); | ||
567 | 563 | ||
568 | spin_unlock_irqrestore(q->queue_lock, flags); | 564 | spin_unlock_irqrestore(q->queue_lock, flags); |
569 | } | 565 | } |
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 88a380c5a470..6ab9ab2a5081 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -52,7 +52,7 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout) | |||
52 | rq->cmd[0] = REQ_UNPARK_HEADS; | 52 | rq->cmd[0] = REQ_UNPARK_HEADS; |
53 | rq->cmd_len = 1; | 53 | rq->cmd_len = 1; |
54 | rq->cmd_type = REQ_TYPE_SPECIAL; | 54 | rq->cmd_type = REQ_TYPE_SPECIAL; |
55 | elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 1); | 55 | elv_add_request(q, rq, ELEVATOR_INSERT_FRONT); |
56 | 56 | ||
57 | out: | 57 | out: |
58 | return; | 58 | return; |
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 9a35320fb59f..54bfc274b39a 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1339,8 +1339,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect | |||
1339 | prepare_to_wait(&bitmap->overflow_wait, &__wait, | 1339 | prepare_to_wait(&bitmap->overflow_wait, &__wait, |
1340 | TASK_UNINTERRUPTIBLE); | 1340 | TASK_UNINTERRUPTIBLE); |
1341 | spin_unlock_irq(&bitmap->lock); | 1341 | spin_unlock_irq(&bitmap->lock); |
1342 | md_unplug(bitmap->mddev); | 1342 | io_schedule(); |
1343 | schedule(); | ||
1344 | finish_wait(&bitmap->overflow_wait, &__wait); | 1343 | finish_wait(&bitmap->overflow_wait, &__wait); |
1345 | continue; | 1344 | continue; |
1346 | } | 1345 | } |
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 4e054bd91664..2c62c1169f78 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -991,11 +991,6 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone) | |||
991 | clone->bi_destructor = dm_crypt_bio_destructor; | 991 | clone->bi_destructor = dm_crypt_bio_destructor; |
992 | } | 992 | } |
993 | 993 | ||
994 | static void kcryptd_unplug(struct crypt_config *cc) | ||
995 | { | ||
996 | blk_unplug(bdev_get_queue(cc->dev->bdev)); | ||
997 | } | ||
998 | |||
999 | static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) | 994 | static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) |
1000 | { | 995 | { |
1001 | struct crypt_config *cc = io->target->private; | 996 | struct crypt_config *cc = io->target->private; |
@@ -1008,10 +1003,8 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) | |||
1008 | * one in order to decrypt the whole bio data *afterwards*. | 1003 | * one in order to decrypt the whole bio data *afterwards*. |
1009 | */ | 1004 | */ |
1010 | clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs); | 1005 | clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs); |
1011 | if (!clone) { | 1006 | if (!clone) |
1012 | kcryptd_unplug(cc); | ||
1013 | return 1; | 1007 | return 1; |
1014 | } | ||
1015 | 1008 | ||
1016 | crypt_inc_pending(io); | 1009 | crypt_inc_pending(io); |
1017 | 1010 | ||
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 924f5f0084c2..400cf35094a4 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -37,13 +37,6 @@ struct dm_kcopyd_client { | |||
37 | unsigned int nr_pages; | 37 | unsigned int nr_pages; |
38 | unsigned int nr_free_pages; | 38 | unsigned int nr_free_pages; |
39 | 39 | ||
40 | /* | ||
41 | * Block devices to unplug. | ||
42 | * Non-NULL pointer means that a block device has some pending requests | ||
43 | * and needs to be unplugged. | ||
44 | */ | ||
45 | struct block_device *unplug[2]; | ||
46 | |||
47 | struct dm_io_client *io_client; | 40 | struct dm_io_client *io_client; |
48 | 41 | ||
49 | wait_queue_head_t destroyq; | 42 | wait_queue_head_t destroyq; |
@@ -315,31 +308,6 @@ static int run_complete_job(struct kcopyd_job *job) | |||
315 | return 0; | 308 | return 0; |
316 | } | 309 | } |
317 | 310 | ||
318 | /* | ||
319 | * Unplug the block device at the specified index. | ||
320 | */ | ||
321 | static void unplug(struct dm_kcopyd_client *kc, int rw) | ||
322 | { | ||
323 | if (kc->unplug[rw] != NULL) { | ||
324 | blk_unplug(bdev_get_queue(kc->unplug[rw])); | ||
325 | kc->unplug[rw] = NULL; | ||
326 | } | ||
327 | } | ||
328 | |||
329 | /* | ||
330 | * Prepare block device unplug. If there's another device | ||
331 | * to be unplugged at the same array index, we unplug that | ||
332 | * device first. | ||
333 | */ | ||
334 | static void prepare_unplug(struct dm_kcopyd_client *kc, int rw, | ||
335 | struct block_device *bdev) | ||
336 | { | ||
337 | if (likely(kc->unplug[rw] == bdev)) | ||
338 | return; | ||
339 | unplug(kc, rw); | ||
340 | kc->unplug[rw] = bdev; | ||
341 | } | ||
342 | |||
343 | static void complete_io(unsigned long error, void *context) | 311 | static void complete_io(unsigned long error, void *context) |
344 | { | 312 | { |
345 | struct kcopyd_job *job = (struct kcopyd_job *) context; | 313 | struct kcopyd_job *job = (struct kcopyd_job *) context; |
@@ -386,15 +354,12 @@ static int run_io_job(struct kcopyd_job *job) | |||
386 | .client = job->kc->io_client, | 354 | .client = job->kc->io_client, |
387 | }; | 355 | }; |
388 | 356 | ||
389 | if (job->rw == READ) { | 357 | if (job->rw == READ) |
390 | r = dm_io(&io_req, 1, &job->source, NULL); | 358 | r = dm_io(&io_req, 1, &job->source, NULL); |
391 | prepare_unplug(job->kc, READ, job->source.bdev); | 359 | else { |
392 | } else { | ||
393 | if (job->num_dests > 1) | 360 | if (job->num_dests > 1) |
394 | io_req.bi_rw |= REQ_UNPLUG; | 361 | io_req.bi_rw |= REQ_UNPLUG; |
395 | r = dm_io(&io_req, job->num_dests, job->dests, NULL); | 362 | r = dm_io(&io_req, job->num_dests, job->dests, NULL); |
396 | if (!(io_req.bi_rw & REQ_UNPLUG)) | ||
397 | prepare_unplug(job->kc, WRITE, job->dests[0].bdev); | ||
398 | } | 363 | } |
399 | 364 | ||
400 | return r; | 365 | return r; |
@@ -466,6 +431,7 @@ static void do_work(struct work_struct *work) | |||
466 | { | 431 | { |
467 | struct dm_kcopyd_client *kc = container_of(work, | 432 | struct dm_kcopyd_client *kc = container_of(work, |
468 | struct dm_kcopyd_client, kcopyd_work); | 433 | struct dm_kcopyd_client, kcopyd_work); |
434 | struct blk_plug plug; | ||
469 | 435 | ||
470 | /* | 436 | /* |
471 | * The order that these are called is *very* important. | 437 | * The order that these are called is *very* important. |
@@ -473,18 +439,12 @@ static void do_work(struct work_struct *work) | |||
473 | * Pages jobs when successful will jump onto the io jobs | 439 | * Pages jobs when successful will jump onto the io jobs |
474 | * list. io jobs call wake when they complete and it all | 440 | * list. io jobs call wake when they complete and it all |
475 | * starts again. | 441 | * starts again. |
476 | * | ||
477 | * Note that io_jobs add block devices to the unplug array, | ||
478 | * this array is cleared with "unplug" calls. It is thus | ||
479 | * forbidden to run complete_jobs after io_jobs and before | ||
480 | * unplug because the block device could be destroyed in | ||
481 | * job completion callback. | ||
482 | */ | 442 | */ |
443 | blk_start_plug(&plug); | ||
483 | process_jobs(&kc->complete_jobs, kc, run_complete_job); | 444 | process_jobs(&kc->complete_jobs, kc, run_complete_job); |
484 | process_jobs(&kc->pages_jobs, kc, run_pages_job); | 445 | process_jobs(&kc->pages_jobs, kc, run_pages_job); |
485 | process_jobs(&kc->io_jobs, kc, run_io_job); | 446 | process_jobs(&kc->io_jobs, kc, run_io_job); |
486 | unplug(kc, READ); | 447 | blk_finish_plug(&plug); |
487 | unplug(kc, WRITE); | ||
488 | } | 448 | } |
489 | 449 | ||
490 | /* | 450 | /* |
@@ -665,8 +625,6 @@ int dm_kcopyd_client_create(unsigned int nr_pages, | |||
665 | INIT_LIST_HEAD(&kc->io_jobs); | 625 | INIT_LIST_HEAD(&kc->io_jobs); |
666 | INIT_LIST_HEAD(&kc->pages_jobs); | 626 | INIT_LIST_HEAD(&kc->pages_jobs); |
667 | 627 | ||
668 | memset(kc->unplug, 0, sizeof(kc->unplug)); | ||
669 | |||
670 | kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache); | 628 | kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache); |
671 | if (!kc->job_pool) | 629 | if (!kc->job_pool) |
672 | goto bad_slab; | 630 | goto bad_slab; |
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index b9e1e15ef11c..5ef136cdba91 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -394,7 +394,7 @@ static void raid_unplug(struct dm_target_callbacks *cb) | |||
394 | { | 394 | { |
395 | struct raid_set *rs = container_of(cb, struct raid_set, callbacks); | 395 | struct raid_set *rs = container_of(cb, struct raid_set, callbacks); |
396 | 396 | ||
397 | md_raid5_unplug_device(rs->md.private); | 397 | md_raid5_kick_device(rs->md.private); |
398 | } | 398 | } |
399 | 399 | ||
400 | /* | 400 | /* |
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index dee326775c60..976ad4688afc 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -842,8 +842,6 @@ static void do_mirror(struct work_struct *work) | |||
842 | do_reads(ms, &reads); | 842 | do_reads(ms, &reads); |
843 | do_writes(ms, &writes); | 843 | do_writes(ms, &writes); |
844 | do_failures(ms, &failures); | 844 | do_failures(ms, &failures); |
845 | |||
846 | dm_table_unplug_all(ms->ti->table); | ||
847 | } | 845 | } |
848 | 846 | ||
849 | /*----------------------------------------------------------------- | 847 | /*----------------------------------------------------------------- |
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 38e4eb1bb965..f50a7b952257 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c | |||
@@ -1275,29 +1275,6 @@ int dm_table_any_busy_target(struct dm_table *t) | |||
1275 | return 0; | 1275 | return 0; |
1276 | } | 1276 | } |
1277 | 1277 | ||
1278 | void dm_table_unplug_all(struct dm_table *t) | ||
1279 | { | ||
1280 | struct dm_dev_internal *dd; | ||
1281 | struct list_head *devices = dm_table_get_devices(t); | ||
1282 | struct dm_target_callbacks *cb; | ||
1283 | |||
1284 | list_for_each_entry(dd, devices, list) { | ||
1285 | struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev); | ||
1286 | char b[BDEVNAME_SIZE]; | ||
1287 | |||
1288 | if (likely(q)) | ||
1289 | blk_unplug(q); | ||
1290 | else | ||
1291 | DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s", | ||
1292 | dm_device_name(t->md), | ||
1293 | bdevname(dd->dm_dev.bdev, b)); | ||
1294 | } | ||
1295 | |||
1296 | list_for_each_entry(cb, &t->target_callbacks, list) | ||
1297 | if (cb->unplug_fn) | ||
1298 | cb->unplug_fn(cb); | ||
1299 | } | ||
1300 | |||
1301 | struct mapped_device *dm_table_get_md(struct dm_table *t) | 1278 | struct mapped_device *dm_table_get_md(struct dm_table *t) |
1302 | { | 1279 | { |
1303 | return t->md; | 1280 | return t->md; |
@@ -1345,4 +1322,3 @@ EXPORT_SYMBOL(dm_table_get_mode); | |||
1345 | EXPORT_SYMBOL(dm_table_get_md); | 1322 | EXPORT_SYMBOL(dm_table_get_md); |
1346 | EXPORT_SYMBOL(dm_table_put); | 1323 | EXPORT_SYMBOL(dm_table_put); |
1347 | EXPORT_SYMBOL(dm_table_get); | 1324 | EXPORT_SYMBOL(dm_table_get); |
1348 | EXPORT_SYMBOL(dm_table_unplug_all); | ||
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index eaa3af0e0632..d22b9905c168 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -807,8 +807,6 @@ void dm_requeue_unmapped_request(struct request *clone) | |||
807 | dm_unprep_request(rq); | 807 | dm_unprep_request(rq); |
808 | 808 | ||
809 | spin_lock_irqsave(q->queue_lock, flags); | 809 | spin_lock_irqsave(q->queue_lock, flags); |
810 | if (elv_queue_empty(q)) | ||
811 | blk_plug_device(q); | ||
812 | blk_requeue_request(q, rq); | 810 | blk_requeue_request(q, rq); |
813 | spin_unlock_irqrestore(q->queue_lock, flags); | 811 | spin_unlock_irqrestore(q->queue_lock, flags); |
814 | 812 | ||
@@ -1613,10 +1611,10 @@ static void dm_request_fn(struct request_queue *q) | |||
1613 | * number of in-flight I/Os after the queue is stopped in | 1611 | * number of in-flight I/Os after the queue is stopped in |
1614 | * dm_suspend(). | 1612 | * dm_suspend(). |
1615 | */ | 1613 | */ |
1616 | while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) { | 1614 | while (!blk_queue_stopped(q)) { |
1617 | rq = blk_peek_request(q); | 1615 | rq = blk_peek_request(q); |
1618 | if (!rq) | 1616 | if (!rq) |
1619 | goto plug_and_out; | 1617 | goto delay_and_out; |
1620 | 1618 | ||
1621 | /* always use block 0 to find the target for flushes for now */ | 1619 | /* always use block 0 to find the target for flushes for now */ |
1622 | pos = 0; | 1620 | pos = 0; |
@@ -1627,7 +1625,7 @@ static void dm_request_fn(struct request_queue *q) | |||
1627 | BUG_ON(!dm_target_is_valid(ti)); | 1625 | BUG_ON(!dm_target_is_valid(ti)); |
1628 | 1626 | ||
1629 | if (ti->type->busy && ti->type->busy(ti)) | 1627 | if (ti->type->busy && ti->type->busy(ti)) |
1630 | goto plug_and_out; | 1628 | goto delay_and_out; |
1631 | 1629 | ||
1632 | blk_start_request(rq); | 1630 | blk_start_request(rq); |
1633 | clone = rq->special; | 1631 | clone = rq->special; |
@@ -1647,11 +1645,8 @@ requeued: | |||
1647 | BUG_ON(!irqs_disabled()); | 1645 | BUG_ON(!irqs_disabled()); |
1648 | spin_lock(q->queue_lock); | 1646 | spin_lock(q->queue_lock); |
1649 | 1647 | ||
1650 | plug_and_out: | 1648 | delay_and_out: |
1651 | if (!elv_queue_empty(q)) | 1649 | blk_delay_queue(q, HZ / 10); |
1652 | /* Some requests still remain, retry later */ | ||
1653 | blk_plug_device(q); | ||
1654 | |||
1655 | out: | 1650 | out: |
1656 | dm_table_put(map); | 1651 | dm_table_put(map); |
1657 | 1652 | ||
@@ -1680,20 +1675,6 @@ static int dm_lld_busy(struct request_queue *q) | |||
1680 | return r; | 1675 | return r; |
1681 | } | 1676 | } |
1682 | 1677 | ||
1683 | static void dm_unplug_all(struct request_queue *q) | ||
1684 | { | ||
1685 | struct mapped_device *md = q->queuedata; | ||
1686 | struct dm_table *map = dm_get_live_table(md); | ||
1687 | |||
1688 | if (map) { | ||
1689 | if (dm_request_based(md)) | ||
1690 | generic_unplug_device(q); | ||
1691 | |||
1692 | dm_table_unplug_all(map); | ||
1693 | dm_table_put(map); | ||
1694 | } | ||
1695 | } | ||
1696 | |||
1697 | static int dm_any_congested(void *congested_data, int bdi_bits) | 1678 | static int dm_any_congested(void *congested_data, int bdi_bits) |
1698 | { | 1679 | { |
1699 | int r = bdi_bits; | 1680 | int r = bdi_bits; |
@@ -1817,7 +1798,6 @@ static void dm_init_md_queue(struct mapped_device *md) | |||
1817 | md->queue->backing_dev_info.congested_data = md; | 1798 | md->queue->backing_dev_info.congested_data = md; |
1818 | blk_queue_make_request(md->queue, dm_request); | 1799 | blk_queue_make_request(md->queue, dm_request); |
1819 | blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); | 1800 | blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); |
1820 | md->queue->unplug_fn = dm_unplug_all; | ||
1821 | blk_queue_merge_bvec(md->queue, dm_merge_bvec); | 1801 | blk_queue_merge_bvec(md->queue, dm_merge_bvec); |
1822 | blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA); | 1802 | blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA); |
1823 | } | 1803 | } |
@@ -2263,8 +2243,6 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible) | |||
2263 | int r = 0; | 2243 | int r = 0; |
2264 | DECLARE_WAITQUEUE(wait, current); | 2244 | DECLARE_WAITQUEUE(wait, current); |
2265 | 2245 | ||
2266 | dm_unplug_all(md->queue); | ||
2267 | |||
2268 | add_wait_queue(&md->wait, &wait); | 2246 | add_wait_queue(&md->wait, &wait); |
2269 | 2247 | ||
2270 | while (1) { | 2248 | while (1) { |
@@ -2539,7 +2517,6 @@ int dm_resume(struct mapped_device *md) | |||
2539 | 2517 | ||
2540 | clear_bit(DMF_SUSPENDED, &md->flags); | 2518 | clear_bit(DMF_SUSPENDED, &md->flags); |
2541 | 2519 | ||
2542 | dm_table_unplug_all(map); | ||
2543 | r = 0; | 2520 | r = 0; |
2544 | out: | 2521 | out: |
2545 | dm_table_put(map); | 2522 | dm_table_put(map); |
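The request-based dm path above can no longer park work behind a queue plug, so the "target busy / nothing ready" cases now re-arm the queue with blk_delay_queue(). Roughly, the resulting shape of a request_fn (the name and the busy check are illustrative; queue_lock handling is omitted for brevity):

	static void sketch_request_fn(struct request_queue *q)
	{
		struct request *rq;

		while (!blk_queue_stopped(q)) {
			rq = blk_peek_request(q);
			if (!rq)
				goto delay_and_out;
			if (target_is_busy())		/* hypothetical busy check */
				goto delay_and_out;
			blk_start_request(rq);
			/* hand rq to the underlying device here */
		}
		return;

	delay_and_out:
		/* no unplug timer to fall back on: re-run the queue a bit later */
		blk_delay_queue(q, HZ / 10);
	}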
diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 8a2f767f26d8..38861b5b9d90 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c | |||
@@ -87,22 +87,6 @@ static int linear_mergeable_bvec(struct request_queue *q, | |||
87 | return maxsectors << 9; | 87 | return maxsectors << 9; |
88 | } | 88 | } |
89 | 89 | ||
90 | static void linear_unplug(struct request_queue *q) | ||
91 | { | ||
92 | mddev_t *mddev = q->queuedata; | ||
93 | linear_conf_t *conf; | ||
94 | int i; | ||
95 | |||
96 | rcu_read_lock(); | ||
97 | conf = rcu_dereference(mddev->private); | ||
98 | |||
99 | for (i=0; i < mddev->raid_disks; i++) { | ||
100 | struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev); | ||
101 | blk_unplug(r_queue); | ||
102 | } | ||
103 | rcu_read_unlock(); | ||
104 | } | ||
105 | |||
106 | static int linear_congested(void *data, int bits) | 90 | static int linear_congested(void *data, int bits) |
107 | { | 91 | { |
108 | mddev_t *mddev = data; | 92 | mddev_t *mddev = data; |
@@ -225,7 +209,6 @@ static int linear_run (mddev_t *mddev) | |||
225 | md_set_array_sectors(mddev, linear_size(mddev, 0, 0)); | 209 | md_set_array_sectors(mddev, linear_size(mddev, 0, 0)); |
226 | 210 | ||
227 | blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec); | 211 | blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec); |
228 | mddev->queue->unplug_fn = linear_unplug; | ||
229 | mddev->queue->backing_dev_info.congested_fn = linear_congested; | 212 | mddev->queue->backing_dev_info.congested_fn = linear_congested; |
230 | mddev->queue->backing_dev_info.congested_data = mddev; | 213 | mddev->queue->backing_dev_info.congested_data = mddev; |
231 | md_integrity_register(mddev); | 214 | md_integrity_register(mddev); |
diff --git a/drivers/md/md.c b/drivers/md/md.c index 0cc30ecda4c1..ca0d79c264b9 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -4812,7 +4812,6 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) | |||
4812 | __md_stop_writes(mddev); | 4812 | __md_stop_writes(mddev); |
4813 | md_stop(mddev); | 4813 | md_stop(mddev); |
4814 | mddev->queue->merge_bvec_fn = NULL; | 4814 | mddev->queue->merge_bvec_fn = NULL; |
4815 | mddev->queue->unplug_fn = NULL; | ||
4816 | mddev->queue->backing_dev_info.congested_fn = NULL; | 4815 | mddev->queue->backing_dev_info.congested_fn = NULL; |
4817 | 4816 | ||
4818 | /* tell userspace to handle 'inactive' */ | 4817 | /* tell userspace to handle 'inactive' */ |
@@ -6669,8 +6668,6 @@ EXPORT_SYMBOL_GPL(md_allow_write); | |||
6669 | 6668 | ||
6670 | void md_unplug(mddev_t *mddev) | 6669 | void md_unplug(mddev_t *mddev) |
6671 | { | 6670 | { |
6672 | if (mddev->queue) | ||
6673 | blk_unplug(mddev->queue); | ||
6674 | if (mddev->plug) | 6671 | if (mddev->plug) |
6675 | mddev->plug->unplug_fn(mddev->plug); | 6672 | mddev->plug->unplug_fn(mddev->plug); |
6676 | } | 6673 | } |
@@ -6853,7 +6850,6 @@ void md_do_sync(mddev_t *mddev) | |||
6853 | >= mddev->resync_max - mddev->curr_resync_completed | 6850 | >= mddev->resync_max - mddev->curr_resync_completed |
6854 | )) { | 6851 | )) { |
6855 | /* time to update curr_resync_completed */ | 6852 | /* time to update curr_resync_completed */ |
6856 | md_unplug(mddev); | ||
6857 | wait_event(mddev->recovery_wait, | 6853 | wait_event(mddev->recovery_wait, |
6858 | atomic_read(&mddev->recovery_active) == 0); | 6854 | atomic_read(&mddev->recovery_active) == 0); |
6859 | mddev->curr_resync_completed = j; | 6855 | mddev->curr_resync_completed = j; |
@@ -6929,7 +6925,6 @@ void md_do_sync(mddev_t *mddev) | |||
6929 | * about not overloading the IO subsystem. (things like an | 6925 | * about not overloading the IO subsystem. (things like an |
6930 | * e2fsck being done on the RAID array should execute fast) | 6926 | * e2fsck being done on the RAID array should execute fast) |
6931 | */ | 6927 | */ |
6932 | md_unplug(mddev); | ||
6933 | cond_resched(); | 6928 | cond_resched(); |
6934 | 6929 | ||
6935 | currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2 | 6930 | currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2 |
@@ -6948,8 +6943,6 @@ void md_do_sync(mddev_t *mddev) | |||
6948 | * this also signals 'finished resyncing' to md_stop | 6943 | * this also signals 'finished resyncing' to md_stop |
6949 | */ | 6944 | */ |
6950 | out: | 6945 | out: |
6951 | md_unplug(mddev); | ||
6952 | |||
6953 | wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); | 6946 | wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); |
6954 | 6947 | ||
6955 | /* tell personality that we are finished */ | 6948 | /* tell personality that we are finished */ |
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 6d7ddf32ef2e..1cc8ed44e4ad 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c | |||
@@ -106,36 +106,6 @@ static void multipath_end_request(struct bio *bio, int error) | |||
106 | rdev_dec_pending(rdev, conf->mddev); | 106 | rdev_dec_pending(rdev, conf->mddev); |
107 | } | 107 | } |
108 | 108 | ||
109 | static void unplug_slaves(mddev_t *mddev) | ||
110 | { | ||
111 | multipath_conf_t *conf = mddev->private; | ||
112 | int i; | ||
113 | |||
114 | rcu_read_lock(); | ||
115 | for (i=0; i<mddev->raid_disks; i++) { | ||
116 | mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev); | ||
117 | if (rdev && !test_bit(Faulty, &rdev->flags) | ||
118 | && atomic_read(&rdev->nr_pending)) { | ||
119 | struct request_queue *r_queue = bdev_get_queue(rdev->bdev); | ||
120 | |||
121 | atomic_inc(&rdev->nr_pending); | ||
122 | rcu_read_unlock(); | ||
123 | |||
124 | blk_unplug(r_queue); | ||
125 | |||
126 | rdev_dec_pending(rdev, mddev); | ||
127 | rcu_read_lock(); | ||
128 | } | ||
129 | } | ||
130 | rcu_read_unlock(); | ||
131 | } | ||
132 | |||
133 | static void multipath_unplug(struct request_queue *q) | ||
134 | { | ||
135 | unplug_slaves(q->queuedata); | ||
136 | } | ||
137 | |||
138 | |||
139 | static int multipath_make_request(mddev_t *mddev, struct bio * bio) | 109 | static int multipath_make_request(mddev_t *mddev, struct bio * bio) |
140 | { | 110 | { |
141 | multipath_conf_t *conf = mddev->private; | 111 | multipath_conf_t *conf = mddev->private; |
@@ -518,7 +488,6 @@ static int multipath_run (mddev_t *mddev) | |||
518 | */ | 488 | */ |
519 | md_set_array_sectors(mddev, multipath_size(mddev, 0, 0)); | 489 | md_set_array_sectors(mddev, multipath_size(mddev, 0, 0)); |
520 | 490 | ||
521 | mddev->queue->unplug_fn = multipath_unplug; | ||
522 | mddev->queue->backing_dev_info.congested_fn = multipath_congested; | 491 | mddev->queue->backing_dev_info.congested_fn = multipath_congested; |
523 | mddev->queue->backing_dev_info.congested_data = mddev; | 492 | mddev->queue->backing_dev_info.congested_data = mddev; |
524 | md_integrity_register(mddev); | 493 | md_integrity_register(mddev); |
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 637a96855edb..6338c0fe6208 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c | |||
@@ -25,21 +25,6 @@ | |||
25 | #include "raid0.h" | 25 | #include "raid0.h" |
26 | #include "raid5.h" | 26 | #include "raid5.h" |
27 | 27 | ||
28 | static void raid0_unplug(struct request_queue *q) | ||
29 | { | ||
30 | mddev_t *mddev = q->queuedata; | ||
31 | raid0_conf_t *conf = mddev->private; | ||
32 | mdk_rdev_t **devlist = conf->devlist; | ||
33 | int raid_disks = conf->strip_zone[0].nb_dev; | ||
34 | int i; | ||
35 | |||
36 | for (i=0; i < raid_disks; i++) { | ||
37 | struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev); | ||
38 | |||
39 | blk_unplug(r_queue); | ||
40 | } | ||
41 | } | ||
42 | |||
43 | static int raid0_congested(void *data, int bits) | 28 | static int raid0_congested(void *data, int bits) |
44 | { | 29 | { |
45 | mddev_t *mddev = data; | 30 | mddev_t *mddev = data; |
@@ -272,7 +257,6 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf) | |||
272 | mdname(mddev), | 257 | mdname(mddev), |
273 | (unsigned long long)smallest->sectors); | 258 | (unsigned long long)smallest->sectors); |
274 | } | 259 | } |
275 | mddev->queue->unplug_fn = raid0_unplug; | ||
276 | mddev->queue->backing_dev_info.congested_fn = raid0_congested; | 260 | mddev->queue->backing_dev_info.congested_fn = raid0_congested; |
277 | mddev->queue->backing_dev_info.congested_data = mddev; | 261 | mddev->queue->backing_dev_info.congested_data = mddev; |
278 | 262 | ||
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index a23ffa397ba9..b67d822d57ae 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -52,23 +52,16 @@ | |||
52 | #define NR_RAID1_BIOS 256 | 52 | #define NR_RAID1_BIOS 256 |
53 | 53 | ||
54 | 54 | ||
55 | static void unplug_slaves(mddev_t *mddev); | ||
56 | |||
57 | static void allow_barrier(conf_t *conf); | 55 | static void allow_barrier(conf_t *conf); |
58 | static void lower_barrier(conf_t *conf); | 56 | static void lower_barrier(conf_t *conf); |
59 | 57 | ||
60 | static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) | 58 | static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) |
61 | { | 59 | { |
62 | struct pool_info *pi = data; | 60 | struct pool_info *pi = data; |
63 | r1bio_t *r1_bio; | ||
64 | int size = offsetof(r1bio_t, bios[pi->raid_disks]); | 61 | int size = offsetof(r1bio_t, bios[pi->raid_disks]); |
65 | 62 | ||
66 | /* allocate a r1bio with room for raid_disks entries in the bios array */ | 63 | /* allocate a r1bio with room for raid_disks entries in the bios array */ |
67 | r1_bio = kzalloc(size, gfp_flags); | 64 | return kzalloc(size, gfp_flags); |
68 | if (!r1_bio && pi->mddev) | ||
69 | unplug_slaves(pi->mddev); | ||
70 | |||
71 | return r1_bio; | ||
72 | } | 65 | } |
73 | 66 | ||
74 | static void r1bio_pool_free(void *r1_bio, void *data) | 67 | static void r1bio_pool_free(void *r1_bio, void *data) |
@@ -91,10 +84,8 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) | |||
91 | int i, j; | 84 | int i, j; |
92 | 85 | ||
93 | r1_bio = r1bio_pool_alloc(gfp_flags, pi); | 86 | r1_bio = r1bio_pool_alloc(gfp_flags, pi); |
94 | if (!r1_bio) { | 87 | if (!r1_bio) |
95 | unplug_slaves(pi->mddev); | ||
96 | return NULL; | 88 | return NULL; |
97 | } | ||
98 | 89 | ||
99 | /* | 90 | /* |
100 | * Allocate bios : 1 for reading, n-1 for writing | 91 | * Allocate bios : 1 for reading, n-1 for writing |
@@ -520,37 +511,6 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) | |||
520 | return new_disk; | 511 | return new_disk; |
521 | } | 512 | } |
522 | 513 | ||
523 | static void unplug_slaves(mddev_t *mddev) | ||
524 | { | ||
525 | conf_t *conf = mddev->private; | ||
526 | int i; | ||
527 | |||
528 | rcu_read_lock(); | ||
529 | for (i=0; i<mddev->raid_disks; i++) { | ||
530 | mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); | ||
531 | if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { | ||
532 | struct request_queue *r_queue = bdev_get_queue(rdev->bdev); | ||
533 | |||
534 | atomic_inc(&rdev->nr_pending); | ||
535 | rcu_read_unlock(); | ||
536 | |||
537 | blk_unplug(r_queue); | ||
538 | |||
539 | rdev_dec_pending(rdev, mddev); | ||
540 | rcu_read_lock(); | ||
541 | } | ||
542 | } | ||
543 | rcu_read_unlock(); | ||
544 | } | ||
545 | |||
546 | static void raid1_unplug(struct request_queue *q) | ||
547 | { | ||
548 | mddev_t *mddev = q->queuedata; | ||
549 | |||
550 | unplug_slaves(mddev); | ||
551 | md_wakeup_thread(mddev->thread); | ||
552 | } | ||
553 | |||
554 | static int raid1_congested(void *data, int bits) | 514 | static int raid1_congested(void *data, int bits) |
555 | { | 515 | { |
556 | mddev_t *mddev = data; | 516 | mddev_t *mddev = data; |
@@ -580,20 +540,16 @@ static int raid1_congested(void *data, int bits) | |||
580 | } | 540 | } |
581 | 541 | ||
582 | 542 | ||
583 | static int flush_pending_writes(conf_t *conf) | 543 | static void flush_pending_writes(conf_t *conf) |
584 | { | 544 | { |
585 | /* Any writes that have been queued but are awaiting | 545 | /* Any writes that have been queued but are awaiting |
586 | * bitmap updates get flushed here. | 546 | * bitmap updates get flushed here. |
587 | * We return 1 if any requests were actually submitted. | ||
588 | */ | 547 | */ |
589 | int rv = 0; | ||
590 | |||
591 | spin_lock_irq(&conf->device_lock); | 548 | spin_lock_irq(&conf->device_lock); |
592 | 549 | ||
593 | if (conf->pending_bio_list.head) { | 550 | if (conf->pending_bio_list.head) { |
594 | struct bio *bio; | 551 | struct bio *bio; |
595 | bio = bio_list_get(&conf->pending_bio_list); | 552 | bio = bio_list_get(&conf->pending_bio_list); |
596 | blk_remove_plug(conf->mddev->queue); | ||
597 | spin_unlock_irq(&conf->device_lock); | 553 | spin_unlock_irq(&conf->device_lock); |
598 | /* flush any pending bitmap writes to | 554 | /* flush any pending bitmap writes to |
599 | * disk before proceeding w/ I/O */ | 555 | * disk before proceeding w/ I/O */ |
@@ -605,10 +561,14 @@ static int flush_pending_writes(conf_t *conf) | |||
605 | generic_make_request(bio); | 561 | generic_make_request(bio); |
606 | bio = next; | 562 | bio = next; |
607 | } | 563 | } |
608 | rv = 1; | ||
609 | } else | 564 | } else |
610 | spin_unlock_irq(&conf->device_lock); | 565 | spin_unlock_irq(&conf->device_lock); |
611 | return rv; | 566 | } |
567 | |||
568 | static void md_kick_device(mddev_t *mddev) | ||
569 | { | ||
570 | blk_flush_plug(current); | ||
571 | md_wakeup_thread(mddev->thread); | ||
612 | } | 572 | } |
613 | 573 | ||
614 | /* Barriers.... | 574 | /* Barriers.... |
@@ -640,8 +600,7 @@ static void raise_barrier(conf_t *conf) | |||
640 | 600 | ||
641 | /* Wait until no block IO is waiting */ | 601 | /* Wait until no block IO is waiting */ |
642 | wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting, | 602 | wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting, |
643 | conf->resync_lock, | 603 | conf->resync_lock, md_kick_device(conf->mddev)); |
644 | raid1_unplug(conf->mddev->queue)); | ||
645 | 604 | ||
646 | /* block any new IO from starting */ | 605 | /* block any new IO from starting */ |
647 | conf->barrier++; | 606 | conf->barrier++; |
@@ -649,8 +608,7 @@ static void raise_barrier(conf_t *conf) | |||
649 | /* Now wait for all pending IO to complete */ | 608 | /* Now wait for all pending IO to complete */ |
650 | wait_event_lock_irq(conf->wait_barrier, | 609 | wait_event_lock_irq(conf->wait_barrier, |
651 | !conf->nr_pending && conf->barrier < RESYNC_DEPTH, | 610 | !conf->nr_pending && conf->barrier < RESYNC_DEPTH, |
652 | conf->resync_lock, | 611 | conf->resync_lock, md_kick_device(conf->mddev)); |
653 | raid1_unplug(conf->mddev->queue)); | ||
654 | 612 | ||
655 | spin_unlock_irq(&conf->resync_lock); | 613 | spin_unlock_irq(&conf->resync_lock); |
656 | } | 614 | } |
@@ -672,7 +630,7 @@ static void wait_barrier(conf_t *conf) | |||
672 | conf->nr_waiting++; | 630 | conf->nr_waiting++; |
673 | wait_event_lock_irq(conf->wait_barrier, !conf->barrier, | 631 | wait_event_lock_irq(conf->wait_barrier, !conf->barrier, |
674 | conf->resync_lock, | 632 | conf->resync_lock, |
675 | raid1_unplug(conf->mddev->queue)); | 633 | md_kick_device(conf->mddev)); |
676 | conf->nr_waiting--; | 634 | conf->nr_waiting--; |
677 | } | 635 | } |
678 | conf->nr_pending++; | 636 | conf->nr_pending++; |
@@ -709,7 +667,7 @@ static void freeze_array(conf_t *conf) | |||
709 | conf->nr_pending == conf->nr_queued+1, | 667 | conf->nr_pending == conf->nr_queued+1, |
710 | conf->resync_lock, | 668 | conf->resync_lock, |
711 | ({ flush_pending_writes(conf); | 669 | ({ flush_pending_writes(conf); |
712 | raid1_unplug(conf->mddev->queue); })); | 670 | md_kick_device(conf->mddev); })); |
713 | spin_unlock_irq(&conf->resync_lock); | 671 | spin_unlock_irq(&conf->resync_lock); |
714 | } | 672 | } |
715 | static void unfreeze_array(conf_t *conf) | 673 | static void unfreeze_array(conf_t *conf) |
@@ -959,7 +917,6 @@ static int make_request(mddev_t *mddev, struct bio * bio) | |||
959 | atomic_inc(&r1_bio->remaining); | 917 | atomic_inc(&r1_bio->remaining); |
960 | spin_lock_irqsave(&conf->device_lock, flags); | 918 | spin_lock_irqsave(&conf->device_lock, flags); |
961 | bio_list_add(&conf->pending_bio_list, mbio); | 919 | bio_list_add(&conf->pending_bio_list, mbio); |
962 | blk_plug_device(mddev->queue); | ||
963 | spin_unlock_irqrestore(&conf->device_lock, flags); | 920 | spin_unlock_irqrestore(&conf->device_lock, flags); |
964 | } | 921 | } |
965 | r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL); | 922 | r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL); |
@@ -968,7 +925,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) | |||
968 | /* In case raid1d snuck in to freeze_array */ | 925 | /* In case raid1d snuck in to freeze_array */ |
969 | wake_up(&conf->wait_barrier); | 926 | wake_up(&conf->wait_barrier); |
970 | 927 | ||
971 | if (do_sync) | 928 | if (do_sync || !bitmap) |
972 | md_wakeup_thread(mddev->thread); | 929 | md_wakeup_thread(mddev->thread); |
973 | 930 | ||
974 | return 0; | 931 | return 0; |
@@ -1558,7 +1515,6 @@ static void raid1d(mddev_t *mddev) | |||
1558 | unsigned long flags; | 1515 | unsigned long flags; |
1559 | conf_t *conf = mddev->private; | 1516 | conf_t *conf = mddev->private; |
1560 | struct list_head *head = &conf->retry_list; | 1517 | struct list_head *head = &conf->retry_list; |
1561 | int unplug=0; | ||
1562 | mdk_rdev_t *rdev; | 1518 | mdk_rdev_t *rdev; |
1563 | 1519 | ||
1564 | md_check_recovery(mddev); | 1520 | md_check_recovery(mddev); |
@@ -1566,7 +1522,7 @@ static void raid1d(mddev_t *mddev) | |||
1566 | for (;;) { | 1522 | for (;;) { |
1567 | char b[BDEVNAME_SIZE]; | 1523 | char b[BDEVNAME_SIZE]; |
1568 | 1524 | ||
1569 | unplug += flush_pending_writes(conf); | 1525 | flush_pending_writes(conf); |
1570 | 1526 | ||
1571 | spin_lock_irqsave(&conf->device_lock, flags); | 1527 | spin_lock_irqsave(&conf->device_lock, flags); |
1572 | if (list_empty(head)) { | 1528 | if (list_empty(head)) { |
@@ -1580,10 +1536,9 @@ static void raid1d(mddev_t *mddev) | |||
1580 | 1536 | ||
1581 | mddev = r1_bio->mddev; | 1537 | mddev = r1_bio->mddev; |
1582 | conf = mddev->private; | 1538 | conf = mddev->private; |
1583 | if (test_bit(R1BIO_IsSync, &r1_bio->state)) { | 1539 | if (test_bit(R1BIO_IsSync, &r1_bio->state)) |
1584 | sync_request_write(mddev, r1_bio); | 1540 | sync_request_write(mddev, r1_bio); |
1585 | unplug = 1; | 1541 | else { |
1586 | } else { | ||
1587 | int disk; | 1542 | int disk; |
1588 | 1543 | ||
1589 | /* we got a read error. Maybe the drive is bad. Maybe just | 1544 | /* we got a read error. Maybe the drive is bad. Maybe just |
@@ -1633,14 +1588,11 @@ static void raid1d(mddev_t *mddev) | |||
1633 | bio->bi_end_io = raid1_end_read_request; | 1588 | bio->bi_end_io = raid1_end_read_request; |
1634 | bio->bi_rw = READ | do_sync; | 1589 | bio->bi_rw = READ | do_sync; |
1635 | bio->bi_private = r1_bio; | 1590 | bio->bi_private = r1_bio; |
1636 | unplug = 1; | ||
1637 | generic_make_request(bio); | 1591 | generic_make_request(bio); |
1638 | } | 1592 | } |
1639 | } | 1593 | } |
1640 | cond_resched(); | 1594 | cond_resched(); |
1641 | } | 1595 | } |
1642 | if (unplug) | ||
1643 | unplug_slaves(mddev); | ||
1644 | } | 1596 | } |
1645 | 1597 | ||
1646 | 1598 | ||
@@ -2064,7 +2016,6 @@ static int run(mddev_t *mddev) | |||
2064 | 2016 | ||
2065 | md_set_array_sectors(mddev, raid1_size(mddev, 0, 0)); | 2017 | md_set_array_sectors(mddev, raid1_size(mddev, 0, 0)); |
2066 | 2018 | ||
2067 | mddev->queue->unplug_fn = raid1_unplug; | ||
2068 | mddev->queue->backing_dev_info.congested_fn = raid1_congested; | 2019 | mddev->queue->backing_dev_info.congested_fn = raid1_congested; |
2069 | mddev->queue->backing_dev_info.congested_data = mddev; | 2020 | mddev->queue->backing_dev_info.congested_data = mddev; |
2070 | md_integrity_register(mddev); | 2021 | md_integrity_register(mddev); |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 3b607b28741b..e79f1c5bf71b 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -57,23 +57,16 @@ | |||
57 | */ | 57 | */ |
58 | #define NR_RAID10_BIOS 256 | 58 | #define NR_RAID10_BIOS 256 |
59 | 59 | ||
60 | static void unplug_slaves(mddev_t *mddev); | ||
61 | |||
62 | static void allow_barrier(conf_t *conf); | 60 | static void allow_barrier(conf_t *conf); |
63 | static void lower_barrier(conf_t *conf); | 61 | static void lower_barrier(conf_t *conf); |
64 | 62 | ||
65 | static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) | 63 | static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) |
66 | { | 64 | { |
67 | conf_t *conf = data; | 65 | conf_t *conf = data; |
68 | r10bio_t *r10_bio; | ||
69 | int size = offsetof(struct r10bio_s, devs[conf->copies]); | 66 | int size = offsetof(struct r10bio_s, devs[conf->copies]); |
70 | 67 | ||
71 | /* allocate a r10bio with room for raid_disks entries in the bios array */ | 68 | /* allocate a r10bio with room for raid_disks entries in the bios array */ |
72 | r10_bio = kzalloc(size, gfp_flags); | 69 | return kzalloc(size, gfp_flags); |
73 | if (!r10_bio && conf->mddev) | ||
74 | unplug_slaves(conf->mddev); | ||
75 | |||
76 | return r10_bio; | ||
77 | } | 70 | } |
78 | 71 | ||
79 | static void r10bio_pool_free(void *r10_bio, void *data) | 72 | static void r10bio_pool_free(void *r10_bio, void *data) |
@@ -106,10 +99,8 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data) | |||
106 | int nalloc; | 99 | int nalloc; |
107 | 100 | ||
108 | r10_bio = r10bio_pool_alloc(gfp_flags, conf); | 101 | r10_bio = r10bio_pool_alloc(gfp_flags, conf); |
109 | if (!r10_bio) { | 102 | if (!r10_bio) |
110 | unplug_slaves(conf->mddev); | ||
111 | return NULL; | 103 | return NULL; |
112 | } | ||
113 | 104 | ||
114 | if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery)) | 105 | if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery)) |
115 | nalloc = conf->copies; /* resync */ | 106 | nalloc = conf->copies; /* resync */ |
@@ -597,37 +588,6 @@ rb_out: | |||
597 | return disk; | 588 | return disk; |
598 | } | 589 | } |
599 | 590 | ||
600 | static void unplug_slaves(mddev_t *mddev) | ||
601 | { | ||
602 | conf_t *conf = mddev->private; | ||
603 | int i; | ||
604 | |||
605 | rcu_read_lock(); | ||
606 | for (i=0; i < conf->raid_disks; i++) { | ||
607 | mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); | ||
608 | if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { | ||
609 | struct request_queue *r_queue = bdev_get_queue(rdev->bdev); | ||
610 | |||
611 | atomic_inc(&rdev->nr_pending); | ||
612 | rcu_read_unlock(); | ||
613 | |||
614 | blk_unplug(r_queue); | ||
615 | |||
616 | rdev_dec_pending(rdev, mddev); | ||
617 | rcu_read_lock(); | ||
618 | } | ||
619 | } | ||
620 | rcu_read_unlock(); | ||
621 | } | ||
622 | |||
623 | static void raid10_unplug(struct request_queue *q) | ||
624 | { | ||
625 | mddev_t *mddev = q->queuedata; | ||
626 | |||
627 | unplug_slaves(q->queuedata); | ||
628 | md_wakeup_thread(mddev->thread); | ||
629 | } | ||
630 | |||
631 | static int raid10_congested(void *data, int bits) | 591 | static int raid10_congested(void *data, int bits) |
632 | { | 592 | { |
633 | mddev_t *mddev = data; | 593 | mddev_t *mddev = data; |
@@ -649,20 +609,16 @@ static int raid10_congested(void *data, int bits) | |||
649 | return ret; | 609 | return ret; |
650 | } | 610 | } |
651 | 611 | ||
652 | static int flush_pending_writes(conf_t *conf) | 612 | static void flush_pending_writes(conf_t *conf) |
653 | { | 613 | { |
654 | /* Any writes that have been queued but are awaiting | 614 | /* Any writes that have been queued but are awaiting |
655 | * bitmap updates get flushed here. | 615 | * bitmap updates get flushed here. |
656 | * We return 1 if any requests were actually submitted. | ||
657 | */ | 616 | */ |
658 | int rv = 0; | ||
659 | |||
660 | spin_lock_irq(&conf->device_lock); | 617 | spin_lock_irq(&conf->device_lock); |
661 | 618 | ||
662 | if (conf->pending_bio_list.head) { | 619 | if (conf->pending_bio_list.head) { |
663 | struct bio *bio; | 620 | struct bio *bio; |
664 | bio = bio_list_get(&conf->pending_bio_list); | 621 | bio = bio_list_get(&conf->pending_bio_list); |
665 | blk_remove_plug(conf->mddev->queue); | ||
666 | spin_unlock_irq(&conf->device_lock); | 622 | spin_unlock_irq(&conf->device_lock); |
667 | /* flush any pending bitmap writes to disk | 623 | /* flush any pending bitmap writes to disk |
668 | * before proceeding w/ I/O */ | 624 | * before proceeding w/ I/O */ |
@@ -674,11 +630,16 @@ static int flush_pending_writes(conf_t *conf) | |||
674 | generic_make_request(bio); | 630 | generic_make_request(bio); |
675 | bio = next; | 631 | bio = next; |
676 | } | 632 | } |
677 | rv = 1; | ||
678 | } else | 633 | } else |
679 | spin_unlock_irq(&conf->device_lock); | 634 | spin_unlock_irq(&conf->device_lock); |
680 | return rv; | ||
681 | } | 635 | } |
636 | |||
637 | static void md_kick_device(mddev_t *mddev) | ||
638 | { | ||
639 | blk_flush_plug(current); | ||
640 | md_wakeup_thread(mddev->thread); | ||
641 | } | ||
642 | |||
682 | /* Barriers.... | 643 | /* Barriers.... |
683 | * Sometimes we need to suspend IO while we do something else, | 644 | * Sometimes we need to suspend IO while we do something else, |
684 | * either some resync/recovery, or reconfigure the array. | 645 | * either some resync/recovery, or reconfigure the array. |
@@ -708,8 +669,7 @@ static void raise_barrier(conf_t *conf, int force) | |||
708 | 669 | ||
709 | /* Wait until no block IO is waiting (unless 'force') */ | 670 | /* Wait until no block IO is waiting (unless 'force') */ |
710 | wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting, | 671 | wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting, |
711 | conf->resync_lock, | 672 | conf->resync_lock, md_kick_device(conf->mddev)); |
712 | raid10_unplug(conf->mddev->queue)); | ||
713 | 673 | ||
714 | /* block any new IO from starting */ | 674 | /* block any new IO from starting */ |
715 | conf->barrier++; | 675 | conf->barrier++; |
@@ -717,8 +677,7 @@ static void raise_barrier(conf_t *conf, int force) | |||
717 | /* No wait for all pending IO to complete */ | 677 | /* No wait for all pending IO to complete */ |
718 | wait_event_lock_irq(conf->wait_barrier, | 678 | wait_event_lock_irq(conf->wait_barrier, |
719 | !conf->nr_pending && conf->barrier < RESYNC_DEPTH, | 679 | !conf->nr_pending && conf->barrier < RESYNC_DEPTH, |
720 | conf->resync_lock, | 680 | conf->resync_lock, md_kick_device(conf->mddev)); |
721 | raid10_unplug(conf->mddev->queue)); | ||
722 | 681 | ||
723 | spin_unlock_irq(&conf->resync_lock); | 682 | spin_unlock_irq(&conf->resync_lock); |
724 | } | 683 | } |
@@ -739,7 +698,7 @@ static void wait_barrier(conf_t *conf) | |||
739 | conf->nr_waiting++; | 698 | conf->nr_waiting++; |
740 | wait_event_lock_irq(conf->wait_barrier, !conf->barrier, | 699 | wait_event_lock_irq(conf->wait_barrier, !conf->barrier, |
741 | conf->resync_lock, | 700 | conf->resync_lock, |
742 | raid10_unplug(conf->mddev->queue)); | 701 | md_kick_device(conf->mddev)); |
743 | conf->nr_waiting--; | 702 | conf->nr_waiting--; |
744 | } | 703 | } |
745 | conf->nr_pending++; | 704 | conf->nr_pending++; |
@@ -776,7 +735,7 @@ static void freeze_array(conf_t *conf) | |||
776 | conf->nr_pending == conf->nr_queued+1, | 735 | conf->nr_pending == conf->nr_queued+1, |
777 | conf->resync_lock, | 736 | conf->resync_lock, |
778 | ({ flush_pending_writes(conf); | 737 | ({ flush_pending_writes(conf); |
779 | raid10_unplug(conf->mddev->queue); })); | 738 | md_kick_device(conf->mddev); })); |
780 | spin_unlock_irq(&conf->resync_lock); | 739 | spin_unlock_irq(&conf->resync_lock); |
781 | } | 740 | } |
782 | 741 | ||
@@ -971,7 +930,6 @@ static int make_request(mddev_t *mddev, struct bio * bio) | |||
971 | atomic_inc(&r10_bio->remaining); | 930 | atomic_inc(&r10_bio->remaining); |
972 | spin_lock_irqsave(&conf->device_lock, flags); | 931 | spin_lock_irqsave(&conf->device_lock, flags); |
973 | bio_list_add(&conf->pending_bio_list, mbio); | 932 | bio_list_add(&conf->pending_bio_list, mbio); |
974 | blk_plug_device(mddev->queue); | ||
975 | spin_unlock_irqrestore(&conf->device_lock, flags); | 933 | spin_unlock_irqrestore(&conf->device_lock, flags); |
976 | } | 934 | } |
977 | 935 | ||
@@ -988,7 +946,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) | |||
988 | /* In case raid10d snuck in to freeze_array */ | 946 | /* In case raid10d snuck in to freeze_array */ |
989 | wake_up(&conf->wait_barrier); | 947 | wake_up(&conf->wait_barrier); |
990 | 948 | ||
991 | if (do_sync) | 949 | if (do_sync || !mddev->bitmap) |
992 | md_wakeup_thread(mddev->thread); | 950 | md_wakeup_thread(mddev->thread); |
993 | 951 | ||
994 | return 0; | 952 | return 0; |
@@ -1681,7 +1639,6 @@ static void raid10d(mddev_t *mddev) | |||
1681 | unsigned long flags; | 1639 | unsigned long flags; |
1682 | conf_t *conf = mddev->private; | 1640 | conf_t *conf = mddev->private; |
1683 | struct list_head *head = &conf->retry_list; | 1641 | struct list_head *head = &conf->retry_list; |
1684 | int unplug=0; | ||
1685 | mdk_rdev_t *rdev; | 1642 | mdk_rdev_t *rdev; |
1686 | 1643 | ||
1687 | md_check_recovery(mddev); | 1644 | md_check_recovery(mddev); |
@@ -1689,7 +1646,7 @@ static void raid10d(mddev_t *mddev) | |||
1689 | for (;;) { | 1646 | for (;;) { |
1690 | char b[BDEVNAME_SIZE]; | 1647 | char b[BDEVNAME_SIZE]; |
1691 | 1648 | ||
1692 | unplug += flush_pending_writes(conf); | 1649 | flush_pending_writes(conf); |
1693 | 1650 | ||
1694 | spin_lock_irqsave(&conf->device_lock, flags); | 1651 | spin_lock_irqsave(&conf->device_lock, flags); |
1695 | if (list_empty(head)) { | 1652 | if (list_empty(head)) { |
@@ -1703,13 +1660,11 @@ static void raid10d(mddev_t *mddev) | |||
1703 | 1660 | ||
1704 | mddev = r10_bio->mddev; | 1661 | mddev = r10_bio->mddev; |
1705 | conf = mddev->private; | 1662 | conf = mddev->private; |
1706 | if (test_bit(R10BIO_IsSync, &r10_bio->state)) { | 1663 | if (test_bit(R10BIO_IsSync, &r10_bio->state)) |
1707 | sync_request_write(mddev, r10_bio); | 1664 | sync_request_write(mddev, r10_bio); |
1708 | unplug = 1; | 1665 | else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) |
1709 | } else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) { | ||
1710 | recovery_request_write(mddev, r10_bio); | 1666 | recovery_request_write(mddev, r10_bio); |
1711 | unplug = 1; | 1667 | else { |
1712 | } else { | ||
1713 | int mirror; | 1668 | int mirror; |
1714 | /* we got a read error. Maybe the drive is bad. Maybe just | 1669 | /* we got a read error. Maybe the drive is bad. Maybe just |
1715 | * the block and we can fix it. | 1670 | * the block and we can fix it. |
@@ -1756,14 +1711,11 @@ static void raid10d(mddev_t *mddev) | |||
1756 | bio->bi_rw = READ | do_sync; | 1711 | bio->bi_rw = READ | do_sync; |
1757 | bio->bi_private = r10_bio; | 1712 | bio->bi_private = r10_bio; |
1758 | bio->bi_end_io = raid10_end_read_request; | 1713 | bio->bi_end_io = raid10_end_read_request; |
1759 | unplug = 1; | ||
1760 | generic_make_request(bio); | 1714 | generic_make_request(bio); |
1761 | } | 1715 | } |
1762 | } | 1716 | } |
1763 | cond_resched(); | 1717 | cond_resched(); |
1764 | } | 1718 | } |
1765 | if (unplug) | ||
1766 | unplug_slaves(mddev); | ||
1767 | } | 1719 | } |
1768 | 1720 | ||
1769 | 1721 | ||
@@ -2376,7 +2328,6 @@ static int run(mddev_t *mddev) | |||
2376 | md_set_array_sectors(mddev, size); | 2328 | md_set_array_sectors(mddev, size); |
2377 | mddev->resync_max_sectors = size; | 2329 | mddev->resync_max_sectors = size; |
2378 | 2330 | ||
2379 | mddev->queue->unplug_fn = raid10_unplug; | ||
2380 | mddev->queue->backing_dev_info.congested_fn = raid10_congested; | 2331 | mddev->queue->backing_dev_info.congested_fn = raid10_congested; |
2381 | mddev->queue->backing_dev_info.congested_data = mddev; | 2332 | mddev->queue->backing_dev_info.congested_data = mddev; |
2382 | 2333 | ||
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 702812824195..e867ee42b152 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -433,8 +433,6 @@ static int has_failed(raid5_conf_t *conf) | |||
433 | return 0; | 433 | return 0; |
434 | } | 434 | } |
435 | 435 | ||
436 | static void unplug_slaves(mddev_t *mddev); | ||
437 | |||
438 | static struct stripe_head * | 436 | static struct stripe_head * |
439 | get_active_stripe(raid5_conf_t *conf, sector_t sector, | 437 | get_active_stripe(raid5_conf_t *conf, sector_t sector, |
440 | int previous, int noblock, int noquiesce) | 438 | int previous, int noblock, int noquiesce) |
@@ -463,8 +461,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector, | |||
463 | < (conf->max_nr_stripes *3/4) | 461 | < (conf->max_nr_stripes *3/4) |
464 | || !conf->inactive_blocked), | 462 | || !conf->inactive_blocked), |
465 | conf->device_lock, | 463 | conf->device_lock, |
466 | md_raid5_unplug_device(conf) | 464 | md_raid5_kick_device(conf)); |
467 | ); | ||
468 | conf->inactive_blocked = 0; | 465 | conf->inactive_blocked = 0; |
469 | } else | 466 | } else |
470 | init_stripe(sh, sector, previous); | 467 | init_stripe(sh, sector, previous); |
@@ -1473,8 +1470,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize) | |||
1473 | wait_event_lock_irq(conf->wait_for_stripe, | 1470 | wait_event_lock_irq(conf->wait_for_stripe, |
1474 | !list_empty(&conf->inactive_list), | 1471 | !list_empty(&conf->inactive_list), |
1475 | conf->device_lock, | 1472 | conf->device_lock, |
1476 | unplug_slaves(conf->mddev) | 1473 | blk_flush_plug(current)); |
1477 | ); | ||
1478 | osh = get_free_stripe(conf); | 1474 | osh = get_free_stripe(conf); |
1479 | spin_unlock_irq(&conf->device_lock); | 1475 | spin_unlock_irq(&conf->device_lock); |
1480 | atomic_set(&nsh->count, 1); | 1476 | atomic_set(&nsh->count, 1); |
@@ -3645,58 +3641,19 @@ static void activate_bit_delay(raid5_conf_t *conf) | |||
3645 | } | 3641 | } |
3646 | } | 3642 | } |
3647 | 3643 | ||
3648 | static void unplug_slaves(mddev_t *mddev) | 3644 | void md_raid5_kick_device(raid5_conf_t *conf) |
3649 | { | 3645 | { |
3650 | raid5_conf_t *conf = mddev->private; | 3646 | blk_flush_plug(current); |
3651 | int i; | 3647 | raid5_activate_delayed(conf); |
3652 | int devs = max(conf->raid_disks, conf->previous_raid_disks); | ||
3653 | |||
3654 | rcu_read_lock(); | ||
3655 | for (i = 0; i < devs; i++) { | ||
3656 | mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); | ||
3657 | if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { | ||
3658 | struct request_queue *r_queue = bdev_get_queue(rdev->bdev); | ||
3659 | |||
3660 | atomic_inc(&rdev->nr_pending); | ||
3661 | rcu_read_unlock(); | ||
3662 | |||
3663 | blk_unplug(r_queue); | ||
3664 | |||
3665 | rdev_dec_pending(rdev, mddev); | ||
3666 | rcu_read_lock(); | ||
3667 | } | ||
3668 | } | ||
3669 | rcu_read_unlock(); | ||
3670 | } | ||
3671 | |||
3672 | void md_raid5_unplug_device(raid5_conf_t *conf) | ||
3673 | { | ||
3674 | unsigned long flags; | ||
3675 | |||
3676 | spin_lock_irqsave(&conf->device_lock, flags); | ||
3677 | |||
3678 | if (plugger_remove_plug(&conf->plug)) { | ||
3679 | conf->seq_flush++; | ||
3680 | raid5_activate_delayed(conf); | ||
3681 | } | ||
3682 | md_wakeup_thread(conf->mddev->thread); | 3648 | md_wakeup_thread(conf->mddev->thread); |
3683 | |||
3684 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
3685 | |||
3686 | unplug_slaves(conf->mddev); | ||
3687 | } | 3649 | } |
3688 | EXPORT_SYMBOL_GPL(md_raid5_unplug_device); | 3650 | EXPORT_SYMBOL_GPL(md_raid5_kick_device); |
3689 | 3651 | ||
3690 | static void raid5_unplug(struct plug_handle *plug) | 3652 | static void raid5_unplug(struct plug_handle *plug) |
3691 | { | 3653 | { |
3692 | raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug); | 3654 | raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug); |
3693 | md_raid5_unplug_device(conf); | ||
3694 | } | ||
3695 | 3655 | ||
3696 | static void raid5_unplug_queue(struct request_queue *q) | 3656 | md_raid5_kick_device(conf); |
3697 | { | ||
3698 | mddev_t *mddev = q->queuedata; | ||
3699 | md_raid5_unplug_device(mddev->private); | ||
3700 | } | 3657 | } |
3701 | 3658 | ||
3702 | int md_raid5_congested(mddev_t *mddev, int bits) | 3659 | int md_raid5_congested(mddev_t *mddev, int bits) |
@@ -4100,7 +4057,7 @@ static int make_request(mddev_t *mddev, struct bio * bi) | |||
4100 | * add failed due to overlap. Flush everything | 4057 | * add failed due to overlap. Flush everything |
4101 | * and wait a while | 4058 | * and wait a while |
4102 | */ | 4059 | */ |
4103 | md_raid5_unplug_device(conf); | 4060 | md_raid5_kick_device(conf); |
4104 | release_stripe(sh); | 4061 | release_stripe(sh); |
4105 | schedule(); | 4062 | schedule(); |
4106 | goto retry; | 4063 | goto retry; |
@@ -4365,7 +4322,6 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski | |||
4365 | 4322 | ||
4366 | if (sector_nr >= max_sector) { | 4323 | if (sector_nr >= max_sector) { |
4367 | /* just being told to finish up .. nothing much to do */ | 4324 | /* just being told to finish up .. nothing much to do */ |
4368 | unplug_slaves(mddev); | ||
4369 | 4325 | ||
4370 | if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { | 4326 | if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { |
4371 | end_reshape(conf); | 4327 | end_reshape(conf); |
@@ -4569,7 +4525,6 @@ static void raid5d(mddev_t *mddev) | |||
4569 | spin_unlock_irq(&conf->device_lock); | 4525 | spin_unlock_irq(&conf->device_lock); |
4570 | 4526 | ||
4571 | async_tx_issue_pending_all(); | 4527 | async_tx_issue_pending_all(); |
4572 | unplug_slaves(mddev); | ||
4573 | 4528 | ||
4574 | pr_debug("--- raid5d inactive\n"); | 4529 | pr_debug("--- raid5d inactive\n"); |
4575 | } | 4530 | } |
@@ -5205,7 +5160,6 @@ static int run(mddev_t *mddev) | |||
5205 | mddev->queue->backing_dev_info.congested_data = mddev; | 5160 | mddev->queue->backing_dev_info.congested_data = mddev; |
5206 | mddev->queue->backing_dev_info.congested_fn = raid5_congested; | 5161 | mddev->queue->backing_dev_info.congested_fn = raid5_congested; |
5207 | mddev->queue->queue_lock = &conf->device_lock; | 5162 | mddev->queue->queue_lock = &conf->device_lock; |
5208 | mddev->queue->unplug_fn = raid5_unplug_queue; | ||
5209 | 5163 | ||
5210 | chunk_size = mddev->chunk_sectors << 9; | 5164 | chunk_size = mddev->chunk_sectors << 9; |
5211 | blk_queue_io_min(mddev->queue, chunk_size); | 5165 | blk_queue_io_min(mddev->queue, chunk_size); |
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 2ace0582b409..8d563a4f022a 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h | |||
@@ -503,6 +503,6 @@ static inline int algorithm_is_DDF(int layout) | |||
503 | } | 503 | } |
504 | 504 | ||
505 | extern int md_raid5_congested(mddev_t *mddev, int bits); | 505 | extern int md_raid5_congested(mddev_t *mddev, int bits); |
506 | extern void md_raid5_unplug_device(raid5_conf_t *conf); | 506 | extern void md_raid5_kick_device(raid5_conf_t *conf); |
507 | extern int raid5_set_cache_size(mddev_t *mddev, int size); | 507 | extern int raid5_set_cache_size(mddev_t *mddev, int size); |
508 | #endif | 508 | #endif |
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index ae7cad185898..b29eb4eaa86e 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c | |||
@@ -895,11 +895,7 @@ static void i2o_block_request_fn(struct request_queue *q) | |||
895 | { | 895 | { |
896 | struct request *req; | 896 | struct request *req; |
897 | 897 | ||
898 | while (!blk_queue_plugged(q)) { | 898 | while ((req = blk_peek_request(q)) != NULL) { |
899 | req = blk_peek_request(q); | ||
900 | if (!req) | ||
901 | break; | ||
902 | |||
903 | if (req->cmd_type == REQ_TYPE_FS) { | 899 | if (req->cmd_type == REQ_TYPE_FS) { |
904 | struct i2o_block_delayed_request *dreq; | 900 | struct i2o_block_delayed_request *dreq; |
905 | struct i2o_block_request *ireq = req->special; | 901 | struct i2o_block_request *ireq = req->special; |
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index 4e42d030e097..2ae727568df9 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c | |||
@@ -55,8 +55,7 @@ static int mmc_queue_thread(void *d) | |||
55 | 55 | ||
56 | spin_lock_irq(q->queue_lock); | 56 | spin_lock_irq(q->queue_lock); |
57 | set_current_state(TASK_INTERRUPTIBLE); | 57 | set_current_state(TASK_INTERRUPTIBLE); |
58 | if (!blk_queue_plugged(q)) | 58 | req = blk_fetch_request(q); |
59 | req = blk_fetch_request(q); | ||
60 | mq->req = req; | 59 | mq->req = req; |
61 | spin_unlock_irq(q->queue_lock); | 60 | spin_unlock_irq(q->queue_lock); |
62 | 61 | ||
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 794bfd962266..4d2df2f76ea0 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -1917,7 +1917,7 @@ static void __dasd_process_request_queue(struct dasd_block *block) | |||
1917 | return; | 1917 | return; |
1918 | } | 1918 | } |
1919 | /* Now we try to fetch requests from the request queue */ | 1919 | /* Now we try to fetch requests from the request queue */ |
1920 | while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) { | 1920 | while ((req = blk_peek_request(queue))) { |
1921 | if (basedev->features & DASD_FEATURE_READONLY && | 1921 | if (basedev->features & DASD_FEATURE_READONLY && |
1922 | rq_data_dir(req) == WRITE) { | 1922 | rq_data_dir(req) == WRITE) { |
1923 | DBF_DEV_EVENT(DBF_ERR, basedev, | 1923 | DBF_DEV_EVENT(DBF_ERR, basedev, |
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c index 55d2d0f4eabc..f061b2527b44 100644 --- a/drivers/s390/char/tape_block.c +++ b/drivers/s390/char/tape_block.c | |||
@@ -161,7 +161,6 @@ tapeblock_requeue(struct work_struct *work) { | |||
161 | 161 | ||
162 | spin_lock_irq(&device->blk_data.request_queue_lock); | 162 | spin_lock_irq(&device->blk_data.request_queue_lock); |
163 | while ( | 163 | while ( |
164 | !blk_queue_plugged(queue) && | ||
165 | blk_peek_request(queue) && | 164 | blk_peek_request(queue) && |
166 | nr_queued < TAPEBLOCK_MIN_REQUEUE | 165 | nr_queued < TAPEBLOCK_MIN_REQUEUE |
167 | ) { | 166 | ) { |
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 998c01be3234..2cefabd5bdb5 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c | |||
@@ -3913,7 +3913,7 @@ fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost, | |||
3913 | if (!get_device(dev)) | 3913 | if (!get_device(dev)) |
3914 | return; | 3914 | return; |
3915 | 3915 | ||
3916 | while (!blk_queue_plugged(q)) { | 3916 | while (1) { |
3917 | if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) && | 3917 | if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) && |
3918 | !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)) | 3918 | !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)) |
3919 | break; | 3919 | break; |
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 927e99cb7225..c6fcf76cade5 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c | |||
@@ -173,11 +173,7 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost, | |||
173 | int ret; | 173 | int ret; |
174 | int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *); | 174 | int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *); |
175 | 175 | ||
176 | while (!blk_queue_plugged(q)) { | 176 | while ((req = blk_fetch_request(q)) != NULL) { |
177 | req = blk_fetch_request(q); | ||
178 | if (!req) | ||
179 | break; | ||
180 | |||
181 | spin_unlock_irq(q->queue_lock); | 177 | spin_unlock_irq(q->queue_lock); |
182 | 178 | ||
183 | handler = to_sas_internal(shost->transportt)->f->smp_handler; | 179 | handler = to_sas_internal(shost->transportt)->f->smp_handler; |
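The driver-side changes above (i2o_block, mmc, dasd, tape_block, the FC and SAS transports) all follow the same recipe: the blk_queue_plugged() test disappears and the strategy function simply drains whatever blk_peek_request()/blk_fetch_request() hands it. A sketch of the drained form (drain_request_fn() and the immediate completion are illustrative only; the queue lock is held on entry, as usual for a request_fn):

	static void drain_request_fn(struct request_queue *q)
	{
		struct request *req;

		while ((req = blk_fetch_request(q)) != NULL) {
			/* dispatch req to the hardware ... */
			__blk_end_request_all(req, 0);	/* completed immediately in this sketch */
		}
	}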
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 67f0c09983c8..c1b539d7b0d3 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c | |||
@@ -392,9 +392,8 @@ static int iblock_do_task(struct se_task *task) | |||
392 | { | 392 | { |
393 | struct se_device *dev = task->task_se_cmd->se_dev; | 393 | struct se_device *dev = task->task_se_cmd->se_dev; |
394 | struct iblock_req *req = IBLOCK_REQ(task); | 394 | struct iblock_req *req = IBLOCK_REQ(task); |
395 | struct iblock_dev *ibd = (struct iblock_dev *)req->ib_dev; | ||
396 | struct request_queue *q = bdev_get_queue(ibd->ibd_bd); | ||
397 | struct bio *bio = req->ib_bio, *nbio = NULL; | 395 | struct bio *bio = req->ib_bio, *nbio = NULL; |
396 | struct blk_plug plug; | ||
398 | int rw; | 397 | int rw; |
399 | 398 | ||
400 | if (task->task_data_direction == DMA_TO_DEVICE) { | 399 | if (task->task_data_direction == DMA_TO_DEVICE) { |
@@ -412,6 +411,7 @@ static int iblock_do_task(struct se_task *task) | |||
412 | rw = READ; | 411 | rw = READ; |
413 | } | 412 | } |
414 | 413 | ||
414 | blk_start_plug(&plug); | ||
415 | while (bio) { | 415 | while (bio) { |
416 | nbio = bio->bi_next; | 416 | nbio = bio->bi_next; |
417 | bio->bi_next = NULL; | 417 | bio->bi_next = NULL; |
@@ -421,9 +421,8 @@ static int iblock_do_task(struct se_task *task) | |||
421 | submit_bio(rw, bio); | 421 | submit_bio(rw, bio); |
422 | bio = nbio; | 422 | bio = nbio; |
423 | } | 423 | } |
424 | blk_finish_plug(&plug); | ||
424 | 425 | ||
425 | if (q->unplug_fn) | ||
426 | q->unplug_fn(q); | ||
427 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; | 426 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; |
428 | } | 427 | } |
429 | 428 | ||
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c index 65794b8fe79e..1cc84b276131 100644 --- a/fs/adfs/inode.c +++ b/fs/adfs/inode.c | |||
@@ -73,7 +73,6 @@ static sector_t _adfs_bmap(struct address_space *mapping, sector_t block) | |||
73 | static const struct address_space_operations adfs_aops = { | 73 | static const struct address_space_operations adfs_aops = { |
74 | .readpage = adfs_readpage, | 74 | .readpage = adfs_readpage, |
75 | .writepage = adfs_writepage, | 75 | .writepage = adfs_writepage, |
76 | .sync_page = block_sync_page, | ||
77 | .write_begin = adfs_write_begin, | 76 | .write_begin = adfs_write_begin, |
78 | .write_end = generic_write_end, | 77 | .write_end = generic_write_end, |
79 | .bmap = _adfs_bmap | 78 | .bmap = _adfs_bmap |
diff --git a/fs/affs/file.c b/fs/affs/file.c index 0a90dcd46de2..acf321b70fcd 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c | |||
@@ -429,7 +429,6 @@ static sector_t _affs_bmap(struct address_space *mapping, sector_t block) | |||
429 | const struct address_space_operations affs_aops = { | 429 | const struct address_space_operations affs_aops = { |
430 | .readpage = affs_readpage, | 430 | .readpage = affs_readpage, |
431 | .writepage = affs_writepage, | 431 | .writepage = affs_writepage, |
432 | .sync_page = block_sync_page, | ||
433 | .write_begin = affs_write_begin, | 432 | .write_begin = affs_write_begin, |
434 | .write_end = generic_write_end, | 433 | .write_end = generic_write_end, |
435 | .bmap = _affs_bmap | 434 | .bmap = _affs_bmap |
@@ -786,7 +785,6 @@ out: | |||
786 | const struct address_space_operations affs_aops_ofs = { | 785 | const struct address_space_operations affs_aops_ofs = { |
787 | .readpage = affs_readpage_ofs, | 786 | .readpage = affs_readpage_ofs, |
788 | //.writepage = affs_writepage_ofs, | 787 | //.writepage = affs_writepage_ofs, |
789 | //.sync_page = affs_sync_page_ofs, | ||
790 | .write_begin = affs_write_begin_ofs, | 788 | .write_begin = affs_write_begin_ofs, |
791 | .write_end = affs_write_end_ofs | 789 | .write_end = affs_write_end_ofs |
792 | }; | 790 | }; |
diff --git a/fs/aio.c b/fs/aio.c --- a/fs/aio.c +++ b/fs/aio.c | |||
@@ -1550,9 +1550,11 @@ static void aio_batch_free(struct hlist_head *batch_hash) | |||
1550 | struct hlist_node *pos, *n; | 1550 | struct hlist_node *pos, *n; |
1551 | int i; | 1551 | int i; |
1552 | 1552 | ||
1553 | /* | ||
1554 | * TODO: kill this | ||
1555 | */ | ||
1553 | for (i = 0; i < AIO_BATCH_HASH_SIZE; i++) { | 1556 | for (i = 0; i < AIO_BATCH_HASH_SIZE; i++) { |
1554 | hlist_for_each_entry_safe(abe, pos, n, &batch_hash[i], list) { | 1557 | hlist_for_each_entry_safe(abe, pos, n, &batch_hash[i], list) { |
1555 | blk_run_address_space(abe->mapping); | ||
1556 | iput(abe->mapping->host); | 1558 | iput(abe->mapping->host); |
1557 | hlist_del(&abe->list); | 1559 | hlist_del(&abe->list); |
1558 | mempool_free(abe, abe_pool); | 1560 | mempool_free(abe, abe_pool); |
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index b1d0c794747b..06457ed8f3e7 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c | |||
@@ -75,7 +75,6 @@ static const struct inode_operations befs_dir_inode_operations = { | |||
75 | 75 | ||
76 | static const struct address_space_operations befs_aops = { | 76 | static const struct address_space_operations befs_aops = { |
77 | .readpage = befs_readpage, | 77 | .readpage = befs_readpage, |
78 | .sync_page = block_sync_page, | ||
79 | .bmap = befs_bmap, | 78 | .bmap = befs_bmap, |
80 | }; | 79 | }; |
81 | 80 | ||
diff --git a/fs/bfs/file.c b/fs/bfs/file.c index eb67edd0f8ea..f20e8a71062f 100644 --- a/fs/bfs/file.c +++ b/fs/bfs/file.c | |||
@@ -186,7 +186,6 @@ static sector_t bfs_bmap(struct address_space *mapping, sector_t block) | |||
186 | const struct address_space_operations bfs_aops = { | 186 | const struct address_space_operations bfs_aops = { |
187 | .readpage = bfs_readpage, | 187 | .readpage = bfs_readpage, |
188 | .writepage = bfs_writepage, | 188 | .writepage = bfs_writepage, |
189 | .sync_page = block_sync_page, | ||
190 | .write_begin = bfs_write_begin, | 189 | .write_begin = bfs_write_begin, |
191 | .write_end = generic_write_end, | 190 | .write_end = generic_write_end, |
192 | .bmap = bfs_bmap, | 191 | .bmap = bfs_bmap, |
diff --git a/fs/block_dev.c b/fs/block_dev.c index 4fb8a3431531..fffc2c672396 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -1520,7 +1520,6 @@ static int blkdev_releasepage(struct page *page, gfp_t wait) | |||
1520 | static const struct address_space_operations def_blk_aops = { | 1520 | static const struct address_space_operations def_blk_aops = { |
1521 | .readpage = blkdev_readpage, | 1521 | .readpage = blkdev_readpage, |
1522 | .writepage = blkdev_writepage, | 1522 | .writepage = blkdev_writepage, |
1523 | .sync_page = block_sync_page, | ||
1524 | .write_begin = blkdev_write_begin, | 1523 | .write_begin = blkdev_write_begin, |
1525 | .write_end = blkdev_write_end, | 1524 | .write_end = blkdev_write_end, |
1526 | .writepages = generic_writepages, | 1525 | .writepages = generic_writepages, |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index e1aa8d607bc7..ada1f6bd0a57 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -847,7 +847,6 @@ static const struct address_space_operations btree_aops = { | |||
847 | .writepages = btree_writepages, | 847 | .writepages = btree_writepages, |
848 | .releasepage = btree_releasepage, | 848 | .releasepage = btree_releasepage, |
849 | .invalidatepage = btree_invalidatepage, | 849 | .invalidatepage = btree_invalidatepage, |
850 | .sync_page = block_sync_page, | ||
851 | #ifdef CONFIG_MIGRATION | 850 | #ifdef CONFIG_MIGRATION |
852 | .migratepage = btree_migratepage, | 851 | .migratepage = btree_migratepage, |
853 | #endif | 852 | #endif |
@@ -1331,82 +1330,6 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits) | |||
1331 | } | 1330 | } |
1332 | 1331 | ||
1333 | /* | 1332 | /* |
1334 | * this unplugs every device on the box, and it is only used when page | ||
1335 | * is null | ||
1336 | */ | ||
1337 | static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page) | ||
1338 | { | ||
1339 | struct btrfs_device *device; | ||
1340 | struct btrfs_fs_info *info; | ||
1341 | |||
1342 | info = (struct btrfs_fs_info *)bdi->unplug_io_data; | ||
1343 | list_for_each_entry(device, &info->fs_devices->devices, dev_list) { | ||
1344 | if (!device->bdev) | ||
1345 | continue; | ||
1346 | |||
1347 | bdi = blk_get_backing_dev_info(device->bdev); | ||
1348 | if (bdi->unplug_io_fn) | ||
1349 | bdi->unplug_io_fn(bdi, page); | ||
1350 | } | ||
1351 | } | ||
1352 | |||
1353 | static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page) | ||
1354 | { | ||
1355 | struct inode *inode; | ||
1356 | struct extent_map_tree *em_tree; | ||
1357 | struct extent_map *em; | ||
1358 | struct address_space *mapping; | ||
1359 | u64 offset; | ||
1360 | |||
1361 | /* the generic O_DIRECT read code does this */ | ||
1362 | if (1 || !page) { | ||
1363 | __unplug_io_fn(bdi, page); | ||
1364 | return; | ||
1365 | } | ||
1366 | |||
1367 | /* | ||
1368 | * page->mapping may change at any time. Get a consistent copy | ||
1369 | * and use that for everything below | ||
1370 | */ | ||
1371 | smp_mb(); | ||
1372 | mapping = page->mapping; | ||
1373 | if (!mapping) | ||
1374 | return; | ||
1375 | |||
1376 | inode = mapping->host; | ||
1377 | |||
1378 | /* | ||
1379 | * don't do the expensive searching for a small number of | ||
1380 | * devices | ||
1381 | */ | ||
1382 | if (BTRFS_I(inode)->root->fs_info->fs_devices->open_devices <= 2) { | ||
1383 | __unplug_io_fn(bdi, page); | ||
1384 | return; | ||
1385 | } | ||
1386 | |||
1387 | offset = page_offset(page); | ||
1388 | |||
1389 | em_tree = &BTRFS_I(inode)->extent_tree; | ||
1390 | read_lock(&em_tree->lock); | ||
1391 | em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE); | ||
1392 | read_unlock(&em_tree->lock); | ||
1393 | if (!em) { | ||
1394 | __unplug_io_fn(bdi, page); | ||
1395 | return; | ||
1396 | } | ||
1397 | |||
1398 | if (em->block_start >= EXTENT_MAP_LAST_BYTE) { | ||
1399 | free_extent_map(em); | ||
1400 | __unplug_io_fn(bdi, page); | ||
1401 | return; | ||
1402 | } | ||
1403 | offset = offset - em->start; | ||
1404 | btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree, | ||
1405 | em->block_start + offset, page); | ||
1406 | free_extent_map(em); | ||
1407 | } | ||
1408 | |||
1409 | /* | ||
1410 | * If this fails, caller must call bdi_destroy() to get rid of the | 1333 | * If this fails, caller must call bdi_destroy() to get rid of the |
1411 | * bdi again. | 1334 | * bdi again. |
1412 | */ | 1335 | */ |
@@ -1420,8 +1343,6 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi) | |||
1420 | return err; | 1343 | return err; |
1421 | 1344 | ||
1422 | bdi->ra_pages = default_backing_dev_info.ra_pages; | 1345 | bdi->ra_pages = default_backing_dev_info.ra_pages; |
1423 | bdi->unplug_io_fn = btrfs_unplug_io_fn; | ||
1424 | bdi->unplug_io_data = info; | ||
1425 | bdi->congested_fn = btrfs_congested_fn; | 1346 | bdi->congested_fn = btrfs_congested_fn; |
1426 | bdi->congested_data = info; | 1347 | bdi->congested_data = info; |
1427 | return 0; | 1348 | return 0; |
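[Editor's note] The two callbacks deleted above (__unplug_io_fn and btrfs_unplug_io_fn) existed only so a waiter could kick every device's request queue through bdi->unplug_io_fn. The model that replaces them keeps the pending requests on the submitting task instead. The sketch below is illustrative only and assumes the 2.6.39-era on-stack plugging API (blk_start_plug()/blk_finish_plug()) and the two-argument submit_bio(rw, bio) of this kernel; it is not code from this patch.

	#include <linux/blkdev.h>
	#include <linux/bio.h>

	/*
	 * Submit a batch of bios.  The requests are held on current->plug
	 * and pushed to the drivers when the plug is finished (or when the
	 * task blocks), so no per-bdi unplug callback is needed anymore.
	 */
	static void submit_bio_batch(struct bio **bios, int nr)
	{
		struct blk_plug plug;
		int i;

		blk_start_plug(&plug);
		for (i = 0; i < nr; i++)
			submit_bio(READ, bios[i]);
		blk_finish_plug(&plug);		/* requests reach the queues here */
	}

Because the plug belongs to the submitting task, there is no longer a global "unplug every device" operation for each filesystem to implement.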
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index fb9bd7832b6d..462e08e724b0 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -7218,7 +7218,6 @@ static const struct address_space_operations btrfs_aops = { | |||
7218 | .writepage = btrfs_writepage, | 7218 | .writepage = btrfs_writepage, |
7219 | .writepages = btrfs_writepages, | 7219 | .writepages = btrfs_writepages, |
7220 | .readpages = btrfs_readpages, | 7220 | .readpages = btrfs_readpages, |
7221 | .sync_page = block_sync_page, | ||
7222 | .direct_IO = btrfs_direct_IO, | 7221 | .direct_IO = btrfs_direct_IO, |
7223 | .invalidatepage = btrfs_invalidatepage, | 7222 | .invalidatepage = btrfs_invalidatepage, |
7224 | .releasepage = btrfs_releasepage, | 7223 | .releasepage = btrfs_releasepage, |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index af7dbca15276..6e0e82a1b188 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -162,7 +162,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device) | |||
162 | struct bio *cur; | 162 | struct bio *cur; |
163 | int again = 0; | 163 | int again = 0; |
164 | unsigned long num_run; | 164 | unsigned long num_run; |
165 | unsigned long num_sync_run; | ||
166 | unsigned long batch_run = 0; | 165 | unsigned long batch_run = 0; |
167 | unsigned long limit; | 166 | unsigned long limit; |
168 | unsigned long last_waited = 0; | 167 | unsigned long last_waited = 0; |
@@ -173,11 +172,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device) | |||
173 | limit = btrfs_async_submit_limit(fs_info); | 172 | limit = btrfs_async_submit_limit(fs_info); |
174 | limit = limit * 2 / 3; | 173 | limit = limit * 2 / 3; |
175 | 174 | ||
176 | /* we want to make sure that every time we switch from the sync | ||
177 | * list to the normal list, we unplug | ||
178 | */ | ||
179 | num_sync_run = 0; | ||
180 | |||
181 | loop: | 175 | loop: |
182 | spin_lock(&device->io_lock); | 176 | spin_lock(&device->io_lock); |
183 | 177 | ||
@@ -223,15 +217,6 @@ loop_lock: | |||
223 | 217 | ||
224 | spin_unlock(&device->io_lock); | 218 | spin_unlock(&device->io_lock); |
225 | 219 | ||
226 | /* | ||
227 | * if we're doing the regular priority list, make sure we unplug | ||
228 | * for any high prio bios we've sent down | ||
229 | */ | ||
230 | if (pending_bios == &device->pending_bios && num_sync_run > 0) { | ||
231 | num_sync_run = 0; | ||
232 | blk_run_backing_dev(bdi, NULL); | ||
233 | } | ||
234 | |||
235 | while (pending) { | 220 | while (pending) { |
236 | 221 | ||
237 | rmb(); | 222 | rmb(); |
@@ -259,19 +244,11 @@ loop_lock: | |||
259 | 244 | ||
260 | BUG_ON(atomic_read(&cur->bi_cnt) == 0); | 245 | BUG_ON(atomic_read(&cur->bi_cnt) == 0); |
261 | 246 | ||
262 | if (cur->bi_rw & REQ_SYNC) | ||
263 | num_sync_run++; | ||
264 | |||
265 | submit_bio(cur->bi_rw, cur); | 247 | submit_bio(cur->bi_rw, cur); |
266 | num_run++; | 248 | num_run++; |
267 | batch_run++; | 249 | batch_run++; |
268 | if (need_resched()) { | 250 | if (need_resched()) |
269 | if (num_sync_run) { | ||
270 | blk_run_backing_dev(bdi, NULL); | ||
271 | num_sync_run = 0; | ||
272 | } | ||
273 | cond_resched(); | 251 | cond_resched(); |
274 | } | ||
275 | 252 | ||
276 | /* | 253 | /* |
277 | * we made progress, there is more work to do and the bdi | 254 | * we made progress, there is more work to do and the bdi |
@@ -304,13 +281,8 @@ loop_lock: | |||
304 | * against it before looping | 281 | * against it before looping |
305 | */ | 282 | */ |
306 | last_waited = ioc->last_waited; | 283 | last_waited = ioc->last_waited; |
307 | if (need_resched()) { | 284 | if (need_resched()) |
308 | if (num_sync_run) { | ||
309 | blk_run_backing_dev(bdi, NULL); | ||
310 | num_sync_run = 0; | ||
311 | } | ||
312 | cond_resched(); | 285 | cond_resched(); |
313 | } | ||
314 | continue; | 286 | continue; |
315 | } | 287 | } |
316 | spin_lock(&device->io_lock); | 288 | spin_lock(&device->io_lock); |
@@ -323,22 +295,6 @@ loop_lock: | |||
323 | } | 295 | } |
324 | } | 296 | } |
325 | 297 | ||
326 | if (num_sync_run) { | ||
327 | num_sync_run = 0; | ||
328 | blk_run_backing_dev(bdi, NULL); | ||
329 | } | ||
330 | /* | ||
331 | * IO has already been through a long path to get here. Checksumming, | ||
332 | * async helper threads, perhaps compression. We've done a pretty | ||
333 | * good job of collecting a batch of IO and should just unplug | ||
334 | * the device right away. | ||
335 | * | ||
336 | * This will help anyone who is waiting on the IO, they might have | ||
337 | * already unplugged, but managed to do so before the bio they | ||
338 | * cared about found its way down here. | ||
339 | */ | ||
340 | blk_run_backing_dev(bdi, NULL); | ||
341 | |||
342 | cond_resched(); | 298 | cond_resched(); |
343 | if (again) | 299 | if (again) |
344 | goto loop; | 300 | goto loop; |
@@ -2948,7 +2904,7 @@ static int find_live_mirror(struct map_lookup *map, int first, int num, | |||
2948 | static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw, | 2904 | static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw, |
2949 | u64 logical, u64 *length, | 2905 | u64 logical, u64 *length, |
2950 | struct btrfs_multi_bio **multi_ret, | 2906 | struct btrfs_multi_bio **multi_ret, |
2951 | int mirror_num, struct page *unplug_page) | 2907 | int mirror_num) |
2952 | { | 2908 | { |
2953 | struct extent_map *em; | 2909 | struct extent_map *em; |
2954 | struct map_lookup *map; | 2910 | struct map_lookup *map; |
@@ -2980,11 +2936,6 @@ again: | |||
2980 | em = lookup_extent_mapping(em_tree, logical, *length); | 2936 | em = lookup_extent_mapping(em_tree, logical, *length); |
2981 | read_unlock(&em_tree->lock); | 2937 | read_unlock(&em_tree->lock); |
2982 | 2938 | ||
2983 | if (!em && unplug_page) { | ||
2984 | kfree(multi); | ||
2985 | return 0; | ||
2986 | } | ||
2987 | |||
2988 | if (!em) { | 2939 | if (!em) { |
2989 | printk(KERN_CRIT "unable to find logical %llu len %llu\n", | 2940 | printk(KERN_CRIT "unable to find logical %llu len %llu\n", |
2990 | (unsigned long long)logical, | 2941 | (unsigned long long)logical, |
@@ -3040,13 +2991,13 @@ again: | |||
3040 | *length = em->len - offset; | 2991 | *length = em->len - offset; |
3041 | } | 2992 | } |
3042 | 2993 | ||
3043 | if (!multi_ret && !unplug_page) | 2994 | if (!multi_ret) |
3044 | goto out; | 2995 | goto out; |
3045 | 2996 | ||
3046 | num_stripes = 1; | 2997 | num_stripes = 1; |
3047 | stripe_index = 0; | 2998 | stripe_index = 0; |
3048 | if (map->type & BTRFS_BLOCK_GROUP_RAID1) { | 2999 | if (map->type & BTRFS_BLOCK_GROUP_RAID1) { |
3049 | if (unplug_page || (rw & REQ_WRITE)) | 3000 | if (rw & REQ_WRITE) |
3050 | num_stripes = map->num_stripes; | 3001 | num_stripes = map->num_stripes; |
3051 | else if (mirror_num) | 3002 | else if (mirror_num) |
3052 | stripe_index = mirror_num - 1; | 3003 | stripe_index = mirror_num - 1; |
@@ -3068,7 +3019,7 @@ again: | |||
3068 | stripe_index = do_div(stripe_nr, factor); | 3019 | stripe_index = do_div(stripe_nr, factor); |
3069 | stripe_index *= map->sub_stripes; | 3020 | stripe_index *= map->sub_stripes; |
3070 | 3021 | ||
3071 | if (unplug_page || (rw & REQ_WRITE)) | 3022 | if (rw & REQ_WRITE) |
3072 | num_stripes = map->sub_stripes; | 3023 | num_stripes = map->sub_stripes; |
3073 | else if (mirror_num) | 3024 | else if (mirror_num) |
3074 | stripe_index += mirror_num - 1; | 3025 | stripe_index += mirror_num - 1; |
@@ -3088,22 +3039,10 @@ again: | |||
3088 | BUG_ON(stripe_index >= map->num_stripes); | 3039 | BUG_ON(stripe_index >= map->num_stripes); |
3089 | 3040 | ||
3090 | for (i = 0; i < num_stripes; i++) { | 3041 | for (i = 0; i < num_stripes; i++) { |
3091 | if (unplug_page) { | 3042 | multi->stripes[i].physical = |
3092 | struct btrfs_device *device; | 3043 | map->stripes[stripe_index].physical + |
3093 | struct backing_dev_info *bdi; | 3044 | stripe_offset + stripe_nr * map->stripe_len; |
3094 | 3045 | multi->stripes[i].dev = map->stripes[stripe_index].dev; | |
3095 | device = map->stripes[stripe_index].dev; | ||
3096 | if (device->bdev) { | ||
3097 | bdi = blk_get_backing_dev_info(device->bdev); | ||
3098 | if (bdi->unplug_io_fn) | ||
3099 | bdi->unplug_io_fn(bdi, unplug_page); | ||
3100 | } | ||
3101 | } else { | ||
3102 | multi->stripes[i].physical = | ||
3103 | map->stripes[stripe_index].physical + | ||
3104 | stripe_offset + stripe_nr * map->stripe_len; | ||
3105 | multi->stripes[i].dev = map->stripes[stripe_index].dev; | ||
3106 | } | ||
3107 | stripe_index++; | 3046 | stripe_index++; |
3108 | } | 3047 | } |
3109 | if (multi_ret) { | 3048 | if (multi_ret) { |
@@ -3121,7 +3060,7 @@ int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw, | |||
3121 | struct btrfs_multi_bio **multi_ret, int mirror_num) | 3060 | struct btrfs_multi_bio **multi_ret, int mirror_num) |
3122 | { | 3061 | { |
3123 | return __btrfs_map_block(map_tree, rw, logical, length, multi_ret, | 3062 | return __btrfs_map_block(map_tree, rw, logical, length, multi_ret, |
3124 | mirror_num, NULL); | 3063 | mirror_num); |
3125 | } | 3064 | } |
3126 | 3065 | ||
3127 | int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, | 3066 | int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, |
@@ -3189,14 +3128,6 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, | |||
3189 | return 0; | 3128 | return 0; |
3190 | } | 3129 | } |
3191 | 3130 | ||
3192 | int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree, | ||
3193 | u64 logical, struct page *page) | ||
3194 | { | ||
3195 | u64 length = PAGE_CACHE_SIZE; | ||
3196 | return __btrfs_map_block(map_tree, READ, logical, &length, | ||
3197 | NULL, 0, page); | ||
3198 | } | ||
3199 | |||
3200 | static void end_bio_multi_stripe(struct bio *bio, int err) | 3131 | static void end_bio_multi_stripe(struct bio *bio, int err) |
3201 | { | 3132 | { |
3202 | struct btrfs_multi_bio *multi = bio->bi_private; | 3133 | struct btrfs_multi_bio *multi = bio->bi_private; |
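[Editor's note] The num_sync_run bookkeeping removed above existed only to decide when to call blk_run_backing_dev() so REQ_SYNC bios would not sit in a plugged queue while run_scheduled_bios() rescheduled. Under per-task plugging the submitter can simply flush its own plug before giving up the CPU. A hedged fragment of that idea; blk_flush_plug(current) is the helper this series uses elsewhere in the diff (ufs_truncate, xfs_buf_lock), but whether btrfs ultimately needs the explicit call or relies on the scheduler flushing the plug is an assumption, not something this hunk shows:

	if (need_resched()) {
		blk_flush_plug(current);	/* push our queued requests before yielding */
		cond_resched();
	}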
diff --git a/fs/buffer.c b/fs/buffer.c index 2219a76e2caf..f903f2e5b4fe 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -54,23 +54,15 @@ init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private) | |||
54 | } | 54 | } |
55 | EXPORT_SYMBOL(init_buffer); | 55 | EXPORT_SYMBOL(init_buffer); |
56 | 56 | ||
57 | static int sync_buffer(void *word) | 57 | static int sleep_on_buffer(void *word) |
58 | { | 58 | { |
59 | struct block_device *bd; | ||
60 | struct buffer_head *bh | ||
61 | = container_of(word, struct buffer_head, b_state); | ||
62 | |||
63 | smp_mb(); | ||
64 | bd = bh->b_bdev; | ||
65 | if (bd) | ||
66 | blk_run_address_space(bd->bd_inode->i_mapping); | ||
67 | io_schedule(); | 59 | io_schedule(); |
68 | return 0; | 60 | return 0; |
69 | } | 61 | } |
70 | 62 | ||
71 | void __lock_buffer(struct buffer_head *bh) | 63 | void __lock_buffer(struct buffer_head *bh) |
72 | { | 64 | { |
73 | wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer, | 65 | wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer, |
74 | TASK_UNINTERRUPTIBLE); | 66 | TASK_UNINTERRUPTIBLE); |
75 | } | 67 | } |
76 | EXPORT_SYMBOL(__lock_buffer); | 68 | EXPORT_SYMBOL(__lock_buffer); |
@@ -90,7 +82,7 @@ EXPORT_SYMBOL(unlock_buffer); | |||
90 | */ | 82 | */ |
91 | void __wait_on_buffer(struct buffer_head * bh) | 83 | void __wait_on_buffer(struct buffer_head * bh) |
92 | { | 84 | { |
93 | wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE); | 85 | wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE); |
94 | } | 86 | } |
95 | EXPORT_SYMBOL(__wait_on_buffer); | 87 | EXPORT_SYMBOL(__wait_on_buffer); |
96 | 88 | ||
@@ -749,7 +741,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list) | |||
749 | { | 741 | { |
750 | struct buffer_head *bh; | 742 | struct buffer_head *bh; |
751 | struct list_head tmp; | 743 | struct list_head tmp; |
752 | struct address_space *mapping, *prev_mapping = NULL; | 744 | struct address_space *mapping; |
753 | int err = 0, err2; | 745 | int err = 0, err2; |
754 | 746 | ||
755 | INIT_LIST_HEAD(&tmp); | 747 | INIT_LIST_HEAD(&tmp); |
@@ -783,10 +775,6 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list) | |||
783 | * wait_on_buffer() will do that for us | 775 | * wait_on_buffer() will do that for us |
784 | * through sync_buffer(). | 776 | * through sync_buffer(). |
785 | */ | 777 | */ |
786 | if (prev_mapping && prev_mapping != mapping) | ||
787 | blk_run_address_space(prev_mapping); | ||
788 | prev_mapping = mapping; | ||
789 | |||
790 | brelse(bh); | 778 | brelse(bh); |
791 | spin_lock(lock); | 779 | spin_lock(lock); |
792 | } | 780 | } |
@@ -3138,17 +3126,6 @@ out: | |||
3138 | } | 3126 | } |
3139 | EXPORT_SYMBOL(try_to_free_buffers); | 3127 | EXPORT_SYMBOL(try_to_free_buffers); |
3140 | 3128 | ||
3141 | void block_sync_page(struct page *page) | ||
3142 | { | ||
3143 | struct address_space *mapping; | ||
3144 | |||
3145 | smp_mb(); | ||
3146 | mapping = page_mapping(page); | ||
3147 | if (mapping) | ||
3148 | blk_run_backing_dev(mapping->backing_dev_info, page); | ||
3149 | } | ||
3150 | EXPORT_SYMBOL(block_sync_page); | ||
3151 | |||
3152 | /* | 3129 | /* |
3153 | * There are no bdflush tunables left. But distributions are | 3130 | * There are no bdflush tunables left. But distributions are |
3154 | * still running obsolete flush daemons, so we terminate them here. | 3131 | * still running obsolete flush daemons, so we terminate them here. |
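[Editor's note] With block_sync_page() and the blk_run_address_space() call in fsync_buffers_list() gone, waiting on a buffer is just a sleep: sleep_on_buffer() calls io_schedule() and relies on the submitter's plug having been flushed. A minimal, illustrative write-and-wait pattern at the buffer_head level, assuming the submit_bh(rw, bh) signature of this kernel; not code from the patch:

	#include <linux/buffer_head.h>
	#include <linux/blkdev.h>

	/* Synchronously write one buffer under the new plugging scheme;
	 * note there is no queue kick before the wait. */
	static int write_buffer_and_wait(struct buffer_head *bh)
	{
		struct blk_plug plug;

		blk_start_plug(&plug);
		lock_buffer(bh);
		bh->b_end_io = end_buffer_write_sync;
		get_bh(bh);
		submit_bh(WRITE, bh);		/* held on current->plug */
		blk_finish_plug(&plug);		/* dispatched to the driver here */
		wait_on_buffer(bh);		/* sleep_on_buffer(): io_schedule() */
		return buffer_uptodate(bh) ? 0 : -EIO;
	}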
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index e964b1cd5dd0..c27d236738fc 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -1569,34 +1569,6 @@ int cifs_fsync(struct file *file, int datasync) | |||
1569 | return rc; | 1569 | return rc; |
1570 | } | 1570 | } |
1571 | 1571 | ||
1572 | /* static void cifs_sync_page(struct page *page) | ||
1573 | { | ||
1574 | struct address_space *mapping; | ||
1575 | struct inode *inode; | ||
1576 | unsigned long index = page->index; | ||
1577 | unsigned int rpages = 0; | ||
1578 | int rc = 0; | ||
1579 | |||
1580 | cFYI(1, "sync page %p", page); | ||
1581 | mapping = page->mapping; | ||
1582 | if (!mapping) | ||
1583 | return 0; | ||
1584 | inode = mapping->host; | ||
1585 | if (!inode) | ||
1586 | return; */ | ||
1587 | |||
1588 | /* fill in rpages then | ||
1589 | result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */ | ||
1590 | |||
1591 | /* cFYI(1, "rpages is %d for sync page of Index %ld", rpages, index); | ||
1592 | |||
1593 | #if 0 | ||
1594 | if (rc < 0) | ||
1595 | return rc; | ||
1596 | return 0; | ||
1597 | #endif | ||
1598 | } */ | ||
1599 | |||
1600 | /* | 1572 | /* |
1601 | * As file closes, flush all cached write data for this inode checking | 1573 | * As file closes, flush all cached write data for this inode checking |
1602 | * for write behind errors. | 1574 | * for write behind errors. |
@@ -2510,7 +2482,6 @@ const struct address_space_operations cifs_addr_ops = { | |||
2510 | .set_page_dirty = __set_page_dirty_nobuffers, | 2482 | .set_page_dirty = __set_page_dirty_nobuffers, |
2511 | .releasepage = cifs_release_page, | 2483 | .releasepage = cifs_release_page, |
2512 | .invalidatepage = cifs_invalidate_page, | 2484 | .invalidatepage = cifs_invalidate_page, |
2513 | /* .sync_page = cifs_sync_page, */ | ||
2514 | /* .direct_IO = */ | 2485 | /* .direct_IO = */ |
2515 | }; | 2486 | }; |
2516 | 2487 | ||
@@ -2528,6 +2499,5 @@ const struct address_space_operations cifs_addr_ops_smallbuf = { | |||
2528 | .set_page_dirty = __set_page_dirty_nobuffers, | 2499 | .set_page_dirty = __set_page_dirty_nobuffers, |
2529 | .releasepage = cifs_release_page, | 2500 | .releasepage = cifs_release_page, |
2530 | .invalidatepage = cifs_invalidate_page, | 2501 | .invalidatepage = cifs_invalidate_page, |
2531 | /* .sync_page = cifs_sync_page, */ | ||
2532 | /* .direct_IO = */ | 2502 | /* .direct_IO = */ |
2533 | }; | 2503 | }; |
diff --git a/fs/direct-io.c b/fs/direct-io.c index b044705eedd4..df709b3b860a 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c | |||
@@ -1110,11 +1110,8 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, | |||
1110 | ((rw & READ) || (dio->result == dio->size))) | 1110 | ((rw & READ) || (dio->result == dio->size))) |
1111 | ret = -EIOCBQUEUED; | 1111 | ret = -EIOCBQUEUED; |
1112 | 1112 | ||
1113 | if (ret != -EIOCBQUEUED) { | 1113 | if (ret != -EIOCBQUEUED) |
1114 | /* All IO is now issued, send it on its way */ | ||
1115 | blk_run_address_space(inode->i_mapping); | ||
1116 | dio_await_completion(dio); | 1114 | dio_await_completion(dio); |
1117 | } | ||
1118 | 1115 | ||
1119 | /* | 1116 | /* |
1120 | * Sync will always be dropping the final ref and completing the | 1117 | * Sync will always be dropping the final ref and completing the |
diff --git a/fs/efs/inode.c b/fs/efs/inode.c index a8e7797b9477..9c13412e6c99 100644 --- a/fs/efs/inode.c +++ b/fs/efs/inode.c | |||
@@ -23,7 +23,6 @@ static sector_t _efs_bmap(struct address_space *mapping, sector_t block) | |||
23 | } | 23 | } |
24 | static const struct address_space_operations efs_aops = { | 24 | static const struct address_space_operations efs_aops = { |
25 | .readpage = efs_readpage, | 25 | .readpage = efs_readpage, |
26 | .sync_page = block_sync_page, | ||
27 | .bmap = _efs_bmap | 26 | .bmap = _efs_bmap |
28 | }; | 27 | }; |
29 | 28 | ||
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c index a7555238c41a..82b94c8f5d22 100644 --- a/fs/exofs/inode.c +++ b/fs/exofs/inode.c | |||
@@ -795,7 +795,6 @@ const struct address_space_operations exofs_aops = { | |||
795 | .direct_IO = NULL, /* TODO: Should be trivial to do */ | 795 | .direct_IO = NULL, /* TODO: Should be trivial to do */ |
796 | 796 | ||
797 | /* With these NULL has special meaning or default is not exported */ | 797 | /* With these NULL has special meaning or default is not exported */ |
798 | .sync_page = NULL, | ||
799 | .get_xip_mem = NULL, | 798 | .get_xip_mem = NULL, |
800 | .migratepage = NULL, | 799 | .migratepage = NULL, |
801 | .launder_page = NULL, | 800 | .launder_page = NULL, |
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 40ad210a5049..c47f706878b5 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c | |||
@@ -860,7 +860,6 @@ const struct address_space_operations ext2_aops = { | |||
860 | .readpage = ext2_readpage, | 860 | .readpage = ext2_readpage, |
861 | .readpages = ext2_readpages, | 861 | .readpages = ext2_readpages, |
862 | .writepage = ext2_writepage, | 862 | .writepage = ext2_writepage, |
863 | .sync_page = block_sync_page, | ||
864 | .write_begin = ext2_write_begin, | 863 | .write_begin = ext2_write_begin, |
865 | .write_end = ext2_write_end, | 864 | .write_end = ext2_write_end, |
866 | .bmap = ext2_bmap, | 865 | .bmap = ext2_bmap, |
@@ -880,7 +879,6 @@ const struct address_space_operations ext2_nobh_aops = { | |||
880 | .readpage = ext2_readpage, | 879 | .readpage = ext2_readpage, |
881 | .readpages = ext2_readpages, | 880 | .readpages = ext2_readpages, |
882 | .writepage = ext2_nobh_writepage, | 881 | .writepage = ext2_nobh_writepage, |
883 | .sync_page = block_sync_page, | ||
884 | .write_begin = ext2_nobh_write_begin, | 882 | .write_begin = ext2_nobh_write_begin, |
885 | .write_end = nobh_write_end, | 883 | .write_end = nobh_write_end, |
886 | .bmap = ext2_bmap, | 884 | .bmap = ext2_bmap, |
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index ae94f6d949f5..fe2541d250e4 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c | |||
@@ -1894,7 +1894,6 @@ static const struct address_space_operations ext3_ordered_aops = { | |||
1894 | .readpage = ext3_readpage, | 1894 | .readpage = ext3_readpage, |
1895 | .readpages = ext3_readpages, | 1895 | .readpages = ext3_readpages, |
1896 | .writepage = ext3_ordered_writepage, | 1896 | .writepage = ext3_ordered_writepage, |
1897 | .sync_page = block_sync_page, | ||
1898 | .write_begin = ext3_write_begin, | 1897 | .write_begin = ext3_write_begin, |
1899 | .write_end = ext3_ordered_write_end, | 1898 | .write_end = ext3_ordered_write_end, |
1900 | .bmap = ext3_bmap, | 1899 | .bmap = ext3_bmap, |
@@ -1910,7 +1909,6 @@ static const struct address_space_operations ext3_writeback_aops = { | |||
1910 | .readpage = ext3_readpage, | 1909 | .readpage = ext3_readpage, |
1911 | .readpages = ext3_readpages, | 1910 | .readpages = ext3_readpages, |
1912 | .writepage = ext3_writeback_writepage, | 1911 | .writepage = ext3_writeback_writepage, |
1913 | .sync_page = block_sync_page, | ||
1914 | .write_begin = ext3_write_begin, | 1912 | .write_begin = ext3_write_begin, |
1915 | .write_end = ext3_writeback_write_end, | 1913 | .write_end = ext3_writeback_write_end, |
1916 | .bmap = ext3_bmap, | 1914 | .bmap = ext3_bmap, |
@@ -1926,7 +1924,6 @@ static const struct address_space_operations ext3_journalled_aops = { | |||
1926 | .readpage = ext3_readpage, | 1924 | .readpage = ext3_readpage, |
1927 | .readpages = ext3_readpages, | 1925 | .readpages = ext3_readpages, |
1928 | .writepage = ext3_journalled_writepage, | 1926 | .writepage = ext3_journalled_writepage, |
1929 | .sync_page = block_sync_page, | ||
1930 | .write_begin = ext3_write_begin, | 1927 | .write_begin = ext3_write_begin, |
1931 | .write_end = ext3_journalled_write_end, | 1928 | .write_end = ext3_journalled_write_end, |
1932 | .set_page_dirty = ext3_journalled_set_page_dirty, | 1929 | .set_page_dirty = ext3_journalled_set_page_dirty, |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 9f7f9e49914f..9297ad46c465 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
@@ -3903,7 +3903,6 @@ static const struct address_space_operations ext4_ordered_aops = { | |||
3903 | .readpage = ext4_readpage, | 3903 | .readpage = ext4_readpage, |
3904 | .readpages = ext4_readpages, | 3904 | .readpages = ext4_readpages, |
3905 | .writepage = ext4_writepage, | 3905 | .writepage = ext4_writepage, |
3906 | .sync_page = block_sync_page, | ||
3907 | .write_begin = ext4_write_begin, | 3906 | .write_begin = ext4_write_begin, |
3908 | .write_end = ext4_ordered_write_end, | 3907 | .write_end = ext4_ordered_write_end, |
3909 | .bmap = ext4_bmap, | 3908 | .bmap = ext4_bmap, |
@@ -3919,7 +3918,6 @@ static const struct address_space_operations ext4_writeback_aops = { | |||
3919 | .readpage = ext4_readpage, | 3918 | .readpage = ext4_readpage, |
3920 | .readpages = ext4_readpages, | 3919 | .readpages = ext4_readpages, |
3921 | .writepage = ext4_writepage, | 3920 | .writepage = ext4_writepage, |
3922 | .sync_page = block_sync_page, | ||
3923 | .write_begin = ext4_write_begin, | 3921 | .write_begin = ext4_write_begin, |
3924 | .write_end = ext4_writeback_write_end, | 3922 | .write_end = ext4_writeback_write_end, |
3925 | .bmap = ext4_bmap, | 3923 | .bmap = ext4_bmap, |
@@ -3935,7 +3933,6 @@ static const struct address_space_operations ext4_journalled_aops = { | |||
3935 | .readpage = ext4_readpage, | 3933 | .readpage = ext4_readpage, |
3936 | .readpages = ext4_readpages, | 3934 | .readpages = ext4_readpages, |
3937 | .writepage = ext4_writepage, | 3935 | .writepage = ext4_writepage, |
3938 | .sync_page = block_sync_page, | ||
3939 | .write_begin = ext4_write_begin, | 3936 | .write_begin = ext4_write_begin, |
3940 | .write_end = ext4_journalled_write_end, | 3937 | .write_end = ext4_journalled_write_end, |
3941 | .set_page_dirty = ext4_journalled_set_page_dirty, | 3938 | .set_page_dirty = ext4_journalled_set_page_dirty, |
@@ -3951,7 +3948,6 @@ static const struct address_space_operations ext4_da_aops = { | |||
3951 | .readpages = ext4_readpages, | 3948 | .readpages = ext4_readpages, |
3952 | .writepage = ext4_writepage, | 3949 | .writepage = ext4_writepage, |
3953 | .writepages = ext4_da_writepages, | 3950 | .writepages = ext4_da_writepages, |
3954 | .sync_page = block_sync_page, | ||
3955 | .write_begin = ext4_da_write_begin, | 3951 | .write_begin = ext4_da_write_begin, |
3956 | .write_end = ext4_da_write_end, | 3952 | .write_end = ext4_da_write_end, |
3957 | .bmap = ext4_bmap, | 3953 | .bmap = ext4_bmap, |
diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 86753fe10bd1..f4ff09fb79b1 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c | |||
@@ -236,7 +236,6 @@ static const struct address_space_operations fat_aops = { | |||
236 | .readpages = fat_readpages, | 236 | .readpages = fat_readpages, |
237 | .writepage = fat_writepage, | 237 | .writepage = fat_writepage, |
238 | .writepages = fat_writepages, | 238 | .writepages = fat_writepages, |
239 | .sync_page = block_sync_page, | ||
240 | .write_begin = fat_write_begin, | 239 | .write_begin = fat_write_begin, |
241 | .write_end = fat_write_end, | 240 | .write_end = fat_write_end, |
242 | .direct_IO = fat_direct_IO, | 241 | .direct_IO = fat_direct_IO, |
diff --git a/fs/freevxfs/vxfs_subr.c b/fs/freevxfs/vxfs_subr.c index 1429f3ae1e86..5d318c44f855 100644 --- a/fs/freevxfs/vxfs_subr.c +++ b/fs/freevxfs/vxfs_subr.c | |||
@@ -44,7 +44,6 @@ static sector_t vxfs_bmap(struct address_space *, sector_t); | |||
44 | const struct address_space_operations vxfs_aops = { | 44 | const struct address_space_operations vxfs_aops = { |
45 | .readpage = vxfs_readpage, | 45 | .readpage = vxfs_readpage, |
46 | .bmap = vxfs_bmap, | 46 | .bmap = vxfs_bmap, |
47 | .sync_page = block_sync_page, | ||
48 | }; | 47 | }; |
49 | 48 | ||
50 | inline void | 49 | inline void |
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 9e3f68cc1bd1..09e8d51eeb64 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c | |||
@@ -868,7 +868,6 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb) | |||
868 | 868 | ||
869 | fc->bdi.name = "fuse"; | 869 | fc->bdi.name = "fuse"; |
870 | fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; | 870 | fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; |
871 | fc->bdi.unplug_io_fn = default_unplug_io_fn; | ||
872 | /* fuse does it's own writeback accounting */ | 871 | /* fuse does it's own writeback accounting */ |
873 | fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB; | 872 | fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB; |
874 | 873 | ||
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 4f36f8832b9b..2f87ad27efd0 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c | |||
@@ -1116,7 +1116,6 @@ static const struct address_space_operations gfs2_writeback_aops = { | |||
1116 | .writepages = gfs2_writeback_writepages, | 1116 | .writepages = gfs2_writeback_writepages, |
1117 | .readpage = gfs2_readpage, | 1117 | .readpage = gfs2_readpage, |
1118 | .readpages = gfs2_readpages, | 1118 | .readpages = gfs2_readpages, |
1119 | .sync_page = block_sync_page, | ||
1120 | .write_begin = gfs2_write_begin, | 1119 | .write_begin = gfs2_write_begin, |
1121 | .write_end = gfs2_write_end, | 1120 | .write_end = gfs2_write_end, |
1122 | .bmap = gfs2_bmap, | 1121 | .bmap = gfs2_bmap, |
@@ -1132,7 +1131,6 @@ static const struct address_space_operations gfs2_ordered_aops = { | |||
1132 | .writepage = gfs2_ordered_writepage, | 1131 | .writepage = gfs2_ordered_writepage, |
1133 | .readpage = gfs2_readpage, | 1132 | .readpage = gfs2_readpage, |
1134 | .readpages = gfs2_readpages, | 1133 | .readpages = gfs2_readpages, |
1135 | .sync_page = block_sync_page, | ||
1136 | .write_begin = gfs2_write_begin, | 1134 | .write_begin = gfs2_write_begin, |
1137 | .write_end = gfs2_write_end, | 1135 | .write_end = gfs2_write_end, |
1138 | .set_page_dirty = gfs2_set_page_dirty, | 1136 | .set_page_dirty = gfs2_set_page_dirty, |
@@ -1150,7 +1148,6 @@ static const struct address_space_operations gfs2_jdata_aops = { | |||
1150 | .writepages = gfs2_jdata_writepages, | 1148 | .writepages = gfs2_jdata_writepages, |
1151 | .readpage = gfs2_readpage, | 1149 | .readpage = gfs2_readpage, |
1152 | .readpages = gfs2_readpages, | 1150 | .readpages = gfs2_readpages, |
1153 | .sync_page = block_sync_page, | ||
1154 | .write_begin = gfs2_write_begin, | 1151 | .write_begin = gfs2_write_begin, |
1155 | .write_end = gfs2_write_end, | 1152 | .write_end = gfs2_write_end, |
1156 | .set_page_dirty = gfs2_set_page_dirty, | 1153 | .set_page_dirty = gfs2_set_page_dirty, |
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c index 939739c7b3f9..a566331db4e1 100644 --- a/fs/gfs2/meta_io.c +++ b/fs/gfs2/meta_io.c | |||
@@ -94,7 +94,6 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb | |||
94 | const struct address_space_operations gfs2_meta_aops = { | 94 | const struct address_space_operations gfs2_meta_aops = { |
95 | .writepage = gfs2_aspace_writepage, | 95 | .writepage = gfs2_aspace_writepage, |
96 | .releasepage = gfs2_releasepage, | 96 | .releasepage = gfs2_releasepage, |
97 | .sync_page = block_sync_page, | ||
98 | }; | 97 | }; |
99 | 98 | ||
100 | /** | 99 | /** |
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index dffb4e996643..fff16c968e67 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c | |||
@@ -150,7 +150,6 @@ static int hfs_writepages(struct address_space *mapping, | |||
150 | const struct address_space_operations hfs_btree_aops = { | 150 | const struct address_space_operations hfs_btree_aops = { |
151 | .readpage = hfs_readpage, | 151 | .readpage = hfs_readpage, |
152 | .writepage = hfs_writepage, | 152 | .writepage = hfs_writepage, |
153 | .sync_page = block_sync_page, | ||
154 | .write_begin = hfs_write_begin, | 153 | .write_begin = hfs_write_begin, |
155 | .write_end = generic_write_end, | 154 | .write_end = generic_write_end, |
156 | .bmap = hfs_bmap, | 155 | .bmap = hfs_bmap, |
@@ -160,7 +159,6 @@ const struct address_space_operations hfs_btree_aops = { | |||
160 | const struct address_space_operations hfs_aops = { | 159 | const struct address_space_operations hfs_aops = { |
161 | .readpage = hfs_readpage, | 160 | .readpage = hfs_readpage, |
162 | .writepage = hfs_writepage, | 161 | .writepage = hfs_writepage, |
163 | .sync_page = block_sync_page, | ||
164 | .write_begin = hfs_write_begin, | 162 | .write_begin = hfs_write_begin, |
165 | .write_end = generic_write_end, | 163 | .write_end = generic_write_end, |
166 | .bmap = hfs_bmap, | 164 | .bmap = hfs_bmap, |
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index a8df651747f0..b248a6cfcad9 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c | |||
@@ -146,7 +146,6 @@ static int hfsplus_writepages(struct address_space *mapping, | |||
146 | const struct address_space_operations hfsplus_btree_aops = { | 146 | const struct address_space_operations hfsplus_btree_aops = { |
147 | .readpage = hfsplus_readpage, | 147 | .readpage = hfsplus_readpage, |
148 | .writepage = hfsplus_writepage, | 148 | .writepage = hfsplus_writepage, |
149 | .sync_page = block_sync_page, | ||
150 | .write_begin = hfsplus_write_begin, | 149 | .write_begin = hfsplus_write_begin, |
151 | .write_end = generic_write_end, | 150 | .write_end = generic_write_end, |
152 | .bmap = hfsplus_bmap, | 151 | .bmap = hfsplus_bmap, |
@@ -156,7 +155,6 @@ const struct address_space_operations hfsplus_btree_aops = { | |||
156 | const struct address_space_operations hfsplus_aops = { | 155 | const struct address_space_operations hfsplus_aops = { |
157 | .readpage = hfsplus_readpage, | 156 | .readpage = hfsplus_readpage, |
158 | .writepage = hfsplus_writepage, | 157 | .writepage = hfsplus_writepage, |
159 | .sync_page = block_sync_page, | ||
160 | .write_begin = hfsplus_write_begin, | 158 | .write_begin = hfsplus_write_begin, |
161 | .write_end = generic_write_end, | 159 | .write_end = generic_write_end, |
162 | .bmap = hfsplus_bmap, | 160 | .bmap = hfsplus_bmap, |
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c index c0340887c7ea..9e84257b3ad5 100644 --- a/fs/hpfs/file.c +++ b/fs/hpfs/file.c | |||
@@ -120,7 +120,6 @@ static sector_t _hpfs_bmap(struct address_space *mapping, sector_t block) | |||
120 | const struct address_space_operations hpfs_aops = { | 120 | const struct address_space_operations hpfs_aops = { |
121 | .readpage = hpfs_readpage, | 121 | .readpage = hpfs_readpage, |
122 | .writepage = hpfs_writepage, | 122 | .writepage = hpfs_writepage, |
123 | .sync_page = block_sync_page, | ||
124 | .write_begin = hpfs_write_begin, | 123 | .write_begin = hpfs_write_begin, |
125 | .write_end = generic_write_end, | 124 | .write_end = generic_write_end, |
126 | .bmap = _hpfs_bmap | 125 | .bmap = _hpfs_bmap |
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index a0f3833c0dbf..3db5ba4568fc 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c | |||
@@ -1158,7 +1158,6 @@ static sector_t _isofs_bmap(struct address_space *mapping, sector_t block) | |||
1158 | 1158 | ||
1159 | static const struct address_space_operations isofs_aops = { | 1159 | static const struct address_space_operations isofs_aops = { |
1160 | .readpage = isofs_readpage, | 1160 | .readpage = isofs_readpage, |
1161 | .sync_page = block_sync_page, | ||
1162 | .bmap = _isofs_bmap | 1161 | .bmap = _isofs_bmap |
1163 | }; | 1162 | }; |
1164 | 1163 | ||
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index 9978803ceedc..eddbb373209e 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c | |||
@@ -352,7 +352,6 @@ const struct address_space_operations jfs_aops = { | |||
352 | .readpages = jfs_readpages, | 352 | .readpages = jfs_readpages, |
353 | .writepage = jfs_writepage, | 353 | .writepage = jfs_writepage, |
354 | .writepages = jfs_writepages, | 354 | .writepages = jfs_writepages, |
355 | .sync_page = block_sync_page, | ||
356 | .write_begin = jfs_write_begin, | 355 | .write_begin = jfs_write_begin, |
357 | .write_end = nobh_write_end, | 356 | .write_end = nobh_write_end, |
358 | .bmap = jfs_bmap, | 357 | .bmap = jfs_bmap, |
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c index 48b44bd8267b..6740d34cd82b 100644 --- a/fs/jfs/jfs_metapage.c +++ b/fs/jfs/jfs_metapage.c | |||
@@ -583,7 +583,6 @@ static void metapage_invalidatepage(struct page *page, unsigned long offset) | |||
583 | const struct address_space_operations jfs_metapage_aops = { | 583 | const struct address_space_operations jfs_metapage_aops = { |
584 | .readpage = metapage_readpage, | 584 | .readpage = metapage_readpage, |
585 | .writepage = metapage_writepage, | 585 | .writepage = metapage_writepage, |
586 | .sync_page = block_sync_page, | ||
587 | .releasepage = metapage_releasepage, | 586 | .releasepage = metapage_releasepage, |
588 | .invalidatepage = metapage_invalidatepage, | 587 | .invalidatepage = metapage_invalidatepage, |
589 | .set_page_dirty = __set_page_dirty_nobuffers, | 588 | .set_page_dirty = __set_page_dirty_nobuffers, |
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c index 723bc5bca09a..1adc8d455f0e 100644 --- a/fs/logfs/dev_bdev.c +++ b/fs/logfs/dev_bdev.c | |||
@@ -39,7 +39,6 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw) | |||
39 | bio.bi_end_io = request_complete; | 39 | bio.bi_end_io = request_complete; |
40 | 40 | ||
41 | submit_bio(rw, &bio); | 41 | submit_bio(rw, &bio); |
42 | generic_unplug_device(bdev_get_queue(bdev)); | ||
43 | wait_for_completion(&complete); | 42 | wait_for_completion(&complete); |
44 | return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO; | 43 | return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO; |
45 | } | 44 | } |
@@ -168,7 +167,6 @@ static void bdev_writeseg(struct super_block *sb, u64 ofs, size_t len) | |||
168 | } | 167 | } |
169 | len = PAGE_ALIGN(len); | 168 | len = PAGE_ALIGN(len); |
170 | __bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT); | 169 | __bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT); |
171 | generic_unplug_device(bdev_get_queue(logfs_super(sb)->s_bdev)); | ||
172 | } | 170 | } |
173 | 171 | ||
174 | 172 | ||
diff --git a/fs/minix/inode.c b/fs/minix/inode.c index ae0b83f476a6..adcdc0a4e182 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c | |||
@@ -399,7 +399,6 @@ static sector_t minix_bmap(struct address_space *mapping, sector_t block) | |||
399 | static const struct address_space_operations minix_aops = { | 399 | static const struct address_space_operations minix_aops = { |
400 | .readpage = minix_readpage, | 400 | .readpage = minix_readpage, |
401 | .writepage = minix_writepage, | 401 | .writepage = minix_writepage, |
402 | .sync_page = block_sync_page, | ||
403 | .write_begin = minix_write_begin, | 402 | .write_begin = minix_write_begin, |
404 | .write_end = generic_write_end, | 403 | .write_end = generic_write_end, |
405 | .bmap = minix_bmap | 404 | .bmap = minix_bmap |
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c index 388e9e8f5286..f4f1c08807ed 100644 --- a/fs/nilfs2/btnode.c +++ b/fs/nilfs2/btnode.c | |||
@@ -40,14 +40,10 @@ void nilfs_btnode_cache_init_once(struct address_space *btnc) | |||
40 | nilfs_mapping_init_once(btnc); | 40 | nilfs_mapping_init_once(btnc); |
41 | } | 41 | } |
42 | 42 | ||
43 | static const struct address_space_operations def_btnode_aops = { | ||
44 | .sync_page = block_sync_page, | ||
45 | }; | ||
46 | |||
47 | void nilfs_btnode_cache_init(struct address_space *btnc, | 43 | void nilfs_btnode_cache_init(struct address_space *btnc, |
48 | struct backing_dev_info *bdi) | 44 | struct backing_dev_info *bdi) |
49 | { | 45 | { |
50 | nilfs_mapping_init(btnc, bdi, &def_btnode_aops); | 46 | nilfs_mapping_init(btnc, bdi); |
51 | } | 47 | } |
52 | 48 | ||
53 | void nilfs_btnode_cache_clear(struct address_space *btnc) | 49 | void nilfs_btnode_cache_clear(struct address_space *btnc) |
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c index caf9a6a3fb54..1c2a3e23f8b2 100644 --- a/fs/nilfs2/gcinode.c +++ b/fs/nilfs2/gcinode.c | |||
@@ -49,7 +49,6 @@ | |||
49 | #include "ifile.h" | 49 | #include "ifile.h" |
50 | 50 | ||
51 | static const struct address_space_operations def_gcinode_aops = { | 51 | static const struct address_space_operations def_gcinode_aops = { |
52 | .sync_page = block_sync_page, | ||
53 | }; | 52 | }; |
54 | 53 | ||
55 | /* | 54 | /* |
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 2fd440d8d6b8..c89d5d1ea7c7 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c | |||
@@ -262,7 +262,6 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, | |||
262 | const struct address_space_operations nilfs_aops = { | 262 | const struct address_space_operations nilfs_aops = { |
263 | .writepage = nilfs_writepage, | 263 | .writepage = nilfs_writepage, |
264 | .readpage = nilfs_readpage, | 264 | .readpage = nilfs_readpage, |
265 | .sync_page = block_sync_page, | ||
266 | .writepages = nilfs_writepages, | 265 | .writepages = nilfs_writepages, |
267 | .set_page_dirty = nilfs_set_page_dirty, | 266 | .set_page_dirty = nilfs_set_page_dirty, |
268 | .readpages = nilfs_readpages, | 267 | .readpages = nilfs_readpages, |
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index 6a0e2a189f60..3fdb61d79c9a 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c | |||
@@ -399,7 +399,6 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc) | |||
399 | 399 | ||
400 | static const struct address_space_operations def_mdt_aops = { | 400 | static const struct address_space_operations def_mdt_aops = { |
401 | .writepage = nilfs_mdt_write_page, | 401 | .writepage = nilfs_mdt_write_page, |
402 | .sync_page = block_sync_page, | ||
403 | }; | 402 | }; |
404 | 403 | ||
405 | static const struct inode_operations def_mdt_iops; | 404 | static const struct inode_operations def_mdt_iops; |
@@ -438,10 +437,6 @@ void nilfs_mdt_set_entry_size(struct inode *inode, unsigned entry_size, | |||
438 | mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size); | 437 | mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size); |
439 | } | 438 | } |
440 | 439 | ||
441 | static const struct address_space_operations shadow_map_aops = { | ||
442 | .sync_page = block_sync_page, | ||
443 | }; | ||
444 | |||
445 | /** | 440 | /** |
446 | * nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file | 441 | * nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file |
447 | * @inode: inode of the metadata file | 442 | * @inode: inode of the metadata file |
@@ -455,9 +450,9 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode, | |||
455 | 450 | ||
456 | INIT_LIST_HEAD(&shadow->frozen_buffers); | 451 | INIT_LIST_HEAD(&shadow->frozen_buffers); |
457 | nilfs_mapping_init_once(&shadow->frozen_data); | 452 | nilfs_mapping_init_once(&shadow->frozen_data); |
458 | nilfs_mapping_init(&shadow->frozen_data, bdi, &shadow_map_aops); | 453 | nilfs_mapping_init(&shadow->frozen_data, bdi); |
459 | nilfs_mapping_init_once(&shadow->frozen_btnodes); | 454 | nilfs_mapping_init_once(&shadow->frozen_btnodes); |
460 | nilfs_mapping_init(&shadow->frozen_btnodes, bdi, &shadow_map_aops); | 455 | nilfs_mapping_init(&shadow->frozen_btnodes, bdi); |
461 | mi->mi_shadow = shadow; | 456 | mi->mi_shadow = shadow; |
462 | return 0; | 457 | return 0; |
463 | } | 458 | } |
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c index 0c432416cfef..3da37cc5de34 100644 --- a/fs/nilfs2/page.c +++ b/fs/nilfs2/page.c | |||
@@ -506,15 +506,14 @@ void nilfs_mapping_init_once(struct address_space *mapping) | |||
506 | } | 506 | } |
507 | 507 | ||
508 | void nilfs_mapping_init(struct address_space *mapping, | 508 | void nilfs_mapping_init(struct address_space *mapping, |
509 | struct backing_dev_info *bdi, | 509 | struct backing_dev_info *bdi) |
510 | const struct address_space_operations *aops) | ||
511 | { | 510 | { |
512 | mapping->host = NULL; | 511 | mapping->host = NULL; |
513 | mapping->flags = 0; | 512 | mapping->flags = 0; |
514 | mapping_set_gfp_mask(mapping, GFP_NOFS); | 513 | mapping_set_gfp_mask(mapping, GFP_NOFS); |
515 | mapping->assoc_mapping = NULL; | 514 | mapping->assoc_mapping = NULL; |
516 | mapping->backing_dev_info = bdi; | 515 | mapping->backing_dev_info = bdi; |
517 | mapping->a_ops = aops; | 516 | mapping->a_ops = NULL; |
518 | } | 517 | } |
519 | 518 | ||
520 | /* | 519 | /* |
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h index 622df27cd891..ba4d6fd40b04 100644 --- a/fs/nilfs2/page.h +++ b/fs/nilfs2/page.h | |||
@@ -63,8 +63,7 @@ void nilfs_copy_back_pages(struct address_space *, struct address_space *); | |||
63 | void nilfs_clear_dirty_pages(struct address_space *); | 63 | void nilfs_clear_dirty_pages(struct address_space *); |
64 | void nilfs_mapping_init_once(struct address_space *mapping); | 64 | void nilfs_mapping_init_once(struct address_space *mapping); |
65 | void nilfs_mapping_init(struct address_space *mapping, | 65 | void nilfs_mapping_init(struct address_space *mapping, |
66 | struct backing_dev_info *bdi, | 66 | struct backing_dev_info *bdi); |
67 | const struct address_space_operations *aops); | ||
68 | unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned); | 67 | unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned); |
69 | unsigned long nilfs_find_uncommitted_extent(struct inode *inode, | 68 | unsigned long nilfs_find_uncommitted_extent(struct inode *inode, |
70 | sector_t start_blk, | 69 | sector_t start_blk, |
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c index c3c2c7ac9020..0b1e885b8cf8 100644 --- a/fs/ntfs/aops.c +++ b/fs/ntfs/aops.c | |||
@@ -1543,8 +1543,6 @@ err_out: | |||
1543 | */ | 1543 | */ |
1544 | const struct address_space_operations ntfs_aops = { | 1544 | const struct address_space_operations ntfs_aops = { |
1545 | .readpage = ntfs_readpage, /* Fill page with data. */ | 1545 | .readpage = ntfs_readpage, /* Fill page with data. */ |
1546 | .sync_page = block_sync_page, /* Currently, just unplugs the | ||
1547 | disk request queue. */ | ||
1548 | #ifdef NTFS_RW | 1546 | #ifdef NTFS_RW |
1549 | .writepage = ntfs_writepage, /* Write dirty page to disk. */ | 1547 | .writepage = ntfs_writepage, /* Write dirty page to disk. */ |
1550 | #endif /* NTFS_RW */ | 1548 | #endif /* NTFS_RW */ |
@@ -1560,8 +1558,6 @@ const struct address_space_operations ntfs_aops = { | |||
1560 | */ | 1558 | */ |
1561 | const struct address_space_operations ntfs_mst_aops = { | 1559 | const struct address_space_operations ntfs_mst_aops = { |
1562 | .readpage = ntfs_readpage, /* Fill page with data. */ | 1560 | .readpage = ntfs_readpage, /* Fill page with data. */ |
1563 | .sync_page = block_sync_page, /* Currently, just unplugs the | ||
1564 | disk request queue. */ | ||
1565 | #ifdef NTFS_RW | 1561 | #ifdef NTFS_RW |
1566 | .writepage = ntfs_writepage, /* Write dirty page to disk. */ | 1562 | .writepage = ntfs_writepage, /* Write dirty page to disk. */ |
1567 | .set_page_dirty = __set_page_dirty_nobuffers, /* Set the page dirty | 1563 | .set_page_dirty = __set_page_dirty_nobuffers, /* Set the page dirty |
diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c index 6551c7cbad92..ef9ed854255c 100644 --- a/fs/ntfs/compress.c +++ b/fs/ntfs/compress.c | |||
@@ -698,8 +698,7 @@ lock_retry_remap: | |||
698 | "uptodate! Unplugging the disk queue " | 698 | "uptodate! Unplugging the disk queue " |
699 | "and rescheduling."); | 699 | "and rescheduling."); |
700 | get_bh(tbh); | 700 | get_bh(tbh); |
701 | blk_run_address_space(mapping); | 701 | io_schedule(); |
702 | schedule(); | ||
703 | put_bh(tbh); | 702 | put_bh(tbh); |
704 | if (unlikely(!buffer_uptodate(tbh))) | 703 | if (unlikely(!buffer_uptodate(tbh))) |
705 | goto read_err; | 704 | goto read_err; |
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 1fbb0e20131b..daea0359e974 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c | |||
@@ -2043,7 +2043,6 @@ const struct address_space_operations ocfs2_aops = { | |||
2043 | .write_begin = ocfs2_write_begin, | 2043 | .write_begin = ocfs2_write_begin, |
2044 | .write_end = ocfs2_write_end, | 2044 | .write_end = ocfs2_write_end, |
2045 | .bmap = ocfs2_bmap, | 2045 | .bmap = ocfs2_bmap, |
2046 | .sync_page = block_sync_page, | ||
2047 | .direct_IO = ocfs2_direct_IO, | 2046 | .direct_IO = ocfs2_direct_IO, |
2048 | .invalidatepage = ocfs2_invalidatepage, | 2047 | .invalidatepage = ocfs2_invalidatepage, |
2049 | .releasepage = ocfs2_releasepage, | 2048 | .releasepage = ocfs2_releasepage, |
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index b108e863d8f6..1adab287bd24 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c | |||
@@ -367,11 +367,7 @@ static inline void o2hb_bio_wait_dec(struct o2hb_bio_wait_ctxt *wc, | |||
367 | static void o2hb_wait_on_io(struct o2hb_region *reg, | 367 | static void o2hb_wait_on_io(struct o2hb_region *reg, |
368 | struct o2hb_bio_wait_ctxt *wc) | 368 | struct o2hb_bio_wait_ctxt *wc) |
369 | { | 369 | { |
370 | struct address_space *mapping = reg->hr_bdev->bd_inode->i_mapping; | ||
371 | |||
372 | blk_run_address_space(mapping); | ||
373 | o2hb_bio_wait_dec(wc, 1); | 370 | o2hb_bio_wait_dec(wc, 1); |
374 | |||
375 | wait_for_completion(&wc->wc_io_complete); | 371 | wait_for_completion(&wc->wc_io_complete); |
376 | } | 372 | } |
377 | 373 | ||
diff --git a/fs/omfs/file.c b/fs/omfs/file.c index 8a6d34fa668a..d738a7e493dd 100644 --- a/fs/omfs/file.c +++ b/fs/omfs/file.c | |||
@@ -372,7 +372,6 @@ const struct address_space_operations omfs_aops = { | |||
372 | .readpages = omfs_readpages, | 372 | .readpages = omfs_readpages, |
373 | .writepage = omfs_writepage, | 373 | .writepage = omfs_writepage, |
374 | .writepages = omfs_writepages, | 374 | .writepages = omfs_writepages, |
375 | .sync_page = block_sync_page, | ||
376 | .write_begin = omfs_write_begin, | 375 | .write_begin = omfs_write_begin, |
377 | .write_end = generic_write_end, | 376 | .write_end = generic_write_end, |
378 | .bmap = omfs_bmap, | 377 | .bmap = omfs_bmap, |
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c index e63b4171d583..2b0646613f5a 100644 --- a/fs/qnx4/inode.c +++ b/fs/qnx4/inode.c | |||
@@ -335,7 +335,6 @@ static sector_t qnx4_bmap(struct address_space *mapping, sector_t block) | |||
335 | static const struct address_space_operations qnx4_aops = { | 335 | static const struct address_space_operations qnx4_aops = { |
336 | .readpage = qnx4_readpage, | 336 | .readpage = qnx4_readpage, |
337 | .writepage = qnx4_writepage, | 337 | .writepage = qnx4_writepage, |
338 | .sync_page = block_sync_page, | ||
339 | .write_begin = qnx4_write_begin, | 338 | .write_begin = qnx4_write_begin, |
340 | .write_end = generic_write_end, | 339 | .write_end = generic_write_end, |
341 | .bmap = qnx4_bmap | 340 | .bmap = qnx4_bmap |
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 0bae036831e2..03674675f886 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c | |||
@@ -3212,7 +3212,6 @@ const struct address_space_operations reiserfs_address_space_operations = { | |||
3212 | .readpages = reiserfs_readpages, | 3212 | .readpages = reiserfs_readpages, |
3213 | .releasepage = reiserfs_releasepage, | 3213 | .releasepage = reiserfs_releasepage, |
3214 | .invalidatepage = reiserfs_invalidatepage, | 3214 | .invalidatepage = reiserfs_invalidatepage, |
3215 | .sync_page = block_sync_page, | ||
3216 | .write_begin = reiserfs_write_begin, | 3215 | .write_begin = reiserfs_write_begin, |
3217 | .write_end = reiserfs_write_end, | 3216 | .write_end = reiserfs_write_end, |
3218 | .bmap = reiserfs_aop_bmap, | 3217 | .bmap = reiserfs_aop_bmap, |
diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c index 9ca66276315e..fa8d43c92bb8 100644 --- a/fs/sysv/itree.c +++ b/fs/sysv/itree.c | |||
@@ -488,7 +488,6 @@ static sector_t sysv_bmap(struct address_space *mapping, sector_t block) | |||
488 | const struct address_space_operations sysv_aops = { | 488 | const struct address_space_operations sysv_aops = { |
489 | .readpage = sysv_readpage, | 489 | .readpage = sysv_readpage, |
490 | .writepage = sysv_writepage, | 490 | .writepage = sysv_writepage, |
491 | .sync_page = block_sync_page, | ||
492 | .write_begin = sysv_write_begin, | 491 | .write_begin = sysv_write_begin, |
493 | .write_end = generic_write_end, | 492 | .write_end = generic_write_end, |
494 | .bmap = sysv_bmap | 493 | .bmap = sysv_bmap |
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 6e11c2975dcf..81368d4d4a2c 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c | |||
@@ -1979,7 +1979,6 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent) | |||
1979 | */ | 1979 | */ |
1980 | c->bdi.name = "ubifs", | 1980 | c->bdi.name = "ubifs", |
1981 | c->bdi.capabilities = BDI_CAP_MAP_COPY; | 1981 | c->bdi.capabilities = BDI_CAP_MAP_COPY; |
1982 | c->bdi.unplug_io_fn = default_unplug_io_fn; | ||
1983 | err = bdi_init(&c->bdi); | 1982 | err = bdi_init(&c->bdi); |
1984 | if (err) | 1983 | if (err) |
1985 | goto out_close; | 1984 | goto out_close; |
diff --git a/fs/udf/file.c b/fs/udf/file.c index 89c78486cbbe..94e4553491c9 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c | |||
@@ -98,7 +98,6 @@ static int udf_adinicb_write_end(struct file *file, | |||
98 | const struct address_space_operations udf_adinicb_aops = { | 98 | const struct address_space_operations udf_adinicb_aops = { |
99 | .readpage = udf_adinicb_readpage, | 99 | .readpage = udf_adinicb_readpage, |
100 | .writepage = udf_adinicb_writepage, | 100 | .writepage = udf_adinicb_writepage, |
101 | .sync_page = block_sync_page, | ||
102 | .write_begin = simple_write_begin, | 101 | .write_begin = simple_write_begin, |
103 | .write_end = udf_adinicb_write_end, | 102 | .write_end = udf_adinicb_write_end, |
104 | }; | 103 | }; |
diff --git a/fs/udf/inode.c b/fs/udf/inode.c index c6a2e782b97b..fa96fc0fe12b 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c | |||
@@ -133,7 +133,6 @@ static sector_t udf_bmap(struct address_space *mapping, sector_t block) | |||
133 | const struct address_space_operations udf_aops = { | 133 | const struct address_space_operations udf_aops = { |
134 | .readpage = udf_readpage, | 134 | .readpage = udf_readpage, |
135 | .writepage = udf_writepage, | 135 | .writepage = udf_writepage, |
136 | .sync_page = block_sync_page, | ||
137 | .write_begin = udf_write_begin, | 136 | .write_begin = udf_write_begin, |
138 | .write_end = generic_write_end, | 137 | .write_end = generic_write_end, |
139 | .bmap = udf_bmap, | 138 | .bmap = udf_bmap, |
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index 2b251f2093af..83b28444eb17 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c | |||
@@ -588,7 +588,6 @@ static sector_t ufs_bmap(struct address_space *mapping, sector_t block) | |||
588 | const struct address_space_operations ufs_aops = { | 588 | const struct address_space_operations ufs_aops = { |
589 | .readpage = ufs_readpage, | 589 | .readpage = ufs_readpage, |
590 | .writepage = ufs_writepage, | 590 | .writepage = ufs_writepage, |
591 | .sync_page = block_sync_page, | ||
592 | .write_begin = ufs_write_begin, | 591 | .write_begin = ufs_write_begin, |
593 | .write_end = generic_write_end, | 592 | .write_end = generic_write_end, |
594 | .bmap = ufs_bmap | 593 | .bmap = ufs_bmap |
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c index a58f9155fc9a..ff0e79276f2d 100644 --- a/fs/ufs/truncate.c +++ b/fs/ufs/truncate.c | |||
@@ -481,7 +481,7 @@ int ufs_truncate(struct inode *inode, loff_t old_i_size) | |||
481 | break; | 481 | break; |
482 | if (IS_SYNC(inode) && (inode->i_state & I_DIRTY)) | 482 | if (IS_SYNC(inode) && (inode->i_state & I_DIRTY)) |
483 | ufs_sync_inode (inode); | 483 | ufs_sync_inode (inode); |
484 | blk_run_address_space(inode->i_mapping); | 484 | blk_flush_plug(current); |
485 | yield(); | 485 | yield(); |
486 | } | 486 | } |
487 | 487 | ||
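The ufs hunk above shows the caller-side conversion used throughout this series: instead of kicking the queue behind one particular mapping, the task flushes its own plug list. A minimal sketch of the before/after pattern (that blk_flush_plug() is safe to call for a task with no plug is assumed from the rest of the series, not from this hunk):

	/* before: start I/O queued behind a specific address_space */
	blk_run_address_space(inode->i_mapping);

	/* after: submit whatever requests this task has plugged, if any */
	blk_flush_plug(current);
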
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index ec7bbb5645b6..83c1c20d145a 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c | |||
@@ -1495,7 +1495,6 @@ const struct address_space_operations xfs_address_space_operations = { | |||
1495 | .readpages = xfs_vm_readpages, | 1495 | .readpages = xfs_vm_readpages, |
1496 | .writepage = xfs_vm_writepage, | 1496 | .writepage = xfs_vm_writepage, |
1497 | .writepages = xfs_vm_writepages, | 1497 | .writepages = xfs_vm_writepages, |
1498 | .sync_page = block_sync_page, | ||
1499 | .releasepage = xfs_vm_releasepage, | 1498 | .releasepage = xfs_vm_releasepage, |
1500 | .invalidatepage = xfs_vm_invalidatepage, | 1499 | .invalidatepage = xfs_vm_invalidatepage, |
1501 | .write_begin = xfs_vm_write_begin, | 1500 | .write_begin = xfs_vm_write_begin, |
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index ac1c7e8378dd..4f8f53c4d42c 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c | |||
@@ -991,7 +991,7 @@ xfs_buf_lock( | |||
991 | if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE)) | 991 | if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE)) |
992 | xfs_log_force(bp->b_target->bt_mount, 0); | 992 | xfs_log_force(bp->b_target->bt_mount, 0); |
993 | if (atomic_read(&bp->b_io_remaining)) | 993 | if (atomic_read(&bp->b_io_remaining)) |
994 | blk_run_address_space(bp->b_target->bt_mapping); | 994 | blk_flush_plug(current); |
995 | down(&bp->b_sema); | 995 | down(&bp->b_sema); |
996 | XB_SET_OWNER(bp); | 996 | XB_SET_OWNER(bp); |
997 | 997 | ||
@@ -1035,9 +1035,7 @@ xfs_buf_wait_unpin( | |||
1035 | set_current_state(TASK_UNINTERRUPTIBLE); | 1035 | set_current_state(TASK_UNINTERRUPTIBLE); |
1036 | if (atomic_read(&bp->b_pin_count) == 0) | 1036 | if (atomic_read(&bp->b_pin_count) == 0) |
1037 | break; | 1037 | break; |
1038 | if (atomic_read(&bp->b_io_remaining)) | 1038 | io_schedule(); |
1039 | blk_run_address_space(bp->b_target->bt_mapping); | ||
1040 | schedule(); | ||
1041 | } | 1039 | } |
1042 | remove_wait_queue(&bp->b_waiters, &wait); | 1040 | remove_wait_queue(&bp->b_waiters, &wait); |
1043 | set_current_state(TASK_RUNNING); | 1041 | set_current_state(TASK_RUNNING); |
@@ -1443,7 +1441,7 @@ xfs_buf_iowait( | |||
1443 | trace_xfs_buf_iowait(bp, _RET_IP_); | 1441 | trace_xfs_buf_iowait(bp, _RET_IP_); |
1444 | 1442 | ||
1445 | if (atomic_read(&bp->b_io_remaining)) | 1443 | if (atomic_read(&bp->b_io_remaining)) |
1446 | blk_run_address_space(bp->b_target->bt_mapping); | 1444 | blk_flush_plug(current); |
1447 | wait_for_completion(&bp->b_iowait); | 1445 | wait_for_completion(&bp->b_iowait); |
1448 | 1446 | ||
1449 | trace_xfs_buf_iowait_done(bp, _RET_IP_); | 1447 | trace_xfs_buf_iowait_done(bp, _RET_IP_); |
@@ -1667,7 +1665,6 @@ xfs_mapping_buftarg( | |||
1667 | struct inode *inode; | 1665 | struct inode *inode; |
1668 | struct address_space *mapping; | 1666 | struct address_space *mapping; |
1669 | static const struct address_space_operations mapping_aops = { | 1667 | static const struct address_space_operations mapping_aops = { |
1670 | .sync_page = block_sync_page, | ||
1671 | .migratepage = fail_migrate_page, | 1668 | .migratepage = fail_migrate_page, |
1672 | }; | 1669 | }; |
1673 | 1670 | ||
@@ -1948,7 +1945,7 @@ xfsbufd( | |||
1948 | count++; | 1945 | count++; |
1949 | } | 1946 | } |
1950 | if (count) | 1947 | if (count) |
1951 | blk_run_address_space(target->bt_mapping); | 1948 | blk_flush_plug(current); |
1952 | 1949 | ||
1953 | } while (!kthread_should_stop()); | 1950 | } while (!kthread_should_stop()); |
1954 | 1951 | ||
@@ -1996,7 +1993,7 @@ xfs_flush_buftarg( | |||
1996 | 1993 | ||
1997 | if (wait) { | 1994 | if (wait) { |
1998 | /* Expedite and wait for IO to complete. */ | 1995 | /* Expedite and wait for IO to complete. */ |
1999 | blk_run_address_space(target->bt_mapping); | 1996 | blk_flush_plug(current); |
2000 | while (!list_empty(&wait_list)) { | 1997 | while (!list_empty(&wait_list)) { |
2001 | bp = list_first_entry(&wait_list, struct xfs_buf, b_list); | 1998 | bp = list_first_entry(&wait_list, struct xfs_buf, b_list); |
2002 | 1999 | ||
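In xfs_buf_wait_unpin() the explicit unplug before sleeping is dropped entirely and the loop switches to io_schedule(). The intent, assuming the scheduler-side hook added elsewhere in this series, is that a task's plugged I/O is submitted when it blocks, so a waiter no longer has to unplug by hand. The converted loop as it reads after this patch (add_wait_queue() and the wait entry come from the unchanged surrounding context):

	add_wait_queue(&bp->b_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
			break;
		io_schedule();	/* any plugged I/O is flushed when we sleep */
	}
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
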
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 4ce34fa937d4..96f4094b706d 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h | |||
@@ -66,8 +66,6 @@ struct backing_dev_info { | |||
66 | unsigned int capabilities; /* Device capabilities */ | 66 | unsigned int capabilities; /* Device capabilities */ |
67 | congested_fn *congested_fn; /* Function pointer if device is md/dm */ | 67 | congested_fn *congested_fn; /* Function pointer if device is md/dm */ |
68 | void *congested_data; /* Pointer to aux data for congested func */ | 68 | void *congested_data; /* Pointer to aux data for congested func */ |
69 | void (*unplug_io_fn)(struct backing_dev_info *, struct page *); | ||
70 | void *unplug_io_data; | ||
71 | 69 | ||
72 | char *name; | 70 | char *name; |
73 | 71 | ||
@@ -251,7 +249,6 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio); | |||
251 | 249 | ||
252 | extern struct backing_dev_info default_backing_dev_info; | 250 | extern struct backing_dev_info default_backing_dev_info; |
253 | extern struct backing_dev_info noop_backing_dev_info; | 251 | extern struct backing_dev_info noop_backing_dev_info; |
254 | void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page); | ||
255 | 252 | ||
256 | int writeback_in_progress(struct backing_dev_info *bdi); | 253 | int writeback_in_progress(struct backing_dev_info *bdi); |
257 | 254 | ||
@@ -336,17 +333,4 @@ static inline int bdi_sched_wait(void *word) | |||
336 | return 0; | 333 | return 0; |
337 | } | 334 | } |
338 | 335 | ||
339 | static inline void blk_run_backing_dev(struct backing_dev_info *bdi, | ||
340 | struct page *page) | ||
341 | { | ||
342 | if (bdi && bdi->unplug_io_fn) | ||
343 | bdi->unplug_io_fn(bdi, page); | ||
344 | } | ||
345 | |||
346 | static inline void blk_run_address_space(struct address_space *mapping) | ||
347 | { | ||
348 | if (mapping) | ||
349 | blk_run_backing_dev(mapping->backing_dev_info, NULL); | ||
350 | } | ||
351 | |||
352 | #endif /* _LINUX_BACKING_DEV_H */ | 336 | #endif /* _LINUX_BACKING_DEV_H */ |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 5873037eeb91..64ab2a1bb167 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -196,7 +196,6 @@ typedef void (request_fn_proc) (struct request_queue *q); | |||
196 | typedef int (make_request_fn) (struct request_queue *q, struct bio *bio); | 196 | typedef int (make_request_fn) (struct request_queue *q, struct bio *bio); |
197 | typedef int (prep_rq_fn) (struct request_queue *, struct request *); | 197 | typedef int (prep_rq_fn) (struct request_queue *, struct request *); |
198 | typedef void (unprep_rq_fn) (struct request_queue *, struct request *); | 198 | typedef void (unprep_rq_fn) (struct request_queue *, struct request *); |
199 | typedef void (unplug_fn) (struct request_queue *); | ||
200 | 199 | ||
201 | struct bio_vec; | 200 | struct bio_vec; |
202 | struct bvec_merge_data { | 201 | struct bvec_merge_data { |
@@ -279,7 +278,6 @@ struct request_queue | |||
279 | make_request_fn *make_request_fn; | 278 | make_request_fn *make_request_fn; |
280 | prep_rq_fn *prep_rq_fn; | 279 | prep_rq_fn *prep_rq_fn; |
281 | unprep_rq_fn *unprep_rq_fn; | 280 | unprep_rq_fn *unprep_rq_fn; |
282 | unplug_fn *unplug_fn; | ||
283 | merge_bvec_fn *merge_bvec_fn; | 281 | merge_bvec_fn *merge_bvec_fn; |
284 | softirq_done_fn *softirq_done_fn; | 282 | softirq_done_fn *softirq_done_fn; |
285 | rq_timed_out_fn *rq_timed_out_fn; | 283 | rq_timed_out_fn *rq_timed_out_fn; |
@@ -293,14 +291,6 @@ struct request_queue | |||
293 | struct request *boundary_rq; | 291 | struct request *boundary_rq; |
294 | 292 | ||
295 | /* | 293 | /* |
296 | * Auto-unplugging state | ||
297 | */ | ||
298 | struct timer_list unplug_timer; | ||
299 | int unplug_thresh; /* After this many requests */ | ||
300 | unsigned long unplug_delay; /* After this many jiffies */ | ||
301 | struct work_struct unplug_work; | ||
302 | |||
303 | /* | ||
304 | * Delayed queue handling | 294 | * Delayed queue handling |
305 | */ | 295 | */ |
306 | struct delayed_work delay_work; | 296 | struct delayed_work delay_work; |
@@ -399,14 +389,13 @@ struct request_queue | |||
399 | #define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */ | 389 | #define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */ |
400 | #define QUEUE_FLAG_DEAD 5 /* queue being torn down */ | 390 | #define QUEUE_FLAG_DEAD 5 /* queue being torn down */ |
401 | #define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */ | 391 | #define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */ |
402 | #define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */ | 392 | #define QUEUE_FLAG_ELVSWITCH 7 /* don't use elevator, just do FIFO */ |
403 | #define QUEUE_FLAG_ELVSWITCH 8 /* don't use elevator, just do FIFO */ | 393 | #define QUEUE_FLAG_BIDI 8 /* queue supports bidi requests */ |
404 | #define QUEUE_FLAG_BIDI 9 /* queue supports bidi requests */ | 394 | #define QUEUE_FLAG_NOMERGES 9 /* disable merge attempts */ |
405 | #define QUEUE_FLAG_NOMERGES 10 /* disable merge attempts */ | 395 | #define QUEUE_FLAG_SAME_COMP 10 /* force complete on same CPU */ |
406 | #define QUEUE_FLAG_SAME_COMP 11 /* force complete on same CPU */ | 396 | #define QUEUE_FLAG_FAIL_IO 11 /* fake timeout */ |
407 | #define QUEUE_FLAG_FAIL_IO 12 /* fake timeout */ | 397 | #define QUEUE_FLAG_STACKABLE 12 /* supports request stacking */ |
408 | #define QUEUE_FLAG_STACKABLE 13 /* supports request stacking */ | 398 | #define QUEUE_FLAG_NONROT 13 /* non-rotational device (SSD) */ |
409 | #define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */ | ||
410 | #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ | 399 | #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ |
411 | #define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ | 400 | #define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ |
412 | #define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */ | 401 | #define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */ |
@@ -484,7 +473,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) | |||
484 | __clear_bit(flag, &q->queue_flags); | 473 | __clear_bit(flag, &q->queue_flags); |
485 | } | 474 | } |
486 | 475 | ||
487 | #define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags) | ||
488 | #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) | 476 | #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) |
489 | #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) | 477 | #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) |
490 | #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) | 478 | #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) |
@@ -679,9 +667,6 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, | |||
679 | extern void blk_rq_unprep_clone(struct request *rq); | 667 | extern void blk_rq_unprep_clone(struct request *rq); |
680 | extern int blk_insert_cloned_request(struct request_queue *q, | 668 | extern int blk_insert_cloned_request(struct request_queue *q, |
681 | struct request *rq); | 669 | struct request *rq); |
682 | extern void blk_plug_device(struct request_queue *); | ||
683 | extern void blk_plug_device_unlocked(struct request_queue *); | ||
684 | extern int blk_remove_plug(struct request_queue *); | ||
685 | extern void blk_delay_queue(struct request_queue *, unsigned long); | 670 | extern void blk_delay_queue(struct request_queue *, unsigned long); |
686 | extern void blk_recount_segments(struct request_queue *, struct bio *); | 671 | extern void blk_recount_segments(struct request_queue *, struct bio *); |
687 | extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t, | 672 | extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t, |
@@ -726,7 +711,6 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *, | |||
726 | struct request *, int); | 711 | struct request *, int); |
727 | extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, | 712 | extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, |
728 | struct request *, int, rq_end_io_fn *); | 713 | struct request *, int, rq_end_io_fn *); |
729 | extern void blk_unplug(struct request_queue *q); | ||
730 | 714 | ||
731 | static inline struct request_queue *bdev_get_queue(struct block_device *bdev) | 715 | static inline struct request_queue *bdev_get_queue(struct block_device *bdev) |
732 | { | 716 | { |
@@ -863,7 +847,6 @@ extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bd | |||
863 | 847 | ||
864 | extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); | 848 | extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); |
865 | extern void blk_dump_rq_flags(struct request *, char *); | 849 | extern void blk_dump_rq_flags(struct request *, char *); |
866 | extern void generic_unplug_device(struct request_queue *); | ||
867 | extern long nr_blockdev_pages(void); | 850 | extern long nr_blockdev_pages(void); |
868 | 851 | ||
869 | int blk_get_queue(struct request_queue *); | 852 | int blk_get_queue(struct request_queue *); |
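With blk_plug_device(), blk_remove_plug() and blk_unplug() gone from this header, per-queue plugging has no public entry points left. The replacement is the on-stack, per-task plug added elsewhere in this series; it is not visible in this hunk, so the sketch below is an assumption about that API (struct blk_plug, blk_start_plug()/blk_finish_plug()) rather than something this header change defines. Callers batch submissions inside a plug and the requests are dispatched when the plug is finished or when the task sleeps:

	struct blk_plug plug;

	blk_start_plug(&plug);
	/* submit a batch of bios; nothing reaches the driver yet */
	submit_bio(WRITE, bio1);	/* bio1/bio2: already-built bios, illustrative only */
	submit_bio(WRITE, bio2);
	blk_finish_plug(&plug);		/* flush the batch to the queue(s) */
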
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 68d1fe7b877c..f5df23561b96 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h | |||
@@ -219,7 +219,6 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size); | |||
219 | int block_commit_write(struct page *page, unsigned from, unsigned to); | 219 | int block_commit_write(struct page *page, unsigned from, unsigned to); |
220 | int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, | 220 | int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, |
221 | get_block_t get_block); | 221 | get_block_t get_block); |
222 | void block_sync_page(struct page *); | ||
223 | sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); | 222 | sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); |
224 | int block_truncate_page(struct address_space *, loff_t, get_block_t *); | 223 | int block_truncate_page(struct address_space *, loff_t, get_block_t *); |
225 | int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned, | 224 | int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned, |
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 272496d1fae4..e2768834f397 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h | |||
@@ -286,11 +286,6 @@ void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callback | |||
286 | int dm_table_complete(struct dm_table *t); | 286 | int dm_table_complete(struct dm_table *t); |
287 | 287 | ||
288 | /* | 288 | /* |
289 | * Unplug all devices in a table. | ||
290 | */ | ||
291 | void dm_table_unplug_all(struct dm_table *t); | ||
292 | |||
293 | /* | ||
294 | * Table reference counting. | 289 | * Table reference counting. |
295 | */ | 290 | */ |
296 | struct dm_table *dm_get_live_table(struct mapped_device *md); | 291 | struct dm_table *dm_get_live_table(struct mapped_device *md); |
diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 8857cf9adbb7..ec6f72b84477 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h | |||
@@ -20,7 +20,6 @@ typedef void (elevator_bio_merged_fn) (struct request_queue *, | |||
20 | typedef int (elevator_dispatch_fn) (struct request_queue *, int); | 20 | typedef int (elevator_dispatch_fn) (struct request_queue *, int); |
21 | 21 | ||
22 | typedef void (elevator_add_req_fn) (struct request_queue *, struct request *); | 22 | typedef void (elevator_add_req_fn) (struct request_queue *, struct request *); |
23 | typedef int (elevator_queue_empty_fn) (struct request_queue *); | ||
24 | typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *); | 23 | typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *); |
25 | typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *); | 24 | typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *); |
26 | typedef int (elevator_may_queue_fn) (struct request_queue *, int); | 25 | typedef int (elevator_may_queue_fn) (struct request_queue *, int); |
@@ -46,7 +45,6 @@ struct elevator_ops | |||
46 | elevator_activate_req_fn *elevator_activate_req_fn; | 45 | elevator_activate_req_fn *elevator_activate_req_fn; |
47 | elevator_deactivate_req_fn *elevator_deactivate_req_fn; | 46 | elevator_deactivate_req_fn *elevator_deactivate_req_fn; |
48 | 47 | ||
49 | elevator_queue_empty_fn *elevator_queue_empty_fn; | ||
50 | elevator_completed_req_fn *elevator_completed_req_fn; | 48 | elevator_completed_req_fn *elevator_completed_req_fn; |
51 | 49 | ||
52 | elevator_request_list_fn *elevator_former_req_fn; | 50 | elevator_request_list_fn *elevator_former_req_fn; |
@@ -101,8 +99,8 @@ struct elevator_queue | |||
101 | */ | 99 | */ |
102 | extern void elv_dispatch_sort(struct request_queue *, struct request *); | 100 | extern void elv_dispatch_sort(struct request_queue *, struct request *); |
103 | extern void elv_dispatch_add_tail(struct request_queue *, struct request *); | 101 | extern void elv_dispatch_add_tail(struct request_queue *, struct request *); |
104 | extern void elv_add_request(struct request_queue *, struct request *, int, int); | 102 | extern void elv_add_request(struct request_queue *, struct request *, int); |
105 | extern void __elv_add_request(struct request_queue *, struct request *, int, int); | 103 | extern void __elv_add_request(struct request_queue *, struct request *, int); |
106 | extern void elv_insert(struct request_queue *, struct request *, int); | 104 | extern void elv_insert(struct request_queue *, struct request *, int); |
107 | extern int elv_merge(struct request_queue *, struct request **, struct bio *); | 105 | extern int elv_merge(struct request_queue *, struct request **, struct bio *); |
108 | extern int elv_try_merge(struct request *, struct bio *); | 106 | extern int elv_try_merge(struct request *, struct bio *); |
@@ -112,7 +110,6 @@ extern void elv_merged_request(struct request_queue *, struct request *, int); | |||
112 | extern void elv_bio_merged(struct request_queue *q, struct request *, | 110 | extern void elv_bio_merged(struct request_queue *q, struct request *, |
113 | struct bio *); | 111 | struct bio *); |
114 | extern void elv_requeue_request(struct request_queue *, struct request *); | 112 | extern void elv_requeue_request(struct request_queue *, struct request *); |
115 | extern int elv_queue_empty(struct request_queue *); | ||
116 | extern struct request *elv_former_request(struct request_queue *, struct request *); | 113 | extern struct request *elv_former_request(struct request_queue *, struct request *); |
117 | extern struct request *elv_latter_request(struct request_queue *, struct request *); | 114 | extern struct request *elv_latter_request(struct request_queue *, struct request *); |
118 | extern int elv_register_queue(struct request_queue *q); | 115 | extern int elv_register_queue(struct request_queue *q); |
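The elv_add_request()/__elv_add_request() prototypes lose their last argument, the per-queue plug hint; with plugging moved out of the elevator there is nothing left for callers to pass. Illustrative call-site change (ELEVATOR_INSERT_BACK is one of the existing insertion positions; the 0 in the old call is illustrative):

	/* old */
	__elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
	/* new */
	__elv_add_request(q, rq, ELEVATOR_INSERT_BACK);
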
diff --git a/include/linux/fs.h b/include/linux/fs.h index bd3215940c37..9f2cf69911b8 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -583,7 +583,6 @@ typedef int (*read_actor_t)(read_descriptor_t *, struct page *, | |||
583 | struct address_space_operations { | 583 | struct address_space_operations { |
584 | int (*writepage)(struct page *page, struct writeback_control *wbc); | 584 | int (*writepage)(struct page *page, struct writeback_control *wbc); |
585 | int (*readpage)(struct file *, struct page *); | 585 | int (*readpage)(struct file *, struct page *); |
586 | void (*sync_page)(struct page *); | ||
587 | 586 | ||
588 | /* Write back some dirty pages from this mapping. */ | 587 | /* Write back some dirty pages from this mapping. */ |
589 | int (*writepages)(struct address_space *, struct writeback_control *); | 588 | int (*writepages)(struct address_space *, struct writeback_control *); |
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 9c66e994540f..e112b8db2f3c 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h | |||
@@ -298,7 +298,6 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma, | |||
298 | 298 | ||
299 | extern void __lock_page(struct page *page); | 299 | extern void __lock_page(struct page *page); |
300 | extern int __lock_page_killable(struct page *page); | 300 | extern int __lock_page_killable(struct page *page); |
301 | extern void __lock_page_nosync(struct page *page); | ||
302 | extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm, | 301 | extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm, |
303 | unsigned int flags); | 302 | unsigned int flags); |
304 | extern void unlock_page(struct page *page); | 303 | extern void unlock_page(struct page *page); |
@@ -342,17 +341,6 @@ static inline int lock_page_killable(struct page *page) | |||
342 | } | 341 | } |
343 | 342 | ||
344 | /* | 343 | /* |
345 | * lock_page_nosync should only be used if we can't pin the page's inode. | ||
346 | * Doesn't play quite so well with block device plugging. | ||
347 | */ | ||
348 | static inline void lock_page_nosync(struct page *page) | ||
349 | { | ||
350 | might_sleep(); | ||
351 | if (!trylock_page(page)) | ||
352 | __lock_page_nosync(page); | ||
353 | } | ||
354 | |||
355 | /* | ||
356 | * lock_page_or_retry - Lock the page, unless this would block and the | 344 | * lock_page_or_retry - Lock the page, unless this would block and the |
357 | * caller indicated that it can handle a retry. | 345 | * caller indicated that it can handle a retry. |
358 | */ | 346 | */ |
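lock_page_nosync() existed only so that callers who could not pin the page's mapping would skip the ->sync_page() unplug callback. With ->sync_page() gone there is nothing to skip, and the memory-failure, page-writeback and vmscan hunks below convert every caller to plain lock_page():

	/* before: avoid calling ->sync_page() on a mapping we don't pin */
	lock_page_nosync(page);

	/* after: lock_page() no longer touches the mapping at all */
	lock_page(page);
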
diff --git a/include/linux/swap.h b/include/linux/swap.h index 4d559325d919..9ee321833b21 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
@@ -299,8 +299,6 @@ extern void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff, | |||
299 | struct page **pagep, swp_entry_t *ent); | 299 | struct page **pagep, swp_entry_t *ent); |
300 | #endif | 300 | #endif |
301 | 301 | ||
302 | extern void swap_unplug_io_fn(struct backing_dev_info *, struct page *); | ||
303 | |||
304 | #ifdef CONFIG_SWAP | 302 | #ifdef CONFIG_SWAP |
305 | /* linux/mm/page_io.c */ | 303 | /* linux/mm/page_io.c */ |
306 | extern int swap_readpage(struct page *); | 304 | extern int swap_readpage(struct page *); |
diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 027100d30227..c91e139a652e 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c | |||
@@ -14,17 +14,11 @@ | |||
14 | 14 | ||
15 | static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0); | 15 | static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0); |
16 | 16 | ||
17 | void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page) | ||
18 | { | ||
19 | } | ||
20 | EXPORT_SYMBOL(default_unplug_io_fn); | ||
21 | |||
22 | struct backing_dev_info default_backing_dev_info = { | 17 | struct backing_dev_info default_backing_dev_info = { |
23 | .name = "default", | 18 | .name = "default", |
24 | .ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE, | 19 | .ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE, |
25 | .state = 0, | 20 | .state = 0, |
26 | .capabilities = BDI_CAP_MAP_COPY, | 21 | .capabilities = BDI_CAP_MAP_COPY, |
27 | .unplug_io_fn = default_unplug_io_fn, | ||
28 | }; | 22 | }; |
29 | EXPORT_SYMBOL_GPL(default_backing_dev_info); | 23 | EXPORT_SYMBOL_GPL(default_backing_dev_info); |
30 | 24 | ||
diff --git a/mm/filemap.c b/mm/filemap.c index 83a45d35468b..380776c2a9ac 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
@@ -155,45 +155,15 @@ void remove_from_page_cache(struct page *page) | |||
155 | } | 155 | } |
156 | EXPORT_SYMBOL(remove_from_page_cache); | 156 | EXPORT_SYMBOL(remove_from_page_cache); |
157 | 157 | ||
158 | static int sync_page(void *word) | 158 | static int sleep_on_page(void *word) |
159 | { | 159 | { |
160 | struct address_space *mapping; | ||
161 | struct page *page; | ||
162 | |||
163 | page = container_of((unsigned long *)word, struct page, flags); | ||
164 | |||
165 | /* | ||
166 | * page_mapping() is being called without PG_locked held. | ||
167 | * Some knowledge of the state and use of the page is used to | ||
168 | * reduce the requirements down to a memory barrier. | ||
169 | * The danger here is of a stale page_mapping() return value | ||
170 | * indicating a struct address_space different from the one it's | ||
171 | * associated with when it is associated with one. | ||
172 | * After smp_mb(), it's either the correct page_mapping() for | ||
173 | * the page, or an old page_mapping() and the page's own | ||
174 | * page_mapping() has gone NULL. | ||
175 | * The ->sync_page() address_space operation must tolerate | ||
176 | * page_mapping() going NULL. By an amazing coincidence, | ||
177 | * this comes about because none of the users of the page | ||
178 | * in the ->sync_page() methods make essential use of the | ||
179 | * page_mapping(), merely passing the page down to the backing | ||
180 | * device's unplug functions when it's non-NULL, which in turn | ||
181 | * ignore it for all cases but swap, where only page_private(page) is | ||
182 | * of interest. When page_mapping() does go NULL, the entire | ||
183 | * call stack gracefully ignores the page and returns. | ||
184 | * -- wli | ||
185 | */ | ||
186 | smp_mb(); | ||
187 | mapping = page_mapping(page); | ||
188 | if (mapping && mapping->a_ops && mapping->a_ops->sync_page) | ||
189 | mapping->a_ops->sync_page(page); | ||
190 | io_schedule(); | 160 | io_schedule(); |
191 | return 0; | 161 | return 0; |
192 | } | 162 | } |
193 | 163 | ||
194 | static int sync_page_killable(void *word) | 164 | static int sleep_on_page_killable(void *word) |
195 | { | 165 | { |
196 | sync_page(word); | 166 | sleep_on_page(word); |
197 | return fatal_signal_pending(current) ? -EINTR : 0; | 167 | return fatal_signal_pending(current) ? -EINTR : 0; |
198 | } | 168 | } |
199 | 169 | ||
@@ -479,12 +449,6 @@ struct page *__page_cache_alloc(gfp_t gfp) | |||
479 | EXPORT_SYMBOL(__page_cache_alloc); | 449 | EXPORT_SYMBOL(__page_cache_alloc); |
480 | #endif | 450 | #endif |
481 | 451 | ||
482 | static int __sleep_on_page_lock(void *word) | ||
483 | { | ||
484 | io_schedule(); | ||
485 | return 0; | ||
486 | } | ||
487 | |||
488 | /* | 452 | /* |
489 | * In order to wait for pages to become available there must be | 453 | * In order to wait for pages to become available there must be |
490 | * waitqueues associated with pages. By using a hash table of | 454 | * waitqueues associated with pages. By using a hash table of |
@@ -512,7 +476,7 @@ void wait_on_page_bit(struct page *page, int bit_nr) | |||
512 | DEFINE_WAIT_BIT(wait, &page->flags, bit_nr); | 476 | DEFINE_WAIT_BIT(wait, &page->flags, bit_nr); |
513 | 477 | ||
514 | if (test_bit(bit_nr, &page->flags)) | 478 | if (test_bit(bit_nr, &page->flags)) |
515 | __wait_on_bit(page_waitqueue(page), &wait, sync_page, | 479 | __wait_on_bit(page_waitqueue(page), &wait, sleep_on_page, |
516 | TASK_UNINTERRUPTIBLE); | 480 | TASK_UNINTERRUPTIBLE); |
517 | } | 481 | } |
518 | EXPORT_SYMBOL(wait_on_page_bit); | 482 | EXPORT_SYMBOL(wait_on_page_bit); |
@@ -576,17 +540,12 @@ EXPORT_SYMBOL(end_page_writeback); | |||
576 | /** | 540 | /** |
577 | * __lock_page - get a lock on the page, assuming we need to sleep to get it | 541 | * __lock_page - get a lock on the page, assuming we need to sleep to get it |
578 | * @page: the page to lock | 542 | * @page: the page to lock |
579 | * | ||
580 | * Ugly. Running sync_page() in state TASK_UNINTERRUPTIBLE is scary. If some | ||
581 | * random driver's requestfn sets TASK_RUNNING, we could busywait. However | ||
582 | * chances are that on the second loop, the block layer's plug list is empty, | ||
583 | * so sync_page() will then return in state TASK_UNINTERRUPTIBLE. | ||
584 | */ | 543 | */ |
585 | void __lock_page(struct page *page) | 544 | void __lock_page(struct page *page) |
586 | { | 545 | { |
587 | DEFINE_WAIT_BIT(wait, &page->flags, PG_locked); | 546 | DEFINE_WAIT_BIT(wait, &page->flags, PG_locked); |
588 | 547 | ||
589 | __wait_on_bit_lock(page_waitqueue(page), &wait, sync_page, | 548 | __wait_on_bit_lock(page_waitqueue(page), &wait, sleep_on_page, |
590 | TASK_UNINTERRUPTIBLE); | 549 | TASK_UNINTERRUPTIBLE); |
591 | } | 550 | } |
592 | EXPORT_SYMBOL(__lock_page); | 551 | EXPORT_SYMBOL(__lock_page); |
@@ -596,24 +555,10 @@ int __lock_page_killable(struct page *page) | |||
596 | DEFINE_WAIT_BIT(wait, &page->flags, PG_locked); | 555 | DEFINE_WAIT_BIT(wait, &page->flags, PG_locked); |
597 | 556 | ||
598 | return __wait_on_bit_lock(page_waitqueue(page), &wait, | 557 | return __wait_on_bit_lock(page_waitqueue(page), &wait, |
599 | sync_page_killable, TASK_KILLABLE); | 558 | sleep_on_page_killable, TASK_KILLABLE); |
600 | } | 559 | } |
601 | EXPORT_SYMBOL_GPL(__lock_page_killable); | 560 | EXPORT_SYMBOL_GPL(__lock_page_killable); |
602 | 561 | ||
603 | /** | ||
604 | * __lock_page_nosync - get a lock on the page, without calling sync_page() | ||
605 | * @page: the page to lock | ||
606 | * | ||
607 | * Variant of lock_page that does not require the caller to hold a reference | ||
608 | * on the page's mapping. | ||
609 | */ | ||
610 | void __lock_page_nosync(struct page *page) | ||
611 | { | ||
612 | DEFINE_WAIT_BIT(wait, &page->flags, PG_locked); | ||
613 | __wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock, | ||
614 | TASK_UNINTERRUPTIBLE); | ||
615 | } | ||
616 | |||
617 | int __lock_page_or_retry(struct page *page, struct mm_struct *mm, | 562 | int __lock_page_or_retry(struct page *page, struct mm_struct *mm, |
618 | unsigned int flags) | 563 | unsigned int flags) |
619 | { | 564 | { |
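sync_page()'s page_mapping() gymnastics and the unplug call are gone; waiting for a page bit is now a plain io_schedule() in sleep_on_page(). The unchanged lock_page() wrapper from pagemap.h (quoted here for context, not part of this hunk) shows what remains of the locking path:

	static inline void lock_page(struct page *page)
	{
		might_sleep();
		if (!trylock_page(page))
			__lock_page(page);	/* bit-wait via sleep_on_page() */
	}
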
diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 0207c2f6f8bd..bfba796d374d 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c | |||
@@ -945,7 +945,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, | |||
945 | collect_procs(ppage, &tokill); | 945 | collect_procs(ppage, &tokill); |
946 | 946 | ||
947 | if (hpage != ppage) | 947 | if (hpage != ppage) |
948 | lock_page_nosync(ppage); | 948 | lock_page(ppage); |
949 | 949 | ||
950 | ret = try_to_unmap(ppage, ttu); | 950 | ret = try_to_unmap(ppage, ttu); |
951 | if (ret != SWAP_SUCCESS) | 951 | if (ret != SWAP_SUCCESS) |
@@ -1038,7 +1038,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags) | |||
1038 | * Check "just unpoisoned", "filter hit", and | 1038 | * Check "just unpoisoned", "filter hit", and |
1039 | * "race with other subpage." | 1039 | * "race with other subpage." |
1040 | */ | 1040 | */ |
1041 | lock_page_nosync(hpage); | 1041 | lock_page(hpage); |
1042 | if (!PageHWPoison(hpage) | 1042 | if (!PageHWPoison(hpage) |
1043 | || (hwpoison_filter(p) && TestClearPageHWPoison(p)) | 1043 | || (hwpoison_filter(p) && TestClearPageHWPoison(p)) |
1044 | || (p != hpage && TestSetPageHWPoison(hpage))) { | 1044 | || (p != hpage && TestSetPageHWPoison(hpage))) { |
@@ -1088,7 +1088,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags) | |||
1088 | * It's very difficult to mess with pages currently under IO | 1088 | * It's very difficult to mess with pages currently under IO |
1089 | * and in many cases impossible, so we just avoid it here. | 1089 | * and in many cases impossible, so we just avoid it here. |
1090 | */ | 1090 | */ |
1091 | lock_page_nosync(hpage); | 1091 | lock_page(hpage); |
1092 | 1092 | ||
1093 | /* | 1093 | /* |
1094 | * unpoison always clear PG_hwpoison inside page lock | 1094 | * unpoison always clear PG_hwpoison inside page lock |
@@ -1231,7 +1231,7 @@ int unpoison_memory(unsigned long pfn) | |||
1231 | return 0; | 1231 | return 0; |
1232 | } | 1232 | } |
1233 | 1233 | ||
1234 | lock_page_nosync(page); | 1234 | lock_page(page); |
1235 | /* | 1235 | /* |
1236 | * This test is racy because PG_hwpoison is set outside of page lock. | 1236 | * This test is racy because PG_hwpoison is set outside of page lock. |
1237 | * That's acceptable because that won't trigger kernel panic. Instead, | 1237 | * That's acceptable because that won't trigger kernel panic. Instead, |
diff --git a/mm/nommu.c b/mm/nommu.c index f59e1424d3db..fb6cbd6abe16 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -1842,10 +1842,6 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, | |||
1842 | } | 1842 | } |
1843 | EXPORT_SYMBOL(remap_vmalloc_range); | 1843 | EXPORT_SYMBOL(remap_vmalloc_range); |
1844 | 1844 | ||
1845 | void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page) | ||
1846 | { | ||
1847 | } | ||
1848 | |||
1849 | unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr, | 1845 | unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr, |
1850 | unsigned long len, unsigned long pgoff, unsigned long flags) | 1846 | unsigned long len, unsigned long pgoff, unsigned long flags) |
1851 | { | 1847 | { |
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 2cb01f6ec5d0..cc0ede169e41 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
@@ -1239,7 +1239,7 @@ int set_page_dirty_lock(struct page *page) | |||
1239 | { | 1239 | { |
1240 | int ret; | 1240 | int ret; |
1241 | 1241 | ||
1242 | lock_page_nosync(page); | 1242 | lock_page(page); |
1243 | ret = set_page_dirty(page); | 1243 | ret = set_page_dirty(page); |
1244 | unlock_page(page); | 1244 | unlock_page(page); |
1245 | return ret; | 1245 | return ret; |
diff --git a/mm/readahead.c b/mm/readahead.c index 77506a291a2d..cbddc3e17246 100644 --- a/mm/readahead.c +++ b/mm/readahead.c | |||
@@ -554,17 +554,5 @@ page_cache_async_readahead(struct address_space *mapping, | |||
554 | 554 | ||
555 | /* do read-ahead */ | 555 | /* do read-ahead */ |
556 | ondemand_readahead(mapping, ra, filp, true, offset, req_size); | 556 | ondemand_readahead(mapping, ra, filp, true, offset, req_size); |
557 | |||
558 | #ifdef CONFIG_BLOCK | ||
559 | /* | ||
560 | * Normally the current page is !uptodate and lock_page() will be | ||
561 | * immediately called to implicitly unplug the device. However this | ||
562 | * is not always true for RAID conifgurations, where data arrives | ||
563 | * not strictly in their submission order. In this case we need to | ||
564 | * explicitly kick off the IO. | ||
565 | */ | ||
566 | if (PageUptodate(page)) | ||
567 | blk_run_backing_dev(mapping->backing_dev_info, NULL); | ||
568 | #endif | ||
569 | } | 557 | } |
570 | EXPORT_SYMBOL_GPL(page_cache_async_readahead); | 558 | EXPORT_SYMBOL_GPL(page_cache_async_readahead); |
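The RAID special case kicked the backing device when the current page was already uptodate, because in that case nothing downstream would lock_page() and implicitly unplug the queue. With the per-queue plug gone there is no device-side plug that can keep readahead I/O stalled; any batching now lives in the submitting task's own plug, which is flushed when the task blocks or finishes the plug. A hedged sketch of the reader-side usage this relies on (plug API assumed from the rest of the series, not from this hunk):

	struct blk_plug plug;

	blk_start_plug(&plug);
	page_cache_async_readahead(mapping, ra, filp, page, offset, req_size);
	/* ... queue any other reads for this request ... */
	blk_finish_plug(&plug);	/* everything queued above is sent to the device here */
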
diff --git a/mm/shmem.c b/mm/shmem.c index 5ee67c990602..24d23f5bedf1 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -224,7 +224,6 @@ static const struct vm_operations_struct shmem_vm_ops; | |||
224 | static struct backing_dev_info shmem_backing_dev_info __read_mostly = { | 224 | static struct backing_dev_info shmem_backing_dev_info __read_mostly = { |
225 | .ra_pages = 0, /* No readahead */ | 225 | .ra_pages = 0, /* No readahead */ |
226 | .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED, | 226 | .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED, |
227 | .unplug_io_fn = default_unplug_io_fn, | ||
228 | }; | 227 | }; |
229 | 228 | ||
230 | static LIST_HEAD(shmem_swaplist); | 229 | static LIST_HEAD(shmem_swaplist); |
diff --git a/mm/swap_state.c b/mm/swap_state.c index 5c8cfabbc9bc..46680461785b 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c | |||
@@ -24,12 +24,10 @@ | |||
24 | 24 | ||
25 | /* | 25 | /* |
26 | * swapper_space is a fiction, retained to simplify the path through | 26 | * swapper_space is a fiction, retained to simplify the path through |
27 | * vmscan's shrink_page_list, to make sync_page look nicer, and to allow | 27 | * vmscan's shrink_page_list. |
28 | * future use of radix_tree tags in the swap cache. | ||
29 | */ | 28 | */ |
30 | static const struct address_space_operations swap_aops = { | 29 | static const struct address_space_operations swap_aops = { |
31 | .writepage = swap_writepage, | 30 | .writepage = swap_writepage, |
32 | .sync_page = block_sync_page, | ||
33 | .set_page_dirty = __set_page_dirty_nobuffers, | 31 | .set_page_dirty = __set_page_dirty_nobuffers, |
34 | .migratepage = migrate_page, | 32 | .migratepage = migrate_page, |
35 | }; | 33 | }; |
@@ -37,7 +35,6 @@ static const struct address_space_operations swap_aops = { | |||
37 | static struct backing_dev_info swap_backing_dev_info = { | 35 | static struct backing_dev_info swap_backing_dev_info = { |
38 | .name = "swap", | 36 | .name = "swap", |
39 | .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED, | 37 | .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED, |
40 | .unplug_io_fn = swap_unplug_io_fn, | ||
41 | }; | 38 | }; |
42 | 39 | ||
43 | struct address_space swapper_space = { | 40 | struct address_space swapper_space = { |
diff --git a/mm/swapfile.c b/mm/swapfile.c index 07a458d72fa8..7ceea78ceb20 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
@@ -95,39 +95,6 @@ __try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset) | |||
95 | } | 95 | } |
96 | 96 | ||
97 | /* | 97 | /* |
98 | * We need this because the bdev->unplug_fn can sleep and we cannot | ||
99 | * hold swap_lock while calling the unplug_fn. And swap_lock | ||
100 | * cannot be turned into a mutex. | ||
101 | */ | ||
102 | static DECLARE_RWSEM(swap_unplug_sem); | ||
103 | |||
104 | void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page) | ||
105 | { | ||
106 | swp_entry_t entry; | ||
107 | |||
108 | down_read(&swap_unplug_sem); | ||
109 | entry.val = page_private(page); | ||
110 | if (PageSwapCache(page)) { | ||
111 | struct block_device *bdev = swap_info[swp_type(entry)]->bdev; | ||
112 | struct backing_dev_info *bdi; | ||
113 | |||
114 | /* | ||
115 | * If the page is removed from swapcache from under us (with a | ||
116 | * racy try_to_unuse/swapoff) we need an additional reference | ||
117 | * count to avoid reading garbage from page_private(page) above. | ||
118 | * If the WARN_ON triggers during a swapoff it maybe the race | ||
119 | * condition and it's harmless. However if it triggers without | ||
120 | * swapoff it signals a problem. | ||
121 | */ | ||
122 | WARN_ON(page_count(page) <= 1); | ||
123 | |||
124 | bdi = bdev->bd_inode->i_mapping->backing_dev_info; | ||
125 | blk_run_backing_dev(bdi, page); | ||
126 | } | ||
127 | up_read(&swap_unplug_sem); | ||
128 | } | ||
129 | |||
130 | /* | ||
131 | * swapon tell device that all the old swap contents can be discarded, | 98 | * swapon tell device that all the old swap contents can be discarded, |
132 | * to allow the swap device to optimize its wear-levelling. | 99 | * to allow the swap device to optimize its wear-levelling. |
133 | */ | 100 | */ |
@@ -1643,10 +1610,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) | |||
1643 | goto out_dput; | 1610 | goto out_dput; |
1644 | } | 1611 | } |
1645 | 1612 | ||
1646 | /* wait for any unplug function to finish */ | ||
1647 | down_write(&swap_unplug_sem); | ||
1648 | up_write(&swap_unplug_sem); | ||
1649 | |||
1650 | destroy_swap_extents(p); | 1613 | destroy_swap_extents(p); |
1651 | if (p->flags & SWP_CONTINUED) | 1614 | if (p->flags & SWP_CONTINUED) |
1652 | free_swap_count_continuations(p); | 1615 | free_swap_count_continuations(p); |
diff --git a/mm/vmscan.c b/mm/vmscan.c index 17497d0cd8b9..251bed73ac03 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -358,7 +358,7 @@ static int may_write_to_queue(struct backing_dev_info *bdi, | |||
358 | static void handle_write_error(struct address_space *mapping, | 358 | static void handle_write_error(struct address_space *mapping, |
359 | struct page *page, int error) | 359 | struct page *page, int error) |
360 | { | 360 | { |
361 | lock_page_nosync(page); | 361 | lock_page(page); |
362 | if (page_mapping(page) == mapping) | 362 | if (page_mapping(page) == mapping) |
363 | mapping_set_error(mapping, error); | 363 | mapping_set_error(mapping, error); |
364 | unlock_page(page); | 364 | unlock_page(page); |