-rw-r--r--  block/ll_rw_blk.c          106
-rw-r--r--  include/linux/blkdev.h      21
-rw-r--r--  include/linux/interrupt.h    1
3 files changed, 124 insertions(+), 4 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 91d3b4828c49..8e136450abc2 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -27,6 +27,8 @@
 #include <linux/swap.h>
 #include <linux/writeback.h>
 #include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/cpu.h>
 
 /*
  * for max sense size
@@ -62,13 +64,15 @@ static wait_queue_head_t congestion_wqh[2] = {
 /*
  * Controlling structure to kblockd
  */
 static struct workqueue_struct *kblockd_workqueue;
 
 unsigned long blk_max_low_pfn, blk_max_pfn;
 
 EXPORT_SYMBOL(blk_max_low_pfn);
 EXPORT_SYMBOL(blk_max_pfn);
 
+static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
+
 /* Amount of time in which a process may batch requests */
 #define BLK_BATCH_TIME	(HZ/50UL)
 
@@ -207,6 +211,13 @@ void blk_queue_merge_bvec(request_queue_t *q, merge_bvec_fn *mbfn)
 
 EXPORT_SYMBOL(blk_queue_merge_bvec);
 
+void blk_queue_softirq_done(request_queue_t *q, softirq_done_fn *fn)
+{
+	q->softirq_done_fn = fn;
+}
+
+EXPORT_SYMBOL(blk_queue_softirq_done);
+
 /**
  * blk_queue_make_request - define an alternate make_request function for a device
  * @q:  the request queue for the device to be affected
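
blk_queue_softirq_done() is the whole registration API: a driver stores its completion callback on the queue at setup time. A minimal sketch of the intended use follows; struct mydev, mydev_request_fn and mydev_softirq_done are hypothetical names invented for illustration, not part of this patch.

	/* hypothetical driver glue, assuming the 2.6.15-era block API */
	static void mydev_request_fn(request_queue_t *q);
	static void mydev_softirq_done(struct request *rq);

	static int mydev_init_queue(struct mydev *md)
	{
		md->queue = blk_init_queue(mydev_request_fn, &md->lock);
		if (!md->queue)
			return -ENOMEM;

		/* requests handed to blk_complete_request() finish here */
		blk_queue_softirq_done(md->queue, mydev_softirq_done);
		return 0;
	}
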
@@ -270,6 +281,7 @@ EXPORT_SYMBOL(blk_queue_make_request);
 static inline void rq_init(request_queue_t *q, struct request *rq)
 {
 	INIT_LIST_HEAD(&rq->queuelist);
+	INIT_LIST_HEAD(&rq->donelist);
 
 	rq->errors = 0;
 	rq->rq_status = RQ_ACTIVE;
@@ -286,6 +298,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
 	rq->sense = NULL;
 	rq->end_io = NULL;
 	rq->end_io_data = NULL;
+	rq->completion_data = NULL;
 }
 
 /**
@@ -3287,6 +3300,87 @@ int end_that_request_chunk(struct request *req, int uptodate, int nr_bytes)
 EXPORT_SYMBOL(end_that_request_chunk);
 
 /*
+ * splice the completion data to a local structure and hand off to
+ * process_completion_queue() to complete the requests
+ */
+static void blk_done_softirq(struct softirq_action *h)
+{
+	struct list_head *cpu_list;
+	LIST_HEAD(local_list);
+
+	local_irq_disable();
+	cpu_list = &__get_cpu_var(blk_cpu_done);
+	list_splice_init(cpu_list, &local_list);
+	local_irq_enable();
+
+	while (!list_empty(&local_list)) {
+		struct request *rq = list_entry(local_list.next, struct request, donelist);
+
+		list_del_init(&rq->donelist);
+		rq->q->softirq_done_fn(rq);
+	}
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
+			  void *hcpu)
+{
+	/*
+	 * If a CPU goes away, splice its entries to the current CPU
+	 * and trigger a run of the softirq
+	 */
+	if (action == CPU_DEAD) {
+		int cpu = (unsigned long) hcpu;
+
+		local_irq_disable();
+		list_splice_init(&per_cpu(blk_cpu_done, cpu),
+				 &__get_cpu_var(blk_cpu_done));
+		raise_softirq_irqoff(BLOCK_SOFTIRQ);
+		local_irq_enable();
+	}
+
+	return NOTIFY_OK;
+}
+
+
+static struct notifier_block __devinitdata blk_cpu_notifier = {
+	.notifier_call	= blk_cpu_notify,
+};
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/**
+ * blk_complete_request - end I/O on a request
+ * @req:      the request being processed
+ *
+ * Description:
+ *     Ends all I/O on a request. It does not handle partial completions,
+ *     unless the driver actually implements this in its completion callback
+ *     through requeueing. The actual completion happens out-of-order,
+ *     through a softirq handler. The user must have registered a completion
+ *     callback through blk_queue_softirq_done().
+ **/
+
+void blk_complete_request(struct request *req)
+{
+	struct list_head *cpu_list;
+	unsigned long flags;
+
+	BUG_ON(!req->q->softirq_done_fn);
+
+	local_irq_save(flags);
+
+	cpu_list = &__get_cpu_var(blk_cpu_done);
+	list_add_tail(&req->donelist, cpu_list);
+	raise_softirq_irqoff(BLOCK_SOFTIRQ);
+
+	local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL(blk_complete_request);
+
+/*
  * queue lock must be held
  */
 void end_that_request_last(struct request *req, int uptodate)
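
Taken together, the intended driver flow is: the hardirq handler stashes whatever status it needs (completion_data exists for exactly that hand-off), calls blk_complete_request(), and returns; the callback registered with blk_queue_softirq_done() then finishes the request from BLOCK_SOFTIRQ. A sketch using the same hypothetical mydev names as above (mydev_irq_status() is likewise invented); note that end_that_request_chunk() may run without the queue lock, while end_that_request_last() requires it:

	/* hardirq half: keep it short, defer the real completion work */
	static irqreturn_t mydev_interrupt(int irq, void *dev_id, struct pt_regs *regs)
	{
		struct mydev *md = dev_id;
		struct request *rq = md->current_rq;

		/* completion_data carries state from hardirq to softirq */
		rq->completion_data = (void *)(unsigned long) mydev_irq_status(md);

		blk_complete_request(rq);
		return IRQ_HANDLED;
	}

	/* softirq half: runs via blk_done_softirq() on the completing CPU */
	static void mydev_softirq_done(struct request *rq)
	{
		int error = (unsigned long) rq->completion_data;
		int uptodate = !error;

		/* byte accounting is safe without the queue lock... */
		end_that_request_chunk(rq, uptodate, rq->hard_nr_sectors << 9);

		/* ...but end_that_request_last() must hold it */
		spin_lock_irq(rq->q->queue_lock);
		end_that_request_last(rq, uptodate);
		spin_unlock_irq(rq->q->queue_lock);
	}
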
@@ -3364,6 +3458,8 @@ EXPORT_SYMBOL(kblockd_flush);
 
 int __init blk_dev_init(void)
 {
+	int i;
+
 	kblockd_workqueue = create_workqueue("kblockd");
 	if (!kblockd_workqueue)
 		panic("Failed to create kblockd\n");
@@ -3377,6 +3473,14 @@ int __init blk_dev_init(void)
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
 			sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
 
+	for (i = 0; i < NR_CPUS; i++)
+		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
+
+	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
+#ifdef CONFIG_HOTPLUG_CPU
+	register_cpu_notifier(&blk_cpu_notifier);
+#endif
+
 	blk_max_low_pfn = max_low_pfn;
 	blk_max_pfn = max_pfn;
 
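
Note that the init loop touches all NR_CPUS slots, not just the CPUs currently online, so the per-cpu done lists are valid even for CPUs hotplugged in later; the notifier above therefore only needs to handle the CPU_DEAD direction.
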
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index fb0985377421..804cc4ec9533 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -118,9 +118,9 @@ struct request_list {
  * try to put the fields that are referenced together in the same cacheline
  */
 struct request {
-	struct list_head queuelist; /* looking for ->queue? you must _not_
-				     * access it directly, use
-				     * blkdev_dequeue_request! */
+	struct list_head queuelist;
+	struct list_head donelist;
+
 	unsigned long flags;		/* see REQ_ bits below */
 
 	/* Maintain bio traversal state for part by part I/O submission.
@@ -141,6 +141,7 @@ struct request {
 	struct bio *biotail;
 
 	void *elevator_private;
+	void *completion_data;
 
 	unsigned short ioprio;
 
@@ -291,6 +292,7 @@ typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
 typedef void (activity_fn) (void *data, int rw);
 typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
 typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
+typedef void (softirq_done_fn)(struct request *);
 
 enum blk_queue_state {
 	Queue_down,
@@ -332,6 +334,7 @@ struct request_queue
 	activity_fn		*activity_fn;
 	issue_flush_fn		*issue_flush_fn;
 	prepare_flush_fn	*prepare_flush_fn;
+	softirq_done_fn		*softirq_done_fn;
 
 	/*
 	 * Dispatch queue sorting
@@ -646,6 +649,17 @@ extern int end_that_request_first(struct request *, int, int);
 extern int end_that_request_chunk(struct request *, int, int);
 extern void end_that_request_last(struct request *, int);
 extern void end_request(struct request *req, int uptodate);
+extern void blk_complete_request(struct request *);
+
+static inline int rq_all_done(struct request *rq, unsigned int nr_bytes)
+{
+	if (blk_fs_request(rq))
+		return (nr_bytes >= (rq->hard_nr_sectors << 9));
+	else if (blk_pc_request(rq))
+		return nr_bytes >= rq->data_len;
+
+	return 0;
+}
 
 /*
  * end_that_request_first/chunk() takes an uptodate argument. we account
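
rq_all_done() gives drivers that can see partial completions a cheap test, usable from interrupt context, for whether a request is entirely finished and can therefore be punted to the softirq path. An illustrative sketch (mydev_end_io() and its calling convention are invented for the example):

	static void mydev_end_io(struct request *rq, unsigned int bytes_done, int error)
	{
		if (!rq_all_done(rq, bytes_done)) {
			/* partial: account what finished, request stays active */
			end_that_request_chunk(rq, !error, bytes_done);
			return;
		}

		/* fully done: final completion moves to BLOCK_SOFTIRQ */
		blk_complete_request(rq);
	}
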
@@ -694,6 +708,7 @@ extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
 extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
 extern void blk_queue_dma_alignment(request_queue_t *, int);
+extern void blk_queue_softirq_done(request_queue_t *, softirq_done_fn *);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *);
 extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index e50a95fbeb11..f02204706984 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -112,6 +112,7 @@ enum
 	TIMER_SOFTIRQ,
 	NET_TX_SOFTIRQ,
 	NET_RX_SOFTIRQ,
+	BLOCK_SOFTIRQ,
 	SCSI_SOFTIRQ,
 	TASKLET_SOFTIRQ
 };
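
Placement within the enum is deliberate: pending softirqs are serviced from the lowest number upward, so BLOCK_SOFTIRQ runs ahead of SCSI_SOFTIRQ and TASKLET_SOFTIRQ within a single softirq pass, keeping block completions from queueing behind tasklets.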