author      Linus Torvalds <torvalds@g5.osdl.org>  2006-01-09 12:26:40 -0500
committer   Linus Torvalds <torvalds@g5.osdl.org>  2006-01-09 12:26:40 -0500
commit      e2688f00dc0ceb9d9867434dffbd080411fc23b0 (patch)
tree        433549c6a655ac879654ba82d312911677650380 /block
parent      356cebea1123804e4aa85b43ab39bbd0ac8e667c (diff)
parent      a9925a06ea52a44b4bf4a941342e8240eaf22417 (diff)
Merge branch 'blk-softirq' of git://brick.kernel.dk/data/git/linux-2.6-block
Manual merge for trivial #include changes
Diffstat (limited to 'block')
-rw-r--r--  block/ll_rw_blk.c  106
1 file changed, 105 insertions(+), 1 deletion(-)
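Before the diff itself, a minimal driver-side sketch of the softirq completion path this merge introduces. The "mydrv" names, the IRQ wiring, and the use of dev_id to carry the finished request are hypothetical illustration; only blk_queue_softirq_done(), blk_complete_request(), end_that_request_chunk(), and end_that_request_last() are the block-layer interfaces added or used by this commit:

/* hypothetical driver-side usage; "mydrv" and its IRQ plumbing are made up */
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

/* runs in softirq context on the CPU that called blk_complete_request() */
static void mydrv_softirq_done(struct request *rq)
{
        unsigned long flags;

        /* complete all transferred bytes; no partial-completion handling */
        end_that_request_chunk(rq, 1, rq->hard_nr_sectors << 9);

        /* end_that_request_last() requires the queue lock (see below) */
        spin_lock_irqsave(rq->q->queue_lock, flags);
        end_that_request_last(rq, 1);
        spin_unlock_irqrestore(rq->q->queue_lock, flags);
}

/* queue setup: register the completion callback once */
static void mydrv_init_queue(request_queue_t *q)
{
        blk_queue_softirq_done(q, mydrv_softirq_done);
}

/* hard IRQ handler: defer the expensive completion work to the softirq */
static irqreturn_t mydrv_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct request *rq = dev_id;    /* hypothetical hand-off of the done request */

        blk_complete_request(rq);
        return IRQ_HANDLED;
}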
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index c44d6fe9f6ce..8e27d0ab0d7c 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -26,6 +26,8 @@
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/writeback.h>
+#include <linux/interrupt.h>
+#include <linux/cpu.h>
 
 /*
  * for max sense size
@@ -61,13 +63,15 @@ static wait_queue_head_t congestion_wqh[2] = {
 /*
  * Controlling structure to kblockd
  */
 static struct workqueue_struct *kblockd_workqueue;
 
 unsigned long blk_max_low_pfn, blk_max_pfn;
 
 EXPORT_SYMBOL(blk_max_low_pfn);
 EXPORT_SYMBOL(blk_max_pfn);
 
+static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
+
 /* Amount of time in which a process may batch requests */
 #define BLK_BATCH_TIME        (HZ/50UL)
 
@@ -206,6 +210,13 @@ void blk_queue_merge_bvec(request_queue_t *q, merge_bvec_fn *mbfn)
 
 EXPORT_SYMBOL(blk_queue_merge_bvec);
 
+void blk_queue_softirq_done(request_queue_t *q, softirq_done_fn *fn)
+{
+        q->softirq_done_fn = fn;
+}
+
+EXPORT_SYMBOL(blk_queue_softirq_done);
+
 /**
  * blk_queue_make_request - define an alternate make_request function for a device
  * @q:  the request queue for the device to be affected
@@ -269,6 +280,7 @@ EXPORT_SYMBOL(blk_queue_make_request);
 static inline void rq_init(request_queue_t *q, struct request *rq)
 {
         INIT_LIST_HEAD(&rq->queuelist);
+        INIT_LIST_HEAD(&rq->donelist);
 
         rq->errors = 0;
         rq->rq_status = RQ_ACTIVE;
@@ -285,6 +297,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
         rq->sense = NULL;
         rq->end_io = NULL;
         rq->end_io_data = NULL;
+        rq->completion_data = NULL;
 }
 
 /**
@@ -3262,6 +3275,87 @@ int end_that_request_chunk(struct request *req, int uptodate, int nr_bytes)
 EXPORT_SYMBOL(end_that_request_chunk);
 
 /*
+ * splice the completion data to a local structure and hand off to
+ * process_completion_queue() to complete the requests
+ */
+static void blk_done_softirq(struct softirq_action *h)
+{
+        struct list_head *cpu_list;
+        LIST_HEAD(local_list);
+
+        local_irq_disable();
+        cpu_list = &__get_cpu_var(blk_cpu_done);
+        list_splice_init(cpu_list, &local_list);
+        local_irq_enable();
+
+        while (!list_empty(&local_list)) {
+                struct request *rq = list_entry(local_list.next, struct request, donelist);
+
+                list_del_init(&rq->donelist);
+                rq->q->softirq_done_fn(rq);
+        }
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
+                          void *hcpu)
+{
+        /*
+         * If a CPU goes away, splice its entries to the current CPU
+         * and trigger a run of the softirq
+         */
+        if (action == CPU_DEAD) {
+                int cpu = (unsigned long) hcpu;
+
+                local_irq_disable();
+                list_splice_init(&per_cpu(blk_cpu_done, cpu),
+                                 &__get_cpu_var(blk_cpu_done));
+                raise_softirq_irqoff(BLOCK_SOFTIRQ);
+                local_irq_enable();
+        }
+
+        return NOTIFY_OK;
+}
+
+
+static struct notifier_block __devinitdata blk_cpu_notifier = {
+        .notifier_call        = blk_cpu_notify,
+};
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/**
+ * blk_complete_request - end I/O on a request
+ * @req:      the request being processed
+ *
+ * Description:
+ *     Ends all I/O on a request. It does not handle partial completions,
+ *     unless the driver actually implements this in its completion callback
+ *     through requeueing. The actual completion happens out-of-order,
+ *     through a softirq handler. The user must have registered a completion
+ *     callback through blk_queue_softirq_done().
+ **/
+
+void blk_complete_request(struct request *req)
+{
+        struct list_head *cpu_list;
+        unsigned long flags;
+
+        BUG_ON(!req->q->softirq_done_fn);
+
+        local_irq_save(flags);
+
+        cpu_list = &__get_cpu_var(blk_cpu_done);
+        list_add_tail(&req->donelist, cpu_list);
+        raise_softirq_irqoff(BLOCK_SOFTIRQ);
+
+        local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL(blk_complete_request);
+
+/*
  * queue lock must be held
  */
 void end_that_request_last(struct request *req, int uptodate)
@@ -3339,6 +3433,8 @@ EXPORT_SYMBOL(kblockd_flush);
 
 int __init blk_dev_init(void)
 {
+        int i;
+
         kblockd_workqueue = create_workqueue("kblockd");
         if (!kblockd_workqueue)
                 panic("Failed to create kblockd\n");
@@ -3352,6 +3448,14 @@ int __init blk_dev_init(void)
         iocontext_cachep = kmem_cache_create("blkdev_ioc",
                         sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
 
+        for (i = 0; i < NR_CPUS; i++)
+                INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
+
+        open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
+#ifdef CONFIG_HOTPLUG_CPU
+        register_cpu_notifier(&blk_cpu_notifier);
+#endif
+
         blk_max_low_pfn = max_low_pfn;
         blk_max_pfn = max_pfn;
 
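In summary, the path added here keeps request completion off the hard IRQ path and local to one CPU. blk_complete_request() only queues the request on the calling CPU's blk_cpu_done list and raises BLOCK_SOFTIRQ; blk_done_softirq() later splices that per-CPU list (interrupts disabled just for the splice) and calls the driver's softirq_done_fn for each entry. The CPU_DEAD notifier moves any entries stranded on an offlined CPU to a live one so they still complete.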