author    Chris Metcalf <cmetcalf@tilera.com>  2010-06-05 10:35:29 -0400
committer Chris Metcalf <cmetcalf@tilera.com>  2010-06-05 10:35:29 -0400
commit    cc44826a26b12b2489bc7dbb597fcdf107f2cc01 (patch)
tree      98a7958212ac61345300944f512a949e5ee3e513 /block
parent    482e6f8466ab1066f1a969bcdbe916b56439622c (diff)
parent    7f0d384cafabfbb56663ee6944c18fc0450fc5d6 (diff)
Merge branch 'master' into for-linus
Diffstat (limited to 'block')
 -rw-r--r--  block/blk-core.c     |  20
 -rw-r--r--  block/cfq-iosched.c  | 101
 -rw-r--r--  block/elevator.c     |   8
 3 files changed, 98 insertions(+), 31 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 3bc5579d6f54..f84cce42fc58 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -467,6 +467,9 @@ static int blk_init_free_list(struct request_queue *q)
 {
         struct request_list *rl = &q->rq;
 
+        if (unlikely(rl->rq_pool))
+                return 0;
+
         rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
         rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
         rl->elvpriv = 0;
@@ -570,9 +573,17 @@ EXPORT_SYMBOL(blk_init_queue);
 struct request_queue *
 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 {
-        struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+        struct request_queue *uninit_q, *q;
+
+        uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+        if (!uninit_q)
+                return NULL;
+
+        q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id);
+        if (!q)
+                blk_cleanup_queue(uninit_q);
 
-        return blk_init_allocated_queue_node(q, rfn, lock, node_id);
+        return q;
 }
 EXPORT_SYMBOL(blk_init_queue_node);
 
@@ -592,10 +603,8 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
                 return NULL;
 
         q->node = node_id;
-        if (blk_init_free_list(q)) {
-                kmem_cache_free(blk_requestq_cachep, q);
+        if (blk_init_free_list(q))
                 return NULL;
-        }
 
         q->request_fn = rfn;
         q->prep_rq_fn = NULL;
@@ -618,7 +627,6 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
                 return q;
         }
 
-        blk_put_queue(q);
         return NULL;
 }
 EXPORT_SYMBOL(blk_init_allocated_queue_node);
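
Note (illustrative, not part of the commit): the blk-core.c change appears to separate queue allocation from queue initialization so that a failed init no longer frees an object the caller may still own; the caller now tears down the half-built queue itself via blk_cleanup_queue(). A minimal userspace sketch of that alloc/init/cleanup-on-failure shape, with hypothetical names standing in for the block-layer functions:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for request_queue alloc/init/cleanup. */
struct queue { int initialized; };

static struct queue *queue_alloc(void)
{
        return calloc(1, sizeof(struct queue));
}

static struct queue *queue_init(struct queue *q)
{
        /* Pretend initialization can fail and return NULL. */
        q->initialized = 1;
        return q;
}

static void queue_cleanup(struct queue *q)
{
        free(q);
}

/* Mirrors the patched blk_init_queue_node(): allocate, then init,
 * and tear down the uninitialized object only if init fails. */
static struct queue *queue_create(void)
{
        struct queue *uninit_q, *q;

        uninit_q = queue_alloc();
        if (!uninit_q)
                return NULL;

        q = queue_init(uninit_q);
        if (!q)
                queue_cleanup(uninit_q);        /* no leak on failure */

        return q;
}

int main(void)
{
        struct queue *q = queue_create();
        printf("queue %s\n", q ? "created" : "failed");
        free(q);
        return 0;
}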
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ed897b5ef315..5ff4f4850e71 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -64,6 +64,9 @@ static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
 static struct completion *ioc_gone;
 static DEFINE_SPINLOCK(ioc_gone_lock);
 
+static DEFINE_SPINLOCK(cic_index_lock);
+static DEFINE_IDA(cic_index_ida);
+
 #define CFQ_PRIO_LISTS          IOPRIO_BE_NR
 #define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
 #define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
@@ -271,6 +274,7 @@ struct cfq_data {
         unsigned int cfq_latency;
         unsigned int cfq_group_isolation;
 
+        unsigned int cic_index;
         struct list_head cic_list;
 
         /*
@@ -430,6 +434,24 @@ static inline void cic_set_cfqq(struct cfq_io_context *cic,
         cic->cfqq[is_sync] = cfqq;
 }
 
+#define CIC_DEAD_KEY    1ul
+#define CIC_DEAD_INDEX_SHIFT    1
+
+static inline void *cfqd_dead_key(struct cfq_data *cfqd)
+{
+        return (void *)(cfqd->cic_index << CIC_DEAD_INDEX_SHIFT | CIC_DEAD_KEY);
+}
+
+static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic)
+{
+        struct cfq_data *cfqd = cic->key;
+
+        if (unlikely((unsigned long) cfqd & CIC_DEAD_KEY))
+                return NULL;
+
+        return cfqd;
+}
+
 /*
  * We regard a request as SYNC, if it's either a read or has the SYNC bit
  * set (in which case it could also be direct WRITE).
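
Note (illustrative, not part of the commit): the helpers above tag "this cfqd is dead" into the low bit of cic->key. A live key is a pointer to a cfq_data, which is at least word-aligned, so its bit 0 is always clear; a dead key is the queue's small integer cic_index shifted up one bit with bit 0 set. The two encodings can therefore never collide. A standalone sketch of the same tagged-pointer trick, with hypothetical names:

#include <assert.h>
#include <stdio.h>

#define DEAD_KEY        1ul
#define DEAD_INDEX_SHIFT        1

/* Encode a small integer index as a "dead" key. Bit 0 set marks death;
 * real object pointers are aligned, so their bit 0 is never set. */
static void *dead_key(unsigned int index)
{
        return (void *)((unsigned long)index << DEAD_INDEX_SHIFT | DEAD_KEY);
}

static int key_is_dead(void *key)
{
        return (unsigned long)key & DEAD_KEY;
}

static unsigned int dead_key_index(void *key)
{
        return (unsigned long)key >> DEAD_INDEX_SHIFT;
}

int main(void)
{
        int live_object = 42;
        void *live = &live_object;      /* aligned pointer: bit 0 clear */
        void *dead = dead_key(7);

        assert(!key_is_dead(live));
        assert(key_is_dead(dead));
        assert(dead_key_index(dead) == 7);
        printf("dead index: %u\n", dead_key_index(dead));
        return 0;
}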
@@ -2510,11 +2532,12 @@ static void cfq_cic_free(struct cfq_io_context *cic)
 static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
 {
         unsigned long flags;
+        unsigned long dead_key = (unsigned long) cic->key;
 
-        BUG_ON(!cic->dead_key);
+        BUG_ON(!(dead_key & CIC_DEAD_KEY));
 
         spin_lock_irqsave(&ioc->lock, flags);
-        radix_tree_delete(&ioc->radix_root, cic->dead_key);
+        radix_tree_delete(&ioc->radix_root, dead_key >> CIC_DEAD_INDEX_SHIFT);
         hlist_del_rcu(&cic->cic_list);
         spin_unlock_irqrestore(&ioc->lock, flags);
 
@@ -2537,15 +2560,10 @@ static void cfq_free_io_context(struct io_context *ioc)
         __call_for_each_cic(ioc, cic_free_func);
 }
 
-static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+static void cfq_put_cooperator(struct cfq_queue *cfqq)
 {
         struct cfq_queue *__cfqq, *next;
 
-        if (unlikely(cfqq == cfqd->active_queue)) {
-                __cfq_slice_expired(cfqd, cfqq, 0);
-                cfq_schedule_dispatch(cfqd);
-        }
-
         /*
          * If this queue was scheduled to merge with another queue, be
          * sure to drop the reference taken on that queue (and others in
@@ -2561,6 +2579,16 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
                 cfq_put_queue(__cfqq);
                 __cfqq = next;
         }
+}
+
+static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+        if (unlikely(cfqq == cfqd->active_queue)) {
+                __cfq_slice_expired(cfqd, cfqq, 0);
+                cfq_schedule_dispatch(cfqd);
+        }
+
+        cfq_put_cooperator(cfqq);
 
         cfq_put_queue(cfqq);
 }
@@ -2573,11 +2601,10 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
         list_del_init(&cic->queue_list);
 
         /*
-         * Make sure key == NULL is seen for dead queues
+         * Make sure dead mark is seen for dead queues
          */
         smp_wmb();
-        cic->dead_key = (unsigned long) cic->key;
-        cic->key = NULL;
+        cic->key = cfqd_dead_key(cfqd);
 
         if (ioc->ioc_data == cic)
                 rcu_assign_pointer(ioc->ioc_data, NULL);
@@ -2596,7 +2623,7 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
 static void cfq_exit_single_io_context(struct io_context *ioc,
                                        struct cfq_io_context *cic)
 {
-        struct cfq_data *cfqd = cic->key;
+        struct cfq_data *cfqd = cic_to_cfqd(cic);
 
         if (cfqd) {
                 struct request_queue *q = cfqd->queue;
@@ -2609,7 +2636,7 @@ static void cfq_exit_single_io_context(struct io_context *ioc,
                  * race between exiting task and queue
                  */
                 smp_read_barrier_depends();
-                if (cic->key)
+                if (cic->key == cfqd)
                         __cfq_exit_single_io_context(cfqd, cic);
 
                 spin_unlock_irqrestore(q->queue_lock, flags);
@@ -2689,7 +2716,7 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
 
 static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
 {
-        struct cfq_data *cfqd = cic->key;
+        struct cfq_data *cfqd = cic_to_cfqd(cic);
         struct cfq_queue *cfqq;
         unsigned long flags;
 
@@ -2746,7 +2773,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 static void changed_cgroup(struct io_context *ioc, struct cfq_io_context *cic)
 {
         struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
-        struct cfq_data *cfqd = cic->key;
+        struct cfq_data *cfqd = cic_to_cfqd(cic);
         unsigned long flags;
         struct request_queue *q;
 
@@ -2883,12 +2910,13 @@ cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
         unsigned long flags;
 
         WARN_ON(!list_empty(&cic->queue_list));
+        BUG_ON(cic->key != cfqd_dead_key(cfqd));
 
         spin_lock_irqsave(&ioc->lock, flags);
 
         BUG_ON(ioc->ioc_data == cic);
 
-        radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
+        radix_tree_delete(&ioc->radix_root, cfqd->cic_index);
         hlist_del_rcu(&cic->cic_list);
         spin_unlock_irqrestore(&ioc->lock, flags);
 
@@ -2900,7 +2928,6 @@ cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
 {
         struct cfq_io_context *cic;
         unsigned long flags;
-        void *k;
 
         if (unlikely(!ioc))
                 return NULL;
@@ -2917,13 +2944,11 @@ cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
         }
 
         do {
-                cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd);
+                cic = radix_tree_lookup(&ioc->radix_root, cfqd->cic_index);
                 rcu_read_unlock();
                 if (!cic)
                         break;
-                /* ->key must be copied to avoid race with cfq_exit_queue() */
-                k = cic->key;
-                if (unlikely(!k)) {
+                if (unlikely(cic->key != cfqd)) {
                         cfq_drop_dead_cic(cfqd, ioc, cic);
                         rcu_read_lock();
                         continue;
@@ -2956,7 +2981,7 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
 
         spin_lock_irqsave(&ioc->lock, flags);
         ret = radix_tree_insert(&ioc->radix_root,
-                                (unsigned long) cfqd, cic);
+                                cfqd->cic_index, cic);
         if (!ret)
                 hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
         spin_unlock_irqrestore(&ioc->lock, flags);
@@ -3516,6 +3541,9 @@ split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
         }
 
         cic_set_cfqq(cic, NULL, 1);
+
+        cfq_put_cooperator(cfqq);
+
         cfq_put_queue(cfqq);
         return NULL;
 }
@@ -3708,10 +3736,32 @@ static void cfq_exit_queue(struct elevator_queue *e)
 
         cfq_shutdown_timer_wq(cfqd);
 
+        spin_lock(&cic_index_lock);
+        ida_remove(&cic_index_ida, cfqd->cic_index);
+        spin_unlock(&cic_index_lock);
+
         /* Wait for cfqg->blkg->key accessors to exit their grace periods. */
         call_rcu(&cfqd->rcu, cfq_cfqd_free);
 }
 
+static int cfq_alloc_cic_index(void)
+{
+        int index, error;
+
+        do {
+                if (!ida_pre_get(&cic_index_ida, GFP_KERNEL))
+                        return -ENOMEM;
+
+                spin_lock(&cic_index_lock);
+                error = ida_get_new(&cic_index_ida, &index);
+                spin_unlock(&cic_index_lock);
+                if (error && error != -EAGAIN)
+                        return error;
+        } while (error);
+
+        return index;
+}
+
 static void *cfq_init_queue(struct request_queue *q)
 {
         struct cfq_data *cfqd;
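
Note (illustrative, not part of the commit): cfq_alloc_cic_index() uses the two-phase IDA idiom of this kernel era. ida_pre_get() preloads memory outside the spinlock, where GFP_KERNEL may sleep; ida_get_new() then allocates under the lock and returns -EAGAIN if a racing allocator consumed the preallocation, in which case the loop retries. A userspace model of that retry shape, with hypothetical stand-in functions:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for ida_pre_get()/ida_get_new(): pre_get()
 * preallocates (and may "sleep"), get_new() either succeeds or
 * reports -EAGAIN when a racing caller stole the preallocation. */
static int pre_get(void) { return 1; }  /* nonzero = memory available */

static int get_new(int *index)
{
        static int next;
        *index = next++;
        return 0;               /* or -EAGAIN to force a retry */
}

static int alloc_index(void)
{
        int index, error;

        do {
                if (!pre_get())
                        return -ENOMEM;         /* preallocation failed */

                /* in the kernel this pair runs under cic_index_lock */
                error = get_new(&index);
                if (error && error != -EAGAIN)
                        return error;           /* hard failure */
        } while (error);        /* -EAGAIN: preload raced, retry */

        return index;
}

int main(void)
{
        printf("got index %d\n", alloc_index());
        return 0;
}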
@@ -3719,10 +3769,16 @@ static void *cfq_init_queue(struct request_queue *q)
         struct cfq_group *cfqg;
         struct cfq_rb_root *st;
 
+        i = cfq_alloc_cic_index();
+        if (i < 0)
+                return NULL;
+
         cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
         if (!cfqd)
                 return NULL;
 
+        cfqd->cic_index = i;
+
         /* Init root service tree */
         cfqd->grp_service_tree = CFQ_RB_ROOT;
 
@@ -3984,6 +4040,7 @@ static void __exit cfq_exit(void)
          */
         if (elv_ioc_count_read(cfq_ioc_count))
                 wait_for_completion(&all_gone);
+        ida_destroy(&cic_index_ida);
         cfq_slab_kill();
 }
 
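
Note (illustrative, not part of the commit): the cfq_put_cooperator() split above appears to factor the reference-dropping walk over the new_cfqq merge chain out of cfq_exit_cfqq() so that split_cfqq() can reuse it; without the extra call in split_cfqq(), queues scheduled to merge with a since-split queue would seemingly keep their references. A sketch of a refcounted chain walk of that shape, with hypothetical types:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical refcounted queue with a merge-candidate chain,
 * modeled loosely on cfqq->new_cfqq. */
struct q {
        int ref;
        struct q *next_merge;
};

static void put_q(struct q *q)
{
        if (--q->ref == 0)
                free(q);
}

/* Mirrors the cfq_put_cooperator() pattern: drop the reference taken
 * on every queue in the merge chain, saving ->next_merge before each
 * put because the put may free the node. */
static void put_cooperators(struct q *q)
{
        struct q *cur = q->next_merge, *next;

        while (cur) {
                if (cur == q)           /* guard against a chain loop */
                        break;
                next = cur->next_merge;
                put_q(cur);
                cur = next;
        }
}

int main(void)
{
        struct q *a = calloc(1, sizeof(*a));
        struct q *b = calloc(1, sizeof(*b));
        a->ref = 1;
        b->ref = 1;
        a->next_merge = b;      /* a holds a reference on b */

        put_cooperators(a);     /* drops (and frees) b */
        printf("chain released\n");
        free(a);
        return 0;
}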
diff --git a/block/elevator.c b/block/elevator.c
index 6df2b5056b51..923a9139106c 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -242,9 +242,11 @@ int elevator_init(struct request_queue *q, char *name)
 {
         struct elevator_type *e = NULL;
         struct elevator_queue *eq;
-        int ret = 0;
         void *data;
 
+        if (unlikely(q->elevator))
+                return 0;
+
         INIT_LIST_HEAD(&q->queue_head);
         q->last_merge = NULL;
         q->end_sector = 0;
@@ -284,7 +286,7 @@ int elevator_init(struct request_queue *q, char *name)
         }
 
         elevator_attach(q, eq, data);
-        return ret;
+        return 0;
 }
 EXPORT_SYMBOL(elevator_init);
 
@@ -1097,7 +1099,7 @@ ssize_t elv_iosched_show(struct request_queue *q, char *name)
         struct elevator_type *__e;
         int len = 0;
 
-        if (!q->elevator)
+        if (!q->elevator || !blk_queue_stackable(q))
         	return sprintf(name, "none\n");
 
         elv = e->elevator_type;
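
Note (illustrative, not part of the commit): taken together, the early returns added to blk_init_free_list() and elevator_init() make queue initialization idempotent — calling the init path on a queue that already has a request pool or an elevator succeeds without re-initializing (and leaking) either. The guard is the common "already initialized? return success" pattern; a trivial sketch with hypothetical field names:

#include <stdio.h>

struct queue {
        void *rq_pool;          /* stands in for q->rq.rq_pool */
        void *elevator;         /* stands in for q->elevator */
};

/* Mirrors the new guards: initializing twice is a harmless no-op. */
static int init_free_list(struct queue *q)
{
        if (q->rq_pool)
                return 0;       /* already set up: succeed, don't redo */
        q->rq_pool = &q->rq_pool;       /* placeholder "allocation" */
        return 0;
}

static int elevator_init_once(struct queue *q)
{
        if (q->elevator)
                return 0;
        q->elevator = &q->elevator;
        return 0;
}

int main(void)
{
        struct queue q = {0};

        /* Safe to run the init path twice, as after this patch. */
        printf("%d %d\n", init_free_list(&q), elevator_init_once(&q));
        printf("%d %d\n", init_free_list(&q), elevator_init_once(&q));
        return 0;
}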