path: root/block/cfq-iosched.c
author    Konstantin Khlebnikov <khlebnikov@openvz.org>  2010-05-20 15:21:34 -0400
committer Jens Axboe <jens.axboe@oracle.com>             2010-05-24 03:06:59 -0400
commit    bca4b914b5da3d8e7b9b647f620b71dc85c0c394
tree      ed9c468396ec6ca6a04da9f7fbc341eee95e3930  /block/cfq-iosched.c
parent    f4b87dee923342505e1ddba8d34ce9de33e75050
cfq-iosched: remove dead_key from cfq_io_context
Remove the ->dead_key field from cfq_io_context to shrink it to 128 bytes (64 bytes on 32-bit hosts). Instead of keeping the key in a separate field, use the lowest bit of ->key as the dead mark. With this change a dead cfq_io_context automatically has cic->key != cfqd, so io_context's last-hit cache keeps working unchanged. To check whether ->key is still live, compare it against cfqd instead of checking it for a non-NULL value as before.

Also remove the obsolete race protection in cfq_cic_lookup(); that race has been gone since v2.6.24-1728-g4ac845a.

Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
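For context, below is a minimal user-space sketch of the tagged-pointer idea the patch relies on. It is illustrative only: the cfq_data and cfq_io_context definitions are trimmed stand-ins rather than the kernel structures, and main() is a hypothetical demo, but cfqd_dead_key() and cic_to_cfqd() mirror the helpers the patch adds.

/*
 * Sketch (not kernel code): a struct cfq_data * is at least word-aligned,
 * so its lowest bit is normally zero and can carry a "dead" mark in place
 * of a separate dead_key field.
 */
#include <assert.h>
#include <stdio.h>

#define CIC_DEAD_KEY 1ul

struct cfq_data { int dummy; };

struct cfq_io_context {
	void *key;	/* live: owning cfq_data; dead: same pointer with low bit set */
};

static inline void *cfqd_dead_key(struct cfq_data *cfqd)
{
	/* keep the pointer bits, set the low bit as the dead mark */
	return (void *)((unsigned long)cfqd | CIC_DEAD_KEY);
}

static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic->key;

	/* a marked key is never a valid cfq_data pointer */
	if ((unsigned long)cfqd & CIC_DEAD_KEY)
		return NULL;
	return cfqd;
}

int main(void)
{
	struct cfq_data cfqd;
	struct cfq_io_context cic = { .key = &cfqd };

	/* live cic: the "cic->key == cfqd" comparison used by lookup succeeds */
	assert(cic.key == (void *)&cfqd);
	assert(cic_to_cfqd(&cic) == &cfqd);

	/* queue goes away: mark the key dead instead of clearing it */
	cic.key = cfqd_dead_key(&cfqd);

	/* dead cic: the comparison now fails automatically */
	assert(cic.key != (void *)&cfqd);
	assert(cic_to_cfqd(&cic) == NULL);

	/* the original value is still recoverable by masking off the mark */
	printf("unmasked key matches original: %d\n",
	       (void *)((unsigned long)cic.key & ~CIC_DEAD_KEY) == (void *)&cfqd);
	return 0;
}

Because the dead mark preserves the original pointer bits, cic_free_func() can still recover the radix-tree index by masking off CIC_DEAD_KEY rather than reading a separate dead_key field.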
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--  block/cfq-iosched.c | 41
1 file changed, 28 insertions(+), 13 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ed897b5ef315..407602350350 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -430,6 +430,23 @@ static inline void cic_set_cfqq(struct cfq_io_context *cic,
 	cic->cfqq[is_sync] = cfqq;
 }
 
+#define CIC_DEAD_KEY	1ul
+
+static inline void *cfqd_dead_key(struct cfq_data *cfqd)
+{
+	return (void *)((unsigned long) cfqd | CIC_DEAD_KEY);
+}
+
+static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic)
+{
+	struct cfq_data *cfqd = cic->key;
+
+	if (unlikely((unsigned long) cfqd & CIC_DEAD_KEY))
+		return NULL;
+
+	return cfqd;
+}
+
 /*
  * We regard a request as SYNC, if it's either a read or has the SYNC bit
  * set (in which case it could also be direct WRITE).
@@ -2510,11 +2527,12 @@ static void cfq_cic_free(struct cfq_io_context *cic)
 static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
 {
 	unsigned long flags;
+	unsigned long dead_key = (unsigned long) cic->key;
 
-	BUG_ON(!cic->dead_key);
+	BUG_ON(!(dead_key & CIC_DEAD_KEY));
 
 	spin_lock_irqsave(&ioc->lock, flags);
-	radix_tree_delete(&ioc->radix_root, cic->dead_key);
+	radix_tree_delete(&ioc->radix_root, dead_key & ~CIC_DEAD_KEY);
 	hlist_del_rcu(&cic->cic_list);
 	spin_unlock_irqrestore(&ioc->lock, flags);
 
@@ -2573,11 +2591,10 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
 	list_del_init(&cic->queue_list);
 
 	/*
-	 * Make sure key == NULL is seen for dead queues
+	 * Make sure dead mark is seen for dead queues
 	 */
 	smp_wmb();
-	cic->dead_key = (unsigned long) cic->key;
-	cic->key = NULL;
+	cic->key = cfqd_dead_key(cfqd);
 
 	if (ioc->ioc_data == cic)
 		rcu_assign_pointer(ioc->ioc_data, NULL);
@@ -2596,7 +2613,7 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
 static void cfq_exit_single_io_context(struct io_context *ioc,
 				       struct cfq_io_context *cic)
 {
-	struct cfq_data *cfqd = cic->key;
+	struct cfq_data *cfqd = cic_to_cfqd(cic);
 
 	if (cfqd) {
 		struct request_queue *q = cfqd->queue;
@@ -2609,7 +2626,7 @@ static void cfq_exit_single_io_context(struct io_context *ioc,
 		 * race between exiting task and queue
 		 */
 		smp_read_barrier_depends();
-		if (cic->key)
+		if (cic->key == cfqd)
 			__cfq_exit_single_io_context(cfqd, cic);
 
 		spin_unlock_irqrestore(q->queue_lock, flags);
@@ -2689,7 +2706,7 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
 
 static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
 {
-	struct cfq_data *cfqd = cic->key;
+	struct cfq_data *cfqd = cic_to_cfqd(cic);
 	struct cfq_queue *cfqq;
 	unsigned long flags;
 
@@ -2746,7 +2763,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 static void changed_cgroup(struct io_context *ioc, struct cfq_io_context *cic)
 {
 	struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
-	struct cfq_data *cfqd = cic->key;
+	struct cfq_data *cfqd = cic_to_cfqd(cic);
 	unsigned long flags;
 	struct request_queue *q;
 
@@ -2883,6 +2900,7 @@ cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
 	unsigned long flags;
 
 	WARN_ON(!list_empty(&cic->queue_list));
+	BUG_ON(cic->key != cfqd_dead_key(cfqd));
 
 	spin_lock_irqsave(&ioc->lock, flags);
 
@@ -2900,7 +2918,6 @@ cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
 {
 	struct cfq_io_context *cic;
 	unsigned long flags;
-	void *k;
 
 	if (unlikely(!ioc))
 		return NULL;
@@ -2921,9 +2938,7 @@ cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
 		rcu_read_unlock();
 		if (!cic)
 			break;
-		/* ->key must be copied to avoid race with cfq_exit_queue() */
-		k = cic->key;
-		if (unlikely(!k)) {
+		if (unlikely(cic->key != cfqd)) {
 			cfq_drop_dead_cic(cfqd, ioc, cic);
 			rcu_read_lock();
 			continue;