author     Paul E. McKenney <paul.mckenney@linaro.org>   2012-07-23 19:03:51 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com> 2012-09-23 10:41:56 -0400
commit     bcfa57ce10d3d53d37a6e324f3010b1ce6a2784a (patch)
tree       1d365a7a64e3ea1d1dc743afcc1de4b6935caa57 /kernel/rcutree_plugin.h
parent     25d30cf4250f74e5ceb35f8f39739782408db633 (diff)
rcu: Eliminate signed overflow in synchronize_rcu_expedited()
In the C language, signed overflow is undefined. It is true that two's-complement arithmetic normally comes to the rescue, but the compiler can subvert this any time it has any information about the values being compared. For example, given "if (a - b > 0)", if the compiler has enough information to realize that (for example) the value of "a" is positive and that of "b" is negative, the compiler is within its rights to optimize this to a simple "if (1)", which might not be what you want. This commit therefore converts synchronize_rcu_expedited()'s work-done detection counter from signed to unsigned.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
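The hazard described above can be seen in a small user-space sketch (illustrative only, not kernel code; the helpers done_signed()/done_unsigned() are made up for this example). The signed form mirrors the shape of the old test and invites undefined behavior once the counter wraps; the unsigned form is defined to wrap modulo 2^N:

/*
 * User-space illustration (not kernel code) of why the expedited-grace-period
 * "work already done" test moves from signed to unsigned arithmetic.
 */
#include <limits.h>
#include <stdio.h>

/*
 * Shape of the old test: signed subtraction.  If the counter has advanced
 * past LONG_MAX, (count - snap) overflows, which is undefined behavior, so a
 * compiler with enough value-range information may fold the comparison away.
 */
static int done_signed(long snap, long count)
{
	return (count - snap) > 0;
}

/*
 * Unsigned subtraction wraps modulo 2^N (well defined), so "count has moved
 * past snap" can be tested as a small, nonzero difference.
 */
static int done_unsigned(unsigned long snap, unsigned long count)
{
	return count - snap != 0 && count - snap <= ULONG_MAX / 2;
}

int main(void)
{
	unsigned long snap = (unsigned long)LONG_MAX - 1; /* near the signed boundary */
	unsigned long count = snap + 3;                   /* three grace periods later */

	printf("unsigned sees progress: %d\n", done_unsigned(snap, count));
	printf("signed, small values:   %d\n", done_signed(5, 8));
	/*
	 * done_signed((long)snap, (long)count) would overflow for the values
	 * above; the compiler is entitled to assume that never happens.
	 */
	return 0;
}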
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--  kernel/rcutree_plugin.h | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index eb8dcd1bc4b5..cb5879386a02 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -677,7 +677,7 @@ void synchronize_rcu(void)
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
 static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
-static long sync_rcu_preempt_exp_count;
+static unsigned long sync_rcu_preempt_exp_count;
 static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
 
 /*
@@ -792,7 +792,7 @@ void synchronize_rcu_expedited(void)
 	unsigned long flags;
 	struct rcu_node *rnp;
 	struct rcu_state *rsp = &rcu_preempt_state;
-	long snap;
+	unsigned long snap;
 	int trycount = 0;
 
 	smp_mb(); /* Caller's modifications seen first by other CPUs. */
@@ -811,10 +811,10 @@ void synchronize_rcu_expedited(void)
 			synchronize_rcu();
 			return;
 		}
-		if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
+		if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count)))
 			goto mb_ret; /* Others did our work for us. */
 	}
-	if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
+	if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count)))
 		goto unlock_mb_ret; /* Others did our work for us. */
 
 	/* force all RCU readers onto ->blkd_tasks lists. */
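
ULONG_CMP_LT() is defined elsewhere in the RCU sources; the stand-in below sketches the wraparound-aware comparison it is meant to perform (an illustration of the idea, not the kernel's macro text):

#include <limits.h>

/*
 * Illustrative stand-in for ULONG_CMP_LT() (assumption: this mirrors the
 * macro's intent, not necessarily its exact definition).  The counters are
 * free-running unsigned values that may wrap; "a" is logically behind "b"
 * exactly when the unsigned difference a - b lands in the upper half of
 * the range.
 */
static inline int ulong_cmp_lt(unsigned long a, unsigned long b)
{
	return ULONG_MAX / 2 < a - b;
}

With this form, ULONG_CMP_LT(snap, sync_rcu_preempt_exp_count) keeps reporting "others did our work" correctly even after the counter wraps, which the signed subtraction could not guarantee.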