aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/rcutree.c
diff options
context:
space:
mode:
authorPaul E. McKenney <paulmck@linux.vnet.ibm.com>2010-02-22 20:05:01 -0500
committerIngo Molnar <mingo@elte.hu>2010-02-25 04:34:57 -0500
commit20133cfce7d0bbdcc0c398301030c091f5675c88 (patch)
tree337f242bfc89f5880cf86234fa6b574f52a2f0a5 /kernel/rcutree.c
parent1bd22e374b20c2f0ba1d2723c1f585acab2251c5 (diff)
rcu: Stop overflowing signed integers
The C standard does not specify the result of an operation that overflows a signed integer, so such operations need to be avoided. This patch changes the type of several fields from "long" to "unsigned long" and adjusts operations as needed. ULONG_CMP_GE() and ULONG_CMP_LT() macros are introduced to do the modular comparisons that are appropriate given that overflow is an expected event.

Acked-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1266887105-1528-17-git-send-email-paulmck@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--kernel/rcutree.c11
1 file changed, 5 insertions, 6 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 29d88c08d875..dd0d31dffcdc 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -500,7 +500,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
 	trigger_all_cpu_backtrace();
 
 	spin_lock_irqsave(&rnp->lock, flags);
-	if ((long)(jiffies - rsp->jiffies_stall) >= 0)
+	if (ULONG_CMP_GE(jiffies, rsp->jiffies_stall))
 		rsp->jiffies_stall =
 			jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
 	spin_unlock_irqrestore(&rnp->lock, flags);
@@ -1216,8 +1216,7 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 		rsp->n_force_qs_lh++; /* Inexact, can lose counts.  Tough! */
 		return;	/* Someone else is already on the job. */
 	}
-	if (relaxed &&
-	    (long)(rsp->jiffies_force_qs - jiffies) >= 0)
+	if (relaxed && ULONG_CMP_GE(rsp->jiffies_force_qs, jiffies))
 		goto unlock_fqs_ret; /* no emergency and done recently. */
 	rsp->n_force_qs++;
 	spin_lock(&rnp->lock);  /* irqs already disabled */
@@ -1295,7 +1294,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 	 * If an RCU GP has gone long enough, go check for dyntick
 	 * idle CPUs and, if needed, send resched IPIs.
 	 */
-	if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
+	if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
 		force_quiescent_state(rsp, 1);
 
 	/*
@@ -1392,7 +1391,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 			force_quiescent_state(rsp, 0);
 		rdp->n_force_qs_snap = rsp->n_force_qs;
 		rdp->qlen_last_fqs_check = rdp->qlen;
-	} else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
+	} else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
 		force_quiescent_state(rsp, 1);
 	local_irq_restore(flags);
 }
@@ -1525,7 +1524,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 
 	/* Has an RCU GP gone long enough to send resched IPIs &c? */
 	if (rcu_gp_in_progress(rsp) &&
-	    ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)) {
+	    ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies)) {
 		rdp->n_rp_need_fqs++;
 		return 1;
 	}