diff options
| author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2017-06-27 11:38:45 -0400 |
|---|---|---|
| committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2017-07-25 16:04:49 -0400 |
| commit | aed4e046863820e6d06ebf7c079e9ad924608edf (patch) | |
| tree | 8d48bf8b423c701758e8dda12fb9b91ebcd23e89 | |
| parent | f2dbe4a562d4f17cc1bad3e36a9d1ccb19c86604 (diff) | |
rcu: Remove unused RCU list functions
Given changes to callback migration, rcu_cblist_head(),
rcu_cblist_tail(), rcu_cblist_count_cbs(), rcu_segcblist_segempty(),
rcu_segcblist_dequeue(), rcu_segcblist_dequeued_lazy(), and
rcu_segcblist_new_cbs() are no longer used. This commit therefore removes them.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
| -rw-r--r-- | kernel/rcu/rcu_segcblist.c | 84 | ||||
| -rw-r--r-- | kernel/rcu/rcu_segcblist.h | 26 |
2 files changed, 0 insertions, 110 deletions
diff --git a/kernel/rcu/rcu_segcblist.c b/kernel/rcu/rcu_segcblist.c index 7091d824b893..7649fcd2c4c7 100644 --- a/kernel/rcu/rcu_segcblist.c +++ b/kernel/rcu/rcu_segcblist.c | |||
| @@ -36,24 +36,6 @@ void rcu_cblist_init(struct rcu_cblist *rclp) | |||
| 36 | } | 36 | } |
| 37 | 37 | ||
| 38 | /* | 38 | /* |
| 39 | * Debug function to actually count the number of callbacks. | ||
| 40 | * If the number exceeds the limit specified, return -1. | ||
| 41 | */ | ||
| 42 | long rcu_cblist_count_cbs(struct rcu_cblist *rclp, long lim) | ||
| 43 | { | ||
| 44 | int cnt = 0; | ||
| 45 | struct rcu_head **rhpp = &rclp->head; | ||
| 46 | |||
| 47 | for (;;) { | ||
| 48 | if (!*rhpp) | ||
| 49 | return cnt; | ||
| 50 | if (++cnt > lim) | ||
| 51 | return -1; | ||
| 52 | rhpp = &(*rhpp)->next; | ||
| 53 | } | ||
| 54 | } | ||
| 55 | |||
| 56 | /* | ||
| 57 | * Dequeue the oldest rcu_head structure from the specified callback | 39 | * Dequeue the oldest rcu_head structure from the specified callback |
| 58 | * list. This function assumes that the callback is non-lazy, but | 40 | * list. This function assumes that the callback is non-lazy, but |
| 59 | * the caller can later invoke rcu_cblist_dequeued_lazy() if it | 41 | * the caller can later invoke rcu_cblist_dequeued_lazy() if it |
| @@ -103,17 +85,6 @@ void rcu_segcblist_disable(struct rcu_segcblist *rsclp) | |||
| 103 | } | 85 | } |
| 104 | 86 | ||
| 105 | /* | 87 | /* |
| 106 | * Is the specified segment of the specified rcu_segcblist structure | ||
| 107 | * empty of callbacks? | ||
| 108 | */ | ||
| 109 | bool rcu_segcblist_segempty(struct rcu_segcblist *rsclp, int seg) | ||
| 110 | { | ||
| 111 | if (seg == RCU_DONE_TAIL) | ||
| 112 | return &rsclp->head == rsclp->tails[RCU_DONE_TAIL]; | ||
| 113 | return rsclp->tails[seg - 1] == rsclp->tails[seg]; | ||
| 114 | } | ||
| 115 | |||
| 116 | /* | ||
| 117 | * Does the specified rcu_segcblist structure contain callbacks that | 88 | * Does the specified rcu_segcblist structure contain callbacks that |
| 118 | * are ready to be invoked? | 89 | * are ready to be invoked? |
| 119 | */ | 90 | */ |
| @@ -134,50 +105,6 @@ bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp) | |||
| 134 | } | 105 | } |
| 135 | 106 | ||
| 136 | /* | 107 | /* |
| 137 | * Dequeue and return the first ready-to-invoke callback. If there | ||
| 138 | * are no ready-to-invoke callbacks, return NULL. Disables interrupts | ||
| 139 | * to avoid interference. Does not protect from interference from other | ||
| 140 | * CPUs or tasks. | ||
| 141 | */ | ||
| 142 | struct rcu_head *rcu_segcblist_dequeue(struct rcu_segcblist *rsclp) | ||
| 143 | { | ||
| 144 | unsigned long flags; | ||
| 145 | int i; | ||
| 146 | struct rcu_head *rhp; | ||
| 147 | |||
| 148 | local_irq_save(flags); | ||
| 149 | if (!rcu_segcblist_ready_cbs(rsclp)) { | ||
| 150 | local_irq_restore(flags); | ||
| 151 | return NULL; | ||
| 152 | } | ||
| 153 | rhp = rsclp->head; | ||
| 154 | BUG_ON(!rhp); | ||
| 155 | rsclp->head = rhp->next; | ||
| 156 | for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++) { | ||
| 157 | if (rsclp->tails[i] != &rhp->next) | ||
| 158 | break; | ||
| 159 | rsclp->tails[i] = &rsclp->head; | ||
| 160 | } | ||
| 161 | smp_mb(); /* Dequeue before decrement for rcu_barrier(). */ | ||
| 162 | WRITE_ONCE(rsclp->len, rsclp->len - 1); | ||
| 163 | local_irq_restore(flags); | ||
| 164 | return rhp; | ||
| 165 | } | ||
| 166 | |||
| 167 | /* | ||
| 168 | * Account for the fact that a previously dequeued callback turned out | ||
| 169 | * to be marked as lazy. | ||
| 170 | */ | ||
| 171 | void rcu_segcblist_dequeued_lazy(struct rcu_segcblist *rsclp) | ||
| 172 | { | ||
| 173 | unsigned long flags; | ||
| 174 | |||
| 175 | local_irq_save(flags); | ||
| 176 | rsclp->len_lazy--; | ||
| 177 | local_irq_restore(flags); | ||
| 178 | } | ||
| 179 | |||
| 180 | /* | ||
| 181 | * Return a pointer to the first callback in the specified rcu_segcblist | 108 | * Return a pointer to the first callback in the specified rcu_segcblist |
| 182 | * structure. This is useful for diagnostics. | 109 | * structure. This is useful for diagnostics. |
| 183 | */ | 110 | */ |
| @@ -203,17 +130,6 @@ struct rcu_head *rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp) | |||
| 203 | } | 130 | } |
| 204 | 131 | ||
| 205 | /* | 132 | /* |
| 206 | * Does the specified rcu_segcblist structure contain callbacks that | ||
| 207 | * have not yet been processed beyond having been posted, that is, | ||
| 208 | * does it contain callbacks in its last segment? | ||
| 209 | */ | ||
| 210 | bool rcu_segcblist_new_cbs(struct rcu_segcblist *rsclp) | ||
| 211 | { | ||
| 212 | return rcu_segcblist_is_enabled(rsclp) && | ||
| 213 | !rcu_segcblist_restempty(rsclp, RCU_NEXT_READY_TAIL); | ||
| 214 | } | ||
| 215 | |||
| 216 | /* | ||
| 217 | * Enqueue the specified callback onto the specified rcu_segcblist | 133 | * Enqueue the specified callback onto the specified rcu_segcblist |
| 218 | * structure, updating accounting as needed. Note that the ->len | 134 | * structure, updating accounting as needed. Note that the ->len |
| 219 | * field may be accessed locklessly, hence the WRITE_ONCE(). | 135 | * field may be accessed locklessly, hence the WRITE_ONCE(). |
diff --git a/kernel/rcu/rcu_segcblist.h b/kernel/rcu/rcu_segcblist.h index c2f319f3f06a..581c12b63544 100644 --- a/kernel/rcu/rcu_segcblist.h +++ b/kernel/rcu/rcu_segcblist.h | |||
| @@ -31,29 +31,7 @@ static inline void rcu_cblist_dequeued_lazy(struct rcu_cblist *rclp) | |||
| 31 | rclp->len_lazy--; | 31 | rclp->len_lazy--; |
| 32 | } | 32 | } |
| 33 | 33 | ||
| 34 | /* | ||
| 35 | * Interim function to return rcu_cblist head pointer. Longer term, the | ||
| 36 | * rcu_cblist will be used more pervasively, removing the need for this | ||
| 37 | * function. | ||
| 38 | */ | ||
| 39 | static inline struct rcu_head *rcu_cblist_head(struct rcu_cblist *rclp) | ||
| 40 | { | ||
| 41 | return rclp->head; | ||
| 42 | } | ||
| 43 | |||
| 44 | /* | ||
| 45 | * Interim function to return rcu_cblist head pointer. Longer term, the | ||
| 46 | * rcu_cblist will be used more pervasively, removing the need for this | ||
| 47 | * function. | ||
| 48 | */ | ||
| 49 | static inline struct rcu_head **rcu_cblist_tail(struct rcu_cblist *rclp) | ||
| 50 | { | ||
| 51 | WARN_ON_ONCE(!rclp->head); | ||
| 52 | return rclp->tail; | ||
| 53 | } | ||
| 54 | |||
| 55 | void rcu_cblist_init(struct rcu_cblist *rclp); | 34 | void rcu_cblist_init(struct rcu_cblist *rclp); |
| 56 | long rcu_cblist_count_cbs(struct rcu_cblist *rclp, long lim); | ||
| 57 | struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp); | 35 | struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp); |
| 58 | 36 | ||
| 59 | /* | 37 | /* |
| @@ -134,14 +112,10 @@ static inline struct rcu_head **rcu_segcblist_tail(struct rcu_segcblist *rsclp) | |||
| 134 | 112 | ||
| 135 | void rcu_segcblist_init(struct rcu_segcblist *rsclp); | 113 | void rcu_segcblist_init(struct rcu_segcblist *rsclp); |
| 136 | void rcu_segcblist_disable(struct rcu_segcblist *rsclp); | 114 | void rcu_segcblist_disable(struct rcu_segcblist *rsclp); |
| 137 | bool rcu_segcblist_segempty(struct rcu_segcblist *rsclp, int seg); | ||
| 138 | bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp); | 115 | bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp); |
| 139 | bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp); | 116 | bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp); |
| 140 | struct rcu_head *rcu_segcblist_dequeue(struct rcu_segcblist *rsclp); | ||
| 141 | void rcu_segcblist_dequeued_lazy(struct rcu_segcblist *rsclp); | ||
| 142 | struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp); | 117 | struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp); |
| 143 | struct rcu_head *rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp); | 118 | struct rcu_head *rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp); |
| 144 | bool rcu_segcblist_new_cbs(struct rcu_segcblist *rsclp); | ||
| 145 | void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp, | 119 | void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp, |
| 146 | struct rcu_head *rhp, bool lazy); | 120 | struct rcu_head *rhp, bool lazy); |
| 147 | bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp, | 121 | bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp, |
