aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorVinicius Costa Gomes <vinicius.gomes@intel.com>2019-04-29 18:48:30 -0400
committerDavid S. Miller <davem@davemloft.net>2019-05-01 11:58:51 -0400
commit8c79f0ea5d6087645ed5ed5d638c338962052766 (patch)
treee3904ec24efdee2e6d7239885bbf53c7161bac02
parentcd86972a9fd076aea43165394b05bbca26254cd7 (diff)
taprio: Fix potential use of invalid memory during dequeue()
Right now, this isn't a problem, but the next commit allows schedules to be added during runtime. When a new schedule transitions from the inactive to the active state ("admin" -> "oper"), the previous one can be freed; if it's freed just after the RCU read lock is released, we may access an invalid entry. So we should take care to protect the dequeue() flow, so that all the places that access the entries are protected by the RCU read lock. Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--net/sched/sch_taprio.c14
1 files changed, 8 insertions, 6 deletions
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 09563c245473..f827caa73862 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -136,8 +136,8 @@ static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
136{ 136{
137 struct taprio_sched *q = qdisc_priv(sch); 137 struct taprio_sched *q = qdisc_priv(sch);
138 struct net_device *dev = qdisc_dev(sch); 138 struct net_device *dev = qdisc_dev(sch);
139 struct sk_buff *skb = NULL;
139 struct sched_entry *entry; 140 struct sched_entry *entry;
140 struct sk_buff *skb;
141 u32 gate_mask; 141 u32 gate_mask;
142 int i; 142 int i;
143 143
@@ -154,10 +154,9 @@ static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
154 * "AdminGateSates" 154 * "AdminGateSates"
155 */ 155 */
156 gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN; 156 gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
157 rcu_read_unlock();
158 157
159 if (!gate_mask) 158 if (!gate_mask)
160 return NULL; 159 goto done;
161 160
162 for (i = 0; i < dev->num_tx_queues; i++) { 161 for (i = 0; i < dev->num_tx_queues; i++) {
163 struct Qdisc *child = q->qdiscs[i]; 162 struct Qdisc *child = q->qdiscs[i];
@@ -197,16 +196,19 @@ static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
197 196
198 skb = child->ops->dequeue(child); 197 skb = child->ops->dequeue(child);
199 if (unlikely(!skb)) 198 if (unlikely(!skb))
200 return NULL; 199 goto done;
201 200
202 qdisc_bstats_update(sch, skb); 201 qdisc_bstats_update(sch, skb);
203 qdisc_qstats_backlog_dec(sch, skb); 202 qdisc_qstats_backlog_dec(sch, skb);
204 sch->q.qlen--; 203 sch->q.qlen--;
205 204
206 return skb; 205 goto done;
207 } 206 }
208 207
209 return NULL; 208done:
209 rcu_read_unlock();
210
211 return skb;
210} 212}
211 213
212static enum hrtimer_restart advance_sched(struct hrtimer *timer) 214static enum hrtimer_restart advance_sched(struct hrtimer *timer)