-rw-r--r--  drivers/net/can/pch_can.c  14
-rw-r--r--  net/ipv4/tcp_input.c       29
-rw-r--r--  net/sched/sch_sfq.c        16
3 files changed, 31 insertions, 28 deletions
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 8d45fdd0180d..c42e97268248 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -1078,15 +1078,17 @@ static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
 
 	/* Save Tx buffer enable state */
 	for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
-		priv->tx_enable[i] = pch_can_get_rxtx_ir(priv, i, PCH_TX_IFREG);
+		priv->tx_enable[i - 1] = pch_can_get_rxtx_ir(priv, i,
+							     PCH_TX_IFREG);
 
 	/* Disable all Transmit buffers */
 	pch_can_set_tx_all(priv, 0);
 
 	/* Save Rx buffer enable state */
 	for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
-		priv->rx_enable[i] = pch_can_get_rxtx_ir(priv, i, PCH_RX_IFREG);
-		priv->rx_link[i] = pch_can_get_rx_buffer_link(priv, i);
+		priv->rx_enable[i - 1] = pch_can_get_rxtx_ir(priv, i,
+							     PCH_RX_IFREG);
+		priv->rx_link[i - 1] = pch_can_get_rx_buffer_link(priv, i);
 	}
 
 	/* Disable all Receive buffers */
@@ -1139,15 +1141,15 @@ static int pch_can_resume(struct pci_dev *pdev)
 
 	/* Enabling the transmit buffer. */
 	for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
-		pch_can_set_rxtx(priv, i, priv->tx_enable[i], PCH_TX_IFREG);
+		pch_can_set_rxtx(priv, i, priv->tx_enable[i - 1], PCH_TX_IFREG);
 
 	/* Configuring the receive buffer and enabling them. */
 	for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
 		/* Restore buffer link */
-		pch_can_set_rx_buffer_link(priv, i, priv->rx_link[i]);
+		pch_can_set_rx_buffer_link(priv, i, priv->rx_link[i - 1]);
 
 		/* Restore buffer enables */
-		pch_can_set_rxtx(priv, i, priv->rx_enable[i], PCH_RX_IFREG);
+		pch_can_set_rxtx(priv, i, priv->rx_enable[i - 1], PCH_RX_IFREG);
 	}
 
 	/* Enable CAN Interrupts */
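Note (not part of the patch): the pch_can hunks replace [i] with [i - 1] when saving and restoring per-object state across suspend/resume. A minimal standalone sketch of that indexing pattern, assuming (it is not shown in this hunk) that the CAN message object numbers are 1-based while tx_enable[]/rx_enable[]/rx_link[] are 0-based arrays sized for the object count, so indexing them with the raw object number skips element 0 and writes one past the end on the last object:

/* Illustrative only -- not driver code. Hypothetical object range. */
#include <stdio.h>

#define OBJ_START 1                         /* first 1-based object number */
#define OBJ_END   4                         /* last 1-based object number  */
#define NUM_OBJ   (OBJ_END - OBJ_START + 1)

static unsigned int enable_state[NUM_OBJ];  /* one slot per message object */

int main(void)
{
	int i;

	/* Buggy pattern: enable_state[i] touches element NUM_OBJ when
	 * i == OBJ_END, i.e. one element past the end of the array.
	 * Fixed pattern, as in the patch: shift the 1-based object
	 * number down to a 0-based array index. */
	for (i = OBJ_START; i <= OBJ_END; i++)
		enable_state[i - 1] = 1;

	for (i = 0; i < NUM_OBJ; i++)
		printf("object %d state %u\n", i + OBJ_START, enable_state[i]);
	return 0;
}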
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 824e8c8a17ad..2549b29b062d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -916,25 +916,20 @@ static void tcp_init_metrics(struct sock *sk)
 		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
 	}
 	tcp_set_rto(sk);
-	if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
-		goto reset;
-
-cwnd:
-	tp->snd_cwnd = tcp_init_cwnd(tp, dst);
-	tp->snd_cwnd_stamp = tcp_time_stamp;
-	return;
-
+	if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp) {
 reset:
-	/* Play conservative. If timestamps are not
-	 * supported, TCP will fail to recalculate correct
-	 * rtt, if initial rto is too small. FORGET ALL AND RESET!
-	 */
-	if (!tp->rx_opt.saw_tstamp && tp->srtt) {
-		tp->srtt = 0;
-		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
-		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
+		/* Play conservative. If timestamps are not
+		 * supported, TCP will fail to recalculate correct
+		 * rtt, if initial rto is too small. FORGET ALL AND RESET!
+		 */
+		if (!tp->rx_opt.saw_tstamp && tp->srtt) {
+			tp->srtt = 0;
+			tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
+			inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
+		}
 	}
-	goto cwnd;
+	tp->snd_cwnd = tcp_init_cwnd(tp, dst);
+	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
 static void tcp_update_reordering(struct sock *sk, const int metric,
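Note (not part of the patch): the tcp_input.c hunk removes the goto reset / goto cwnd ping-pong in tcp_init_metrics(). The conservative reset block now sits inside the if, the cwnd initialization runs unconditionally at the end, and the reset: label is kept, presumably because an earlier goto reset outside this hunk still targets it. A minimal sketch of the same control-flow restructuring on a made-up function (names and helpers are illustrative, not kernel code):

/* Illustrative only: the flow shape after the change. */
#include <stdio.h>
#include <stdbool.h>

static bool rto_too_small = true;    /* stand-in for the icsk_rto test   */
static bool have_timestamps = false; /* stand-in for rx_opt.saw_tstamp   */

static void init_metrics(void)
{
	/* Before: a forward goto skipped the cwnd setup and a backward
	 * goto returned to it after the reset block. After: the reset
	 * block is simply guarded by the if, and the cwnd setup falls
	 * through at the end on every path, exactly as before. */
	if (rto_too_small && !have_timestamps) {
		printf("reset RTT state\n");  /* conservative reset     */
	}
	printf("init cwnd\n");               /* unconditional, as before */
}

int main(void)
{
	init_metrics();
	return 0;
}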
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 13322e8a0456..6a2f88fea6d8 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -281,6 +281,7 @@ static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
 	struct sk_buff *skb = slot->skblist_prev;
 
 	slot->skblist_prev = skb->prev;
+	skb->prev->next = (struct sk_buff *)slot;
 	skb->next = skb->prev = NULL;
 	return skb;
 }
@@ -608,14 +609,19 @@ static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 			       struct gnet_dump *d)
 {
 	struct sfq_sched_data *q = qdisc_priv(sch);
-	const struct sfq_slot *slot = &q->slots[q->ht[cl - 1]];
-	struct gnet_stats_queue qs = { .qlen = slot->qlen };
-	struct tc_sfq_xstats xstats = { .allot = slot->allot };
+	sfq_index idx = q->ht[cl - 1];
+	struct gnet_stats_queue qs = { 0 };
+	struct tc_sfq_xstats xstats = { 0 };
 	struct sk_buff *skb;
 
-	slot_queue_walk(slot, skb)
-		qs.backlog += qdisc_pkt_len(skb);
+	if (idx != SFQ_EMPTY_SLOT) {
+		const struct sfq_slot *slot = &q->slots[idx];
 
+		xstats.allot = slot->allot;
+		qs.qlen = slot->qlen;
+		slot_queue_walk(slot, skb)
+			qs.backlog += qdisc_pkt_len(skb);
+	}
 	if (gnet_stats_copy_queue(d, &qs) < 0)
 		return -1;
 	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
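Note (not part of the patch): both sch_sfq hunks harden against inconsistent slot state. slot_dequeue_tail() now also points the new tail's next back at the slot head, so the circular skb list stays linked in both directions, and sfq_dump_class_stats() only dereferences a slot when the class actually hashes to one (idx != SFQ_EMPTY_SLOT). A small standalone sketch of the list repair, using a plain node/sentinel layout rather than the sk_buff/sfq_slot cast the kernel uses:

/* Illustrative only: removing the tail of a circular doubly linked
 * list with a sentinel head. Skipping the "repair forward link" step
 * (the bug addressed in slot_dequeue_tail) leaves the new tail's
 * ->next pointing at the removed node. */
#include <stdio.h>
#include <stddef.h>

struct node {
	struct node *next, *prev;
	int val;
};

static void list_init(struct node *head)
{
	head->next = head->prev = head;
}

static void list_add_head(struct node *head, struct node *n)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static struct node *list_dequeue_tail(struct node *head)
{
	struct node *n = head->prev;

	if (n == head)
		return NULL;            /* list is empty */
	head->prev = n->prev;
	n->prev->next = head;           /* repair forward link too */
	n->next = n->prev = NULL;
	return n;
}

int main(void)
{
	struct node head, a = { .val = 1 }, b = { .val = 2 };
	struct node *t;

	list_init(&head);
	list_add_head(&head, &a);       /* list: head <-> a          */
	list_add_head(&head, &b);       /* list: head <-> b <-> a    */

	while ((t = list_dequeue_tail(&head)) != NULL)
		printf("dequeued %d\n", t->val);
	return 0;
}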