about summary refs log tree commit diff stats
path: root/drivers/net/caif
diff options
context:
space:
mode:
authorDmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>2012-04-12 04:27:25 -0400
committerDavid S. Miller <davem@davemloft.net>2012-04-13 11:37:36 -0400
commitece367d53a5bf46cc357163c7074a6546a0ec01c (patch)
tree741193e49df32040d03c9258c342a75eecfae9f1 /drivers/net/caif
parent447648128ec22e294604674ffe1064aa3ec3b767 (diff)
caif-hsi: robust frame aggregation for HSI
Implement aggregation algorithm, combining more data into a single HSI transfer. 4 different traffic categories are supported: 1. TC_PRIO_CONTROL .. TC_PRIO_MAX (CTL) 2. TC_PRIO_INTERACTIVE (VO) 3. TC_PRIO_INTERACTIVE_BULK (VI) 4. TC_PRIO_BESTEFFORT, TC_PRIO_BULK, TC_PRIO_FILLER (BEBK) Signed-off-by: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/caif')
-rw-r--r--drivers/net/caif/caif_hsi.c243
1 file changed, 188 insertions, 55 deletions
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 9c1c8cd5223..9849a238d54 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -19,6 +19,7 @@
19#include <linux/if_arp.h> 19#include <linux/if_arp.h>
20#include <linux/timer.h> 20#include <linux/timer.h>
21#include <linux/rtnetlink.h> 21#include <linux/rtnetlink.h>
22#include <linux/pkt_sched.h>
22#include <net/caif/caif_layer.h> 23#include <net/caif/caif_layer.h>
23#include <net/caif/caif_hsi.h> 24#include <net/caif/caif_hsi.h>
24 25
@@ -34,6 +35,10 @@ static int inactivity_timeout = 1000;
34module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR); 35module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR);
35MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms."); 36MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms.");
36 37
38static int aggregation_timeout = 1;
39module_param(aggregation_timeout, int, S_IRUGO | S_IWUSR);
40MODULE_PARM_DESC(aggregation_timeout, "Aggregation timeout on HSI, ms.");
41
37/* 42/*
38 * HSI padding options. 43 * HSI padding options.
39 * Warning: must be a base of 2 (& operation used) and can not be zero ! 44 * Warning: must be a base of 2 (& operation used) and can not be zero !
@@ -86,24 +91,84 @@ static void cfhsi_inactivity_tout(unsigned long arg)
86 queue_work(cfhsi->wq, &cfhsi->wake_down_work); 91 queue_work(cfhsi->wq, &cfhsi->wake_down_work);
87} 92}
88 93
94static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
95 const struct sk_buff *skb,
96 int direction)
97{
98 struct caif_payload_info *info;
99 int hpad, tpad, len;
100
101 info = (struct caif_payload_info *)&skb->cb;
102 hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
103 tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
104 len = skb->len + hpad + tpad;
105
106 if (direction > 0)
107 cfhsi->aggregation_len += len;
108 else if (direction < 0)
109 cfhsi->aggregation_len -= len;
110}
111
112static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
113{
114 int i;
115
116 if (cfhsi->aggregation_timeout < 0)
117 return true;
118
119 for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
120 if (cfhsi->qhead[i].qlen)
121 return true;
122 }
123
124 /* TODO: Use aggregation_len instead */
125 if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS)
126 return true;
127
128 return false;
129}
130
131static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi)
132{
133 struct sk_buff *skb;
134 int i;
135
136 for (i = 0; i < CFHSI_PRIO_LAST; ++i) {
137 skb = skb_dequeue(&cfhsi->qhead[i]);
138 if (skb)
139 break;
140 }
141
142 return skb;
143}
144
145static int cfhsi_tx_queue_len(struct cfhsi *cfhsi)
146{
147 int i, len = 0;
148 for (i = 0; i < CFHSI_PRIO_LAST; ++i)
149 len += skb_queue_len(&cfhsi->qhead[i]);
150 return len;
151}
152
89static void cfhsi_abort_tx(struct cfhsi *cfhsi) 153static void cfhsi_abort_tx(struct cfhsi *cfhsi)
90{ 154{
91 struct sk_buff *skb; 155 struct sk_buff *skb;
92 156
93 for (;;) { 157 for (;;) {
94 spin_lock_bh(&cfhsi->lock); 158 spin_lock_bh(&cfhsi->lock);
95 skb = skb_dequeue(&cfhsi->qhead); 159 skb = cfhsi_dequeue(cfhsi);
96 if (!skb) 160 if (!skb)
97 break; 161 break;
98 162
99 cfhsi->ndev->stats.tx_errors++; 163 cfhsi->ndev->stats.tx_errors++;
100 cfhsi->ndev->stats.tx_dropped++; 164 cfhsi->ndev->stats.tx_dropped++;
165 cfhsi_update_aggregation_stats(cfhsi, skb, -1);
101 spin_unlock_bh(&cfhsi->lock); 166 spin_unlock_bh(&cfhsi->lock);
102 kfree_skb(skb); 167 kfree_skb(skb);
103 } 168 }
104 cfhsi->tx_state = CFHSI_TX_STATE_IDLE; 169 cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
105 if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) 170 if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
106 mod_timer(&cfhsi->timer, 171 mod_timer(&cfhsi->inactivity_timer,
107 jiffies + cfhsi->inactivity_timeout); 172 jiffies + cfhsi->inactivity_timeout);
108 spin_unlock_bh(&cfhsi->lock); 173 spin_unlock_bh(&cfhsi->lock);
109} 174}
@@ -169,7 +234,7 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
169 struct sk_buff *skb; 234 struct sk_buff *skb;
170 u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ; 235 u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
171 236
172 skb = skb_dequeue(&cfhsi->qhead); 237 skb = cfhsi_dequeue(cfhsi);
173 if (!skb) 238 if (!skb)
174 return 0; 239 return 0;
175 240
@@ -196,11 +261,16 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
196 pemb += hpad; 261 pemb += hpad;
197 262
198 /* Update network statistics. */ 263 /* Update network statistics. */
264 spin_lock_bh(&cfhsi->lock);
199 cfhsi->ndev->stats.tx_packets++; 265 cfhsi->ndev->stats.tx_packets++;
200 cfhsi->ndev->stats.tx_bytes += skb->len; 266 cfhsi->ndev->stats.tx_bytes += skb->len;
267 cfhsi_update_aggregation_stats(cfhsi, skb, -1);
268 spin_unlock_bh(&cfhsi->lock);
201 269
202 /* Copy in embedded CAIF frame. */ 270 /* Copy in embedded CAIF frame. */
203 skb_copy_bits(skb, 0, pemb, skb->len); 271 skb_copy_bits(skb, 0, pemb, skb->len);
272
273 /* Consume the SKB */
204 consume_skb(skb); 274 consume_skb(skb);
205 skb = NULL; 275 skb = NULL;
206 } 276 }
@@ -214,7 +284,7 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
214 int tpad = 0; 284 int tpad = 0;
215 285
216 if (!skb) 286 if (!skb)
217 skb = skb_dequeue(&cfhsi->qhead); 287 skb = cfhsi_dequeue(cfhsi);
218 288
219 if (!skb) 289 if (!skb)
220 break; 290 break;
@@ -233,8 +303,11 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
233 pfrm += hpad; 303 pfrm += hpad;
234 304
235 /* Update network statistics. */ 305 /* Update network statistics. */
306 spin_lock_bh(&cfhsi->lock);
236 cfhsi->ndev->stats.tx_packets++; 307 cfhsi->ndev->stats.tx_packets++;
237 cfhsi->ndev->stats.tx_bytes += skb->len; 308 cfhsi->ndev->stats.tx_bytes += skb->len;
309 cfhsi_update_aggregation_stats(cfhsi, skb, -1);
310 spin_unlock_bh(&cfhsi->lock);
238 311
239 /* Copy in CAIF frame. */ 312 /* Copy in CAIF frame. */
240 skb_copy_bits(skb, 0, pfrm, skb->len); 313 skb_copy_bits(skb, 0, pfrm, skb->len);
@@ -244,6 +317,8 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
244 317
245 /* Update frame pointer. */ 318 /* Update frame pointer. */
246 pfrm += skb->len + tpad; 319 pfrm += skb->len + tpad;
320
321 /* Consume the SKB */
247 consume_skb(skb); 322 consume_skb(skb);
248 skb = NULL; 323 skb = NULL;
249 324
@@ -258,8 +333,7 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
258 } 333 }
259 334
260 /* Check if we can piggy-back another descriptor. */ 335 /* Check if we can piggy-back another descriptor. */
261 skb = skb_peek(&cfhsi->qhead); 336 if (cfhsi_can_send_aggregate(cfhsi))
262 if (skb)
263 desc->header |= CFHSI_PIGGY_DESC; 337 desc->header |= CFHSI_PIGGY_DESC;
264 else 338 else
265 desc->header &= ~CFHSI_PIGGY_DESC; 339 desc->header &= ~CFHSI_PIGGY_DESC;
@@ -267,61 +341,71 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
267 return CFHSI_DESC_SZ + pld_len; 341 return CFHSI_DESC_SZ + pld_len;
268} 342}
269 343
270static void cfhsi_tx_done(struct cfhsi *cfhsi) 344static void cfhsi_start_tx(struct cfhsi *cfhsi)
271{ 345{
272 struct cfhsi_desc *desc = NULL; 346 struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
273 int len = 0; 347 int len, res;
274 int res;
275 348
276 dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__); 349 dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
277 350
278 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) 351 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
279 return; 352 return;
280 353
281 desc = (struct cfhsi_desc *)cfhsi->tx_buf;
282
283 do { 354 do {
284 /*
285 * Send flow on if flow off has been previously signalled
286 * and number of packets is below low water mark.
287 */
288 spin_lock_bh(&cfhsi->lock);
289 if (cfhsi->flow_off_sent &&
290 cfhsi->qhead.qlen <= cfhsi->q_low_mark &&
291 cfhsi->cfdev.flowctrl) {
292
293 cfhsi->flow_off_sent = 0;
294 cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
295 }
296 spin_unlock_bh(&cfhsi->lock);
297
298 /* Create HSI frame. */ 355 /* Create HSI frame. */
299 do { 356 len = cfhsi_tx_frm(desc, cfhsi);
300 len = cfhsi_tx_frm(desc, cfhsi); 357 if (!len) {
301 if (!len) { 358 spin_lock_bh(&cfhsi->lock);
302 spin_lock_bh(&cfhsi->lock); 359 if (unlikely(cfhsi_tx_queue_len(cfhsi))) {
303 if (unlikely(skb_peek(&cfhsi->qhead))) {
304 spin_unlock_bh(&cfhsi->lock);
305 continue;
306 }
307 cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
308 /* Start inactivity timer. */
309 mod_timer(&cfhsi->timer,
310 jiffies + cfhsi->inactivity_timeout);
311 spin_unlock_bh(&cfhsi->lock); 360 spin_unlock_bh(&cfhsi->lock);
312 goto done; 361 res = -EAGAIN;
362 continue;
313 } 363 }
314 } while (!len); 364 cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
365 /* Start inactivity timer. */
366 mod_timer(&cfhsi->inactivity_timer,
367 jiffies + cfhsi->inactivity_timeout);
368 spin_unlock_bh(&cfhsi->lock);
369 break;
370 }
315 371
316 /* Set up new transfer. */ 372 /* Set up new transfer. */
317 res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev); 373 res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
318 if (WARN_ON(res < 0)) { 374 if (WARN_ON(res < 0))
319 dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n", 375 dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
320 __func__, res); 376 __func__, res);
321 }
322 } while (res < 0); 377 } while (res < 0);
378}
379
380static void cfhsi_tx_done(struct cfhsi *cfhsi)
381{
382 dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
383
384 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
385 return;
386
387 /*
388 * Send flow on if flow off has been previously signalled
389 * and number of packets is below low water mark.
390 */
391 spin_lock_bh(&cfhsi->lock);
392 if (cfhsi->flow_off_sent &&
393 cfhsi_tx_queue_len(cfhsi) <= cfhsi->q_low_mark &&
394 cfhsi->cfdev.flowctrl) {
395
396 cfhsi->flow_off_sent = 0;
397 cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
398 }
399
400 if (cfhsi_can_send_aggregate(cfhsi)) {
401 spin_unlock_bh(&cfhsi->lock);
402 cfhsi_start_tx(cfhsi);
403 } else {
404 mod_timer(&cfhsi->aggregation_timer,
405 jiffies + cfhsi->aggregation_timeout);
406 spin_unlock_bh(&cfhsi->lock);
407 }
323 408
324done:
325 return; 409 return;
326} 410}
327 411
@@ -560,7 +644,7 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
560 644
561 /* Update inactivity timer if pending. */ 645 /* Update inactivity timer if pending. */
562 spin_lock_bh(&cfhsi->lock); 646 spin_lock_bh(&cfhsi->lock);
563 mod_timer_pending(&cfhsi->timer, 647 mod_timer_pending(&cfhsi->inactivity_timer,
564 jiffies + cfhsi->inactivity_timeout); 648 jiffies + cfhsi->inactivity_timeout);
565 spin_unlock_bh(&cfhsi->lock); 649 spin_unlock_bh(&cfhsi->lock);
566 650
@@ -793,12 +877,12 @@ wake_ack:
793 877
794 spin_lock_bh(&cfhsi->lock); 878 spin_lock_bh(&cfhsi->lock);
795 879
796 /* Resume transmit if queue is not empty. */ 880 /* Resume transmit if queues are not empty. */
797 if (!skb_peek(&cfhsi->qhead)) { 881 if (!cfhsi_tx_queue_len(cfhsi)) {
798 dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n", 882 dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n",
799 __func__); 883 __func__);
800 /* Start inactivity timer. */ 884 /* Start inactivity timer. */
801 mod_timer(&cfhsi->timer, 885 mod_timer(&cfhsi->inactivity_timer,
802 jiffies + cfhsi->inactivity_timeout); 886 jiffies + cfhsi->inactivity_timeout);
803 spin_unlock_bh(&cfhsi->lock); 887 spin_unlock_bh(&cfhsi->lock);
804 return; 888 return;
@@ -934,20 +1018,53 @@ static void cfhsi_wake_down_cb(struct cfhsi_drv *drv)
934 wake_up_interruptible(&cfhsi->wake_down_wait); 1018 wake_up_interruptible(&cfhsi->wake_down_wait);
935} 1019}
936 1020
1021static void cfhsi_aggregation_tout(unsigned long arg)
1022{
1023 struct cfhsi *cfhsi = (struct cfhsi *)arg;
1024
1025 dev_dbg(&cfhsi->ndev->dev, "%s.\n",
1026 __func__);
1027
1028 cfhsi_start_tx(cfhsi);
1029}
1030
937static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev) 1031static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
938{ 1032{
939 struct cfhsi *cfhsi = NULL; 1033 struct cfhsi *cfhsi = NULL;
940 int start_xfer = 0; 1034 int start_xfer = 0;
941 int timer_active; 1035 int timer_active;
1036 int prio;
942 1037
943 if (!dev) 1038 if (!dev)
944 return -EINVAL; 1039 return -EINVAL;
945 1040
946 cfhsi = netdev_priv(dev); 1041 cfhsi = netdev_priv(dev);
947 1042
1043 switch (skb->priority) {
1044 case TC_PRIO_BESTEFFORT:
1045 case TC_PRIO_FILLER:
1046 case TC_PRIO_BULK:
1047 prio = CFHSI_PRIO_BEBK;
1048 break;
1049 case TC_PRIO_INTERACTIVE_BULK:
1050 prio = CFHSI_PRIO_VI;
1051 break;
1052 case TC_PRIO_INTERACTIVE:
1053 prio = CFHSI_PRIO_VO;
1054 break;
1055 case TC_PRIO_CONTROL:
1056 default:
1057 prio = CFHSI_PRIO_CTL;
1058 break;
1059 }
1060
948 spin_lock_bh(&cfhsi->lock); 1061 spin_lock_bh(&cfhsi->lock);
949 1062
950 skb_queue_tail(&cfhsi->qhead, skb); 1063 /* Update aggregation statistics */
1064 cfhsi_update_aggregation_stats(cfhsi, skb, 1);
1065
1066 /* Queue the SKB */
1067 skb_queue_tail(&cfhsi->qhead[prio], skb);
951 1068
952 /* Sanity check; xmit should not be called after unregister_netdev */ 1069 /* Sanity check; xmit should not be called after unregister_netdev */
953 if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) { 1070 if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
@@ -958,7 +1075,7 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
958 1075
959 /* Send flow off if number of packets is above high water mark. */ 1076 /* Send flow off if number of packets is above high water mark. */
960 if (!cfhsi->flow_off_sent && 1077 if (!cfhsi->flow_off_sent &&
961 cfhsi->qhead.qlen > cfhsi->q_high_mark && 1078 cfhsi_tx_queue_len(cfhsi) > cfhsi->q_high_mark &&
962 cfhsi->cfdev.flowctrl) { 1079 cfhsi->cfdev.flowctrl) {
963 cfhsi->flow_off_sent = 1; 1080 cfhsi->flow_off_sent = 1;
964 cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF); 1081 cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
@@ -970,12 +1087,18 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
970 } 1087 }
971 1088
972 if (!start_xfer) { 1089 if (!start_xfer) {
1090 /* Send aggregate if it is possible */
1091 bool aggregate_ready =
1092 cfhsi_can_send_aggregate(cfhsi) &&
1093 del_timer(&cfhsi->aggregation_timer) > 0;
973 spin_unlock_bh(&cfhsi->lock); 1094 spin_unlock_bh(&cfhsi->lock);
1095 if (aggregate_ready)
1096 cfhsi_start_tx(cfhsi);
974 return 0; 1097 return 0;
975 } 1098 }
976 1099
977 /* Delete inactivity timer if started. */ 1100 /* Delete inactivity timer if started. */
978 timer_active = del_timer_sync(&cfhsi->timer); 1101 timer_active = del_timer_sync(&cfhsi->inactivity_timer);
979 1102
980 spin_unlock_bh(&cfhsi->lock); 1103 spin_unlock_bh(&cfhsi->lock);
981 1104
@@ -1026,6 +1149,7 @@ static const struct net_device_ops cfhsi_ops = {
1026 1149
1027static void cfhsi_setup(struct net_device *dev) 1150static void cfhsi_setup(struct net_device *dev)
1028{ 1151{
1152 int i;
1029 struct cfhsi *cfhsi = netdev_priv(dev); 1153 struct cfhsi *cfhsi = netdev_priv(dev);
1030 dev->features = 0; 1154 dev->features = 0;
1031 dev->netdev_ops = &cfhsi_ops; 1155 dev->netdev_ops = &cfhsi_ops;
@@ -1034,7 +1158,8 @@ static void cfhsi_setup(struct net_device *dev)
1034 dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ; 1158 dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
1035 dev->tx_queue_len = 0; 1159 dev->tx_queue_len = 0;
1036 dev->destructor = free_netdev; 1160 dev->destructor = free_netdev;
1037 skb_queue_head_init(&cfhsi->qhead); 1161 for (i = 0; i < CFHSI_PRIO_LAST; ++i)
1162 skb_queue_head_init(&cfhsi->qhead[i]);
1038 cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW; 1163 cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
1039 cfhsi->cfdev.use_frag = false; 1164 cfhsi->cfdev.use_frag = false;
1040 cfhsi->cfdev.use_stx = false; 1165 cfhsi->cfdev.use_stx = false;
@@ -1111,6 +1236,9 @@ int cfhsi_probe(struct platform_device *pdev)
1111 cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA; 1236 cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
1112 } 1237 }
1113 1238
1239 /* Initialize aggregation timeout */
1240 cfhsi->aggregation_timeout = aggregation_timeout;
1241
1114 /* Initialize recieve vaiables. */ 1242 /* Initialize recieve vaiables. */
1115 cfhsi->rx_ptr = cfhsi->rx_buf; 1243 cfhsi->rx_ptr = cfhsi->rx_buf;
1116 cfhsi->rx_len = CFHSI_DESC_SZ; 1244 cfhsi->rx_len = CFHSI_DESC_SZ;
@@ -1150,13 +1278,17 @@ int cfhsi_probe(struct platform_device *pdev)
1150 init_waitqueue_head(&cfhsi->flush_fifo_wait); 1278 init_waitqueue_head(&cfhsi->flush_fifo_wait);
1151 1279
1152 /* Setup the inactivity timer. */ 1280 /* Setup the inactivity timer. */
1153 init_timer(&cfhsi->timer); 1281 init_timer(&cfhsi->inactivity_timer);
1154 cfhsi->timer.data = (unsigned long)cfhsi; 1282 cfhsi->inactivity_timer.data = (unsigned long)cfhsi;
1155 cfhsi->timer.function = cfhsi_inactivity_tout; 1283 cfhsi->inactivity_timer.function = cfhsi_inactivity_tout;
1156 /* Setup the slowpath RX timer. */ 1284 /* Setup the slowpath RX timer. */
1157 init_timer(&cfhsi->rx_slowpath_timer); 1285 init_timer(&cfhsi->rx_slowpath_timer);
1158 cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi; 1286 cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi;
1159 cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath; 1287 cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath;
1288 /* Setup the aggregation timer. */
1289 init_timer(&cfhsi->aggregation_timer);
1290 cfhsi->aggregation_timer.data = (unsigned long)cfhsi;
1291 cfhsi->aggregation_timer.function = cfhsi_aggregation_tout;
1160 1292
1161 /* Add CAIF HSI device to list. */ 1293 /* Add CAIF HSI device to list. */
1162 spin_lock(&cfhsi_list_lock); 1294 spin_lock(&cfhsi_list_lock);
@@ -1222,8 +1354,9 @@ static void cfhsi_shutdown(struct cfhsi *cfhsi)
1222 flush_workqueue(cfhsi->wq); 1354 flush_workqueue(cfhsi->wq);
1223 1355
1224 /* Delete timers if pending */ 1356 /* Delete timers if pending */
1225 del_timer_sync(&cfhsi->timer); 1357 del_timer_sync(&cfhsi->inactivity_timer);
1226 del_timer_sync(&cfhsi->rx_slowpath_timer); 1358 del_timer_sync(&cfhsi->rx_slowpath_timer);
1359 del_timer_sync(&cfhsi->aggregation_timer);
1227 1360
1228 /* Cancel pending RX request (if any) */ 1361 /* Cancel pending RX request (if any) */
1229 cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev); 1362 cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);