-rw-r--r--  drivers/net/caif/caif_hsi.c  | 151
-rw-r--r--  include/net/caif/caif_hsi.h  |  14
2 files changed, 82 insertions, 83 deletions
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 0927c108bd14..1c2bd01e1592 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -32,51 +32,39 @@ MODULE_DESCRIPTION("CAIF HSI driver");
 #define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
 				(((pow)-((x)&((pow)-1)))))
 
-static int inactivity_timeout = 1000;
-module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms.");
-
-static int aggregation_timeout = 1;
-module_param(aggregation_timeout, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(aggregation_timeout, "Aggregation timeout on HSI, ms.");
-
-/*
- * HSI padding options.
- * Warning: must be a base of 2 (& operation used) and can not be zero !
- */
-static int hsi_head_align = 4;
-module_param(hsi_head_align, int, S_IRUGO);
-MODULE_PARM_DESC(hsi_head_align, "HSI head alignment.");
-
-static int hsi_tail_align = 4;
-module_param(hsi_tail_align, int, S_IRUGO);
-MODULE_PARM_DESC(hsi_tail_align, "HSI tail alignment.");
-
-/*
- * HSI link layer flowcontrol thresholds.
- * Warning: A high threshold value migth increase throughput but it will at
- * the same time prevent channel prioritization and increase the risk of
- * flooding the modem. The high threshold should be above the low.
- */
-static int hsi_high_threshold = 100;
-module_param(hsi_high_threshold, int, S_IRUGO);
-MODULE_PARM_DESC(hsi_high_threshold, "HSI high threshold (FLOW OFF).");
-
-static int hsi_low_threshold = 50;
-module_param(hsi_low_threshold, int, S_IRUGO);
-MODULE_PARM_DESC(hsi_low_threshold, "HSI high threshold (FLOW ON).");
+static const struct cfhsi_config hsi_default_config = {
+
+	/* Inactivity timeout on HSI, ms */
+	.inactivity_timeout = HZ,
+
+	/* Aggregation timeout (ms) of zero means no aggregation is done*/
+	.aggregation_timeout = 1,
+
+	/*
+	 * HSI link layer flow-control thresholds.
+	 * Threshold values for the HSI packet queue. Flow-control will be
+	 * asserted when the number of packets exceeds q_high_mark. It will
+	 * not be de-asserted before the number of packets drops below
+	 * q_low_mark.
+	 * Warning: A high threshold value might increase throughput but it
+	 * will at the same time prevent channel prioritization and increase
+	 * the risk of flooding the modem. The high threshold should be above
+	 * the low.
+	 */
+	.q_high_mark = 100,
+	.q_low_mark = 50,
+
+	/*
+	 * HSI padding options.
+	 * Warning: must be a base of 2 (& operation used) and can not be zero !
+	 */
+	.head_align = 4,
+	.tail_align = 4,
+};
 
 #define ON 1
 #define OFF 0
 
-/*
- * Threshold values for the HSI packet queue. Flowcontrol will be asserted
- * when the number of packets exceeds HIGH_WATER_MARK. It will not be
- * de-asserted before the number of packets drops below LOW_WATER_MARK.
- */
-#define LOW_WATER_MARK   hsi_low_threshold
-#define HIGH_WATER_MARK  hsi_high_threshold
-
 static LIST_HEAD(cfhsi_list);
 
 static void cfhsi_inactivity_tout(unsigned long arg)
@@ -99,8 +87,8 @@ static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
 	int hpad, tpad, len;
 
 	info = (struct caif_payload_info *)&skb->cb;
-	hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
-	tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
+	hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
+	tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
 	len = skb->len + hpad + tpad;
 
 	if (direction > 0)
@@ -113,7 +101,7 @@ static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
 {
 	int i;
 
-	if (cfhsi->aggregation_timeout == 0)
+	if (cfhsi->cfg.aggregation_timeout == 0)
 		return true;
 
 	for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
@@ -169,7 +157,7 @@ static void cfhsi_abort_tx(struct cfhsi *cfhsi)
 	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
 	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 		mod_timer(&cfhsi->inactivity_timer,
-			jiffies + cfhsi->inactivity_timeout);
+			jiffies + cfhsi->cfg.inactivity_timeout);
 	spin_unlock_bh(&cfhsi->lock);
 }
 
@@ -250,8 +238,8 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
 		/* Calculate needed head alignment and tail alignment. */
 		info = (struct caif_payload_info *)&skb->cb;
 
-		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
-		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
+		hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
+		tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
 
 		/* Check if frame still fits with added alignment. */
 		if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
@@ -292,8 +280,8 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
 		/* Calculate needed head alignment and tail alignment. */
 		info = (struct caif_payload_info *)&skb->cb;
 
-		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
-		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
+		hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
+		tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
 
 		/* Fill in CAIF frame length in descriptor. */
 		desc->cffrm_len[nfrms] = hpad + skb->len + tpad;
@@ -364,7 +352,7 @@ static void cfhsi_start_tx(struct cfhsi *cfhsi)
 			cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
 			/* Start inactivity timer. */
 			mod_timer(&cfhsi->inactivity_timer,
-				jiffies + cfhsi->inactivity_timeout);
+				jiffies + cfhsi->cfg.inactivity_timeout);
 			spin_unlock_bh(&cfhsi->lock);
 			break;
 		}
@@ -390,7 +378,7 @@ static void cfhsi_tx_done(struct cfhsi *cfhsi)
 	 */
 	spin_lock_bh(&cfhsi->lock);
 	if (cfhsi->flow_off_sent &&
-			cfhsi_tx_queue_len(cfhsi) <= cfhsi->q_low_mark &&
+			cfhsi_tx_queue_len(cfhsi) <= cfhsi->cfg.q_low_mark &&
 			cfhsi->cfdev.flowctrl) {
 
 		cfhsi->flow_off_sent = 0;
@@ -402,7 +390,7 @@ static void cfhsi_tx_done(struct cfhsi *cfhsi)
 			cfhsi_start_tx(cfhsi);
 		} else {
 			mod_timer(&cfhsi->aggregation_timer,
-				jiffies + cfhsi->aggregation_timeout);
+				jiffies + cfhsi->cfg.aggregation_timeout);
 			spin_unlock_bh(&cfhsi->lock);
 		}
 
@@ -645,7 +633,7 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
 	/* Update inactivity timer if pending. */
 	spin_lock_bh(&cfhsi->lock);
 	mod_timer_pending(&cfhsi->inactivity_timer,
-			jiffies + cfhsi->inactivity_timeout);
+			jiffies + cfhsi->cfg.inactivity_timeout);
 	spin_unlock_bh(&cfhsi->lock);
 
 	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
@@ -880,7 +868,7 @@ wake_ack:
 			__func__);
 		/* Start inactivity timer. */
 		mod_timer(&cfhsi->inactivity_timer,
-			jiffies + cfhsi->inactivity_timeout);
+			jiffies + cfhsi->cfg.inactivity_timeout);
 		spin_unlock_bh(&cfhsi->lock);
 		return;
 	}
@@ -1071,7 +1059,7 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* Send flow off if number of packets is above high water mark. */
 	if (!cfhsi->flow_off_sent &&
-		cfhsi_tx_queue_len(cfhsi) > cfhsi->q_high_mark &&
+		cfhsi_tx_queue_len(cfhsi) > cfhsi->cfg.q_high_mark &&
 		cfhsi->cfdev.flowctrl) {
 		cfhsi->flow_off_sent = 1;
 		cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
@@ -1143,6 +1131,7 @@ static void cfhsi_setup(struct net_device *dev)
 	cfhsi->cfdev.use_stx = false;
 	cfhsi->cfdev.use_fcs = false;
 	cfhsi->ndev = dev;
+	cfhsi->cfg = hsi_default_config;
 }
 
 static int cfhsi_open(struct net_device *ndev)
@@ -1158,9 +1147,6 @@ static int cfhsi_open(struct net_device *ndev)
 
 	/* Set flow info */
 	cfhsi->flow_off_sent = 0;
-	cfhsi->q_low_mark = LOW_WATER_MARK;
-	cfhsi->q_high_mark = HIGH_WATER_MARK;
-
 
 	/*
 	 * Allocate a TX buffer with the size of a HSI packet descriptors
@@ -1188,20 +1174,8 @@ static int cfhsi_open(struct net_device *ndev)
 		goto err_alloc_rx_flip;
 	}
 
-	/* Pre-calculate inactivity timeout. */
-	if (inactivity_timeout != -1) {
-		cfhsi->inactivity_timeout =
-			inactivity_timeout * HZ / 1000;
-		if (!cfhsi->inactivity_timeout)
-			cfhsi->inactivity_timeout = 1;
-		else if (cfhsi->inactivity_timeout > NEXT_TIMER_MAX_DELTA)
-			cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
-	} else {
-		cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
-	}
-
 	/* Initialize aggregation timeout */
-	cfhsi->aggregation_timeout = aggregation_timeout;
+	cfhsi->cfg.aggregation_timeout = hsi_default_config.aggregation_timeout;
 
 	/* Initialize recieve vaiables. */
 	cfhsi->rx_ptr = cfhsi->rx_buf;
@@ -1350,24 +1324,39 @@ static void cfhsi_netlink_parms(struct nlattr *data[], struct cfhsi *cfhsi)
 	}
 
 	i = __IFLA_CAIF_HSI_INACTIVITY_TOUT;
-	if (data[i])
-		inactivity_timeout = nla_get_u32(data[i]);
+	/*
+	 * Inactivity timeout in millisecs. Lowest possible value is 1,
+	 * and highest possible is NEXT_TIMER_MAX_DELTA.
+	 */
+	if (data[i]) {
+		u32 inactivity_timeout = nla_get_u32(data[i]);
+		/* Pre-calculate inactivity timeout. */
+		cfhsi->cfg.inactivity_timeout = inactivity_timeout * HZ / 1000;
+		if (cfhsi->cfg.inactivity_timeout == 0)
+			cfhsi->cfg.inactivity_timeout = 1;
+		else if (cfhsi->cfg.inactivity_timeout > NEXT_TIMER_MAX_DELTA)
+			cfhsi->cfg.inactivity_timeout = NEXT_TIMER_MAX_DELTA;
+	}
 
 	i = __IFLA_CAIF_HSI_AGGREGATION_TOUT;
 	if (data[i])
-		aggregation_timeout = nla_get_u32(data[i]);
+		cfhsi->cfg.aggregation_timeout = nla_get_u32(data[i]);
 
 	i = __IFLA_CAIF_HSI_HEAD_ALIGN;
 	if (data[i])
-		hsi_head_align = nla_get_u32(data[i]);
+		cfhsi->cfg.head_align = nla_get_u32(data[i]);
 
 	i = __IFLA_CAIF_HSI_TAIL_ALIGN;
 	if (data[i])
-		hsi_tail_align = nla_get_u32(data[i]);
+		cfhsi->cfg.tail_align = nla_get_u32(data[i]);
 
 	i = __IFLA_CAIF_HSI_QHIGH_WATERMARK;
 	if (data[i])
-		hsi_high_threshold = nla_get_u32(data[i]);
+		cfhsi->cfg.q_high_mark = nla_get_u32(data[i]);
+
+	i = __IFLA_CAIF_HSI_QLOW_WATERMARK;
+	if (data[i])
+		cfhsi->cfg.q_low_mark = nla_get_u32(data[i]);
 }
 
 static int caif_hsi_changelink(struct net_device *dev, struct nlattr *tb[],
@@ -1398,16 +1387,20 @@ static size_t caif_hsi_get_size(const struct net_device *dev)
 
 static int caif_hsi_fill_info(struct sk_buff *skb, const struct net_device *dev)
 {
+	struct cfhsi *cfhsi = netdev_priv(dev);
+
 	if (nla_put_u32(skb, __IFLA_CAIF_HSI_INACTIVITY_TOUT,
-			inactivity_timeout) ||
+			cfhsi->cfg.inactivity_timeout) ||
 	    nla_put_u32(skb, __IFLA_CAIF_HSI_AGGREGATION_TOUT,
-			aggregation_timeout) ||
-	    nla_put_u32(skb, __IFLA_CAIF_HSI_HEAD_ALIGN, hsi_head_align) ||
-	    nla_put_u32(skb, __IFLA_CAIF_HSI_TAIL_ALIGN, hsi_tail_align) ||
+			cfhsi->cfg.aggregation_timeout) ||
+	    nla_put_u32(skb, __IFLA_CAIF_HSI_HEAD_ALIGN,
+			cfhsi->cfg.head_align) ||
+	    nla_put_u32(skb, __IFLA_CAIF_HSI_TAIL_ALIGN,
+			cfhsi->cfg.tail_align) ||
 	    nla_put_u32(skb, __IFLA_CAIF_HSI_QHIGH_WATERMARK,
-			hsi_high_threshold) ||
+			cfhsi->cfg.q_high_mark) ||
 	    nla_put_u32(skb, __IFLA_CAIF_HSI_QLOW_WATERMARK,
-			hsi_low_threshold))
+			cfhsi->cfg.q_low_mark))
 		return -EMSGSIZE;
 
 	return 0;
diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
index 6dc7dc2674b2..bcb9cc3ce98b 100644
--- a/include/net/caif/caif_hsi.h
+++ b/include/net/caif/caif_hsi.h
@@ -132,6 +132,15 @@ enum {
 	CFHSI_PRIO_LAST,
 };
 
+struct cfhsi_config {
+	u32 inactivity_timeout;
+	u32 aggregation_timeout;
+	u32 head_align;
+	u32 tail_align;
+	u32 q_high_mark;
+	u32 q_low_mark;
+};
+
 /* Structure implemented by CAIF HSI drivers. */
 struct cfhsi {
 	struct caif_dev_common cfdev;
@@ -142,7 +151,7 @@ struct cfhsi {
 	struct cfhsi_ops *ops;
 	int tx_state;
 	struct cfhsi_rx_state rx_state;
-	unsigned long inactivity_timeout;
+	struct cfhsi_config cfg;
 	int rx_len;
 	u8 *rx_ptr;
 	u8 *tx_buf;
@@ -150,8 +159,6 @@ struct cfhsi {
 	u8 *rx_flip_buf;
 	spinlock_t lock;
 	int flow_off_sent;
-	u32 q_low_mark;
-	u32 q_high_mark;
 	struct list_head list;
 	struct work_struct wake_up_work;
 	struct work_struct wake_down_work;
@@ -164,7 +171,6 @@ struct cfhsi {
 	struct timer_list rx_slowpath_timer;
 
 	/* TX aggregation */
-	unsigned long aggregation_timeout;
 	int aggregation_len;
 	struct timer_list aggregation_timer;
 
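
For reference, a minimal user-space sketch (not part of the patch; the header and payload sizes are made-up example values) of how the PAD_POW2() macro, together with the head_align and tail_align fields carried in the new cfhsi_config struct, produces the padded frame length computed in cfhsi_tx_frm() and cfhsi_update_aggregation_stats():

/*
 * Standalone illustration only, compiled outside the kernel. The frame
 * sizes below are hypothetical; the macro and the hpad/tpad expressions
 * mirror drivers/net/caif/caif_hsi.c.
 */
#include <stdio.h>

/* Number of pad bytes needed to round x up to a multiple of pow
 * (pow must be a non-zero power of two, as the driver comment warns). */
#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 : \
                          (((pow)-((x)&((pow)-1)))))

int main(void)
{
        /* Defaults from hsi_default_config. */
        unsigned int head_align = 4, tail_align = 4;
        /* Hypothetical CAIF frame: 5-byte header, 28 bytes total. */
        unsigned int hdr_len = 5, len = 28;

        /* Head padding: one extra byte plus alignment of (hdr_len + 1). */
        unsigned int hpad = 1 + PAD_POW2(hdr_len + 1, head_align);
        /* Tail padding aligns the end of the padded frame. */
        unsigned int tpad = PAD_POW2(len + hpad, tail_align);

        /* Prints hpad=3 tpad=1 total=32 for the values above. */
        printf("hpad=%u tpad=%u total=%u\n", hpad, tpad, len + hpad + tpad);
        return 0;
}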