Diffstat (limited to 'drivers/net/enic/enic_main.c')
-rw-r--r--	drivers/net/enic/enic_main.c	223
1 file changed, 139 insertions(+), 84 deletions(-)
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index d69d52ed7726..cf098bb636b8 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -51,7 +51,7 @@
 #define PCI_DEVICE_ID_CISCO_VIC_ENET	0x0043	/* ethernet vnic */
 
 /* Supported devices */
-static struct pci_device_id enic_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
 	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
 	{ 0, }	/* end of table */
 };
@@ -261,6 +261,62 @@ static void enic_set_msglevel(struct net_device *netdev, u32 value)
 	enic->msg_enable = value;
 }
 
+static int enic_get_coalesce(struct net_device *netdev,
+	struct ethtool_coalesce *ecmd)
+{
+	struct enic *enic = netdev_priv(netdev);
+
+	ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
+	ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
+
+	return 0;
+}
+
+static int enic_set_coalesce(struct net_device *netdev,
+	struct ethtool_coalesce *ecmd)
+{
+	struct enic *enic = netdev_priv(netdev);
+	u32 tx_coalesce_usecs;
+	u32 rx_coalesce_usecs;
+
+	tx_coalesce_usecs = min_t(u32,
+		INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
+		ecmd->tx_coalesce_usecs);
+	rx_coalesce_usecs = min_t(u32,
+		INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
+		ecmd->rx_coalesce_usecs);
+
+	switch (vnic_dev_get_intr_mode(enic->vdev)) {
+	case VNIC_DEV_INTR_MODE_INTX:
+		if (tx_coalesce_usecs != rx_coalesce_usecs)
+			return -EINVAL;
+
+		vnic_intr_coalescing_timer_set(&enic->intr[ENIC_INTX_WQ_RQ],
+			INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
+		break;
+	case VNIC_DEV_INTR_MODE_MSI:
+		if (tx_coalesce_usecs != rx_coalesce_usecs)
+			return -EINVAL;
+
+		vnic_intr_coalescing_timer_set(&enic->intr[0],
+			INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
+		break;
+	case VNIC_DEV_INTR_MODE_MSIX:
+		vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_WQ],
+			INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
+		vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_RQ],
+			INTR_COALESCE_USEC_TO_HW(rx_coalesce_usecs));
+		break;
+	default:
+		break;
+	}
+
+	enic->tx_coalesce_usecs = tx_coalesce_usecs;
+	enic->rx_coalesce_usecs = rx_coalesce_usecs;
+
+	return 0;
+}
+
 static const struct ethtool_ops enic_ethtool_ops = {
 	.get_settings = enic_get_settings,
 	.get_drvinfo = enic_get_drvinfo,
@@ -278,6 +334,8 @@ static const struct ethtool_ops enic_ethtool_ops = {
 	.set_sg = ethtool_op_set_sg,
 	.get_tso = ethtool_op_get_tso,
 	.set_tso = enic_set_tso,
+	.get_coalesce = enic_get_coalesce,
+	.set_coalesce = enic_set_coalesce,
 	.get_flags = ethtool_op_get_flags,
 	.set_flags = ethtool_op_set_flags,
 };
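
A note on the new coalescing hooks: enic_set_coalesce() clamps the requested microsecond values to the largest delay the vnic interrupt timer can encode before programming the hardware. The standalone C sketch below illustrates only that clamp; the VNIC_INTR_TIMER_MAX value and the usec/hw conversion factors are made-up stand-ins for the real definitions in vnic_intr.h, and min_t() is re-created for userspace.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's vnic_intr.h definitions;
 * the value and conversion factors here are illustrative only. */
#define VNIC_INTR_TIMER_MAX		0xffff
#define INTR_COALESCE_HW_TO_USEC(hw)	((hw) * 2 / 3)
#define INTR_COALESCE_USEC_TO_HW(usec)	((usec) * 3 / 2)

/* Userspace re-creation of the kernel's min_t() */
#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	uint32_t requested = 5000000;	/* oversized request, in usec */
	uint32_t usecs = min_t(uint32_t,
		INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX), requested);

	/* The clamped value is what would be handed to
	 * vnic_intr_coalescing_timer_set(), after conversion. */
	printf("requested %u usec -> clamped %u usec -> hw ticks %u\n",
		requested, usecs, INTR_COALESCE_USEC_TO_HW(usecs));
	return 0;
}
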
@@ -363,12 +421,12 @@ static void enic_mtu_check(struct enic *enic)
 	u32 mtu = vnic_dev_mtu(enic->vdev);
 
 	if (mtu && mtu != enic->port_mtu) {
+		enic->port_mtu = mtu;
 		if (mtu < enic->netdev->mtu)
 			printk(KERN_WARNING PFX
 				"%s: interface MTU (%d) set higher "
 				"than switch port MTU (%d)\n",
 				enic->netdev->name, enic->netdev->mtu, mtu);
-		enic->port_mtu = mtu;
 	}
 }
 
@@ -673,7 +731,7 @@ static inline void enic_queue_wq_skb(struct enic *enic,
 
 /* netif_tx_lock held, process context with BHs disabled, or BH */
 static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
-					struct net_device *netdev)
+	struct net_device *netdev)
 {
 	struct enic *enic = netdev_priv(netdev);
 	struct vnic_wq *wq = &enic->wq[0];
@@ -764,15 +822,16 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr)
 static void enic_set_multicast_list(struct net_device *netdev)
 {
 	struct enic *enic = netdev_priv(netdev);
-	struct dev_mc_list *list = netdev->mc_list;
+	struct dev_mc_list *list;
 	int directed = 1;
 	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
 	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
 	int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
+	unsigned int mc_count = netdev_mc_count(netdev);
 	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
-		(netdev->mc_count > ENIC_MULTICAST_PERFECT_FILTERS);
+		mc_count > ENIC_MULTICAST_PERFECT_FILTERS;
+	unsigned int flags = netdev->flags | (allmulti ? IFF_ALLMULTI : 0);
 	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
-	unsigned int mc_count = netdev->mc_count;
 	unsigned int i, j;
 
 	if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS)
@@ -780,8 +839,11 @@ static void enic_set_multicast_list(struct net_device *netdev)
 
 	spin_lock(&enic->devcmd_lock);
 
-	vnic_dev_packet_filter(enic->vdev, directed,
-		multicast, broadcast, promisc, allmulti);
+	if (enic->flags != flags) {
+		enic->flags = flags;
+		vnic_dev_packet_filter(enic->vdev, directed,
+			multicast, broadcast, promisc, allmulti);
+	}
 
 	/* Is there an easier way?  Trying to minimize to
 	 * calls to add/del multicast addrs.  We keep the
@@ -789,9 +851,11 @@ static void enic_set_multicast_list(struct net_device *netdev)
 	 * look for changes to add/del.
 	 */
 
-	for (i = 0; list && i < mc_count; i++) {
-		memcpy(mc_addr[i], list->dmi_addr, ETH_ALEN);
-		list = list->next;
+	i = 0;
+	netdev_for_each_mc_addr(list, netdev) {
+		if (i == mc_count)
+			break;
+		memcpy(mc_addr[i++], list->dmi_addr, ETH_ALEN);
 	}
 
 	for (i = 0; i < enic->mc_count; i++) {
@@ -870,19 +934,6 @@ static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
 	dev_kfree_skb_any(buf->os_buf);
 }
 
-static inline struct sk_buff *enic_rq_alloc_skb(struct net_device *netdev,
-	unsigned int size)
-{
-	struct sk_buff *skb;
-
-	skb = netdev_alloc_skb(netdev, size + NET_IP_ALIGN);
-
-	if (skb)
-		skb_reserve(skb, NET_IP_ALIGN);
-
-	return skb;
-}
-
 static int enic_rq_alloc_buf(struct vnic_rq *rq)
 {
 	struct enic *enic = vnic_dev_priv(rq->vdev);
@@ -892,7 +943,7 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
 	unsigned int os_buf_index = 0;
 	dma_addr_t dma_addr;
 
-	skb = enic_rq_alloc_skb(netdev, len);
+	skb = netdev_alloc_skb_ip_align(netdev, len);
 	if (!skb)
 		return -ENOMEM;
 
@@ -1097,34 +1148,6 @@ static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
 	return 0;
 }
 
-static void enic_rq_drop_buf(struct vnic_rq *rq,
-	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
-	int skipped, void *opaque)
-{
-	struct enic *enic = vnic_dev_priv(rq->vdev);
-	struct sk_buff *skb = buf->os_buf;
-
-	if (skipped)
-		return;
-
-	pci_unmap_single(enic->pdev, buf->dma_addr,
-		buf->len, PCI_DMA_FROMDEVICE);
-
-	dev_kfree_skb_any(skb);
-}
-
-static int enic_rq_service_drop(struct vnic_dev *vdev, struct cq_desc *cq_desc,
-	u8 type, u16 q_number, u16 completed_index, void *opaque)
-{
-	struct enic *enic = vnic_dev_priv(vdev);
-
-	vnic_rq_service(&enic->rq[q_number], cq_desc,
-		completed_index, VNIC_RQ_RETURN_DESC,
-		enic_rq_drop_buf, opaque);
-
-	return 0;
-}
-
 static int enic_poll(struct napi_struct *napi, int budget)
 {
 	struct enic *enic = container_of(napi, struct enic, napi);
@@ -1132,6 +1155,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
 	unsigned int rq_work_to_do = budget;
 	unsigned int wq_work_to_do = -1; /* no limit */
 	unsigned int work_done, rq_work_done, wq_work_done;
+	int err;
 
 	/* Service RQ (first) and WQ
 	 */
@@ -1155,16 +1179,19 @@ static int enic_poll(struct napi_struct *napi, int budget)
 			0 /* don't unmask intr */,
 			0 /* don't reset intr timer */);
 
-	if (rq_work_done > 0) {
+	err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
 
-		/* Replenish RQ
-		 */
+	/* Buffer allocation failed. Stay in polling
+	 * mode so we can try to fill the ring again.
+	 */
 
-		vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
+	if (err)
+		rq_work_done = rq_work_to_do;
 
-	} else {
+	if (rq_work_done < rq_work_to_do) {
 
-		/* If no work done, flush all LROs and exit polling
+		/* Some work done, but not enough to stay in polling,
+		 * flush all LROs and exit polling
 		 */
 
 		if (netdev->features & NETIF_F_LRO)
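
The rework above leans on the NAPI contract: a poll handler that returns its full budget stays scheduled, while one that returns less must call napi_complete() and re-enable interrupts. Forcing rq_work_done up to the budget when the ring refill fails therefore keeps the driver in polling mode until buffers can be allocated again. A minimal kernel-context sketch of the same pattern; struct my_priv and the my_clean_rx()/my_refill_rx()/my_unmask_irq() helpers are hypothetical stand-ins for the enic-specific pieces.

#include <linux/netdevice.h>

struct my_priv {
	struct napi_struct napi;
	/* rx ring state would live here */
};

/* Hypothetical helpers standing in for the driver-specific calls */
int my_clean_rx(struct my_priv *priv, int budget);	/* packets serviced */
int my_refill_rx(struct my_priv *priv);			/* 0 on success */
void my_unmask_irq(struct my_priv *priv);

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work_done = my_clean_rx(priv, budget);

	if (my_refill_rx(priv))		/* buffer allocation failed */
		work_done = budget;	/* claim full budget: stay polling */

	if (work_done < budget) {
		napi_complete(napi);	/* exit polling ... */
		my_unmask_irq(priv);	/* ... and re-arm the interrupt */
	}

	return work_done;
}
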
@@ -1183,6 +1210,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
 	struct net_device *netdev = enic->netdev;
 	unsigned int work_to_do = budget;
 	unsigned int work_done;
+	int err;
 
 	/* Service RQ
 	 */
@@ -1190,25 +1218,30 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
 	work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
 		work_to_do, enic_rq_service, NULL);
 
-	if (work_done > 0) {
-
-		/* Replenish RQ
-		 */
-
-		vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
-
-		/* Return intr event credits for this polling
-		 * cycle. An intr event is the completion of a
-		 * RQ packet.
-		 */
+	/* Return intr event credits for this polling
+	 * cycle. An intr event is the completion of a
+	 * RQ packet.
+	 */
 
+	if (work_done > 0)
 		vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ],
 			work_done,
 			0 /* don't unmask intr */,
 			0 /* don't reset intr timer */);
-	} else {
 
-		/* If no work done, flush all LROs and exit polling
+	err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
+
+	/* Buffer allocation failed. Stay in polling mode
+	 * so we can try to fill the ring again.
+	 */
+
+	if (err)
+		work_done = work_to_do;
+
+	if (work_done < work_to_do) {
+
+		/* Some work done, but not enough to stay in polling,
+		 * flush all LROs and exit polling
 		 */
 
 		if (netdev->features & NETIF_F_LRO)
@@ -1317,6 +1350,24 @@ static int enic_request_intr(struct enic *enic)
 	return err;
 }
 
+static void enic_synchronize_irqs(struct enic *enic)
+{
+	unsigned int i;
+
+	switch (vnic_dev_get_intr_mode(enic->vdev)) {
+	case VNIC_DEV_INTR_MODE_INTX:
+	case VNIC_DEV_INTR_MODE_MSI:
+		synchronize_irq(enic->pdev->irq);
+		break;
+	case VNIC_DEV_INTR_MODE_MSIX:
+		for (i = 0; i < enic->intr_count; i++)
+			synchronize_irq(enic->msix_entry[i].vector);
+		break;
+	default:
+		break;
+	}
+}
+
 static int enic_notify_set(struct enic *enic)
 {
 	int err;
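
The new enic_synchronize_irqs() exists so that a later hunk can make enic_stop() mask every interrupt source and then wait for handlers still running on other CPUs before dismantling the queues. The same quiesce idiom, sketched with hypothetical my_* names:

#include <linux/interrupt.h>
#include <linux/pci.h>

struct my_dev {
	struct pci_dev *pdev;
};

void my_mask_irq(struct my_dev *dev);	/* hypothetical: silence the source */

static void my_quiesce(struct my_dev *dev)
{
	my_mask_irq(dev);		 /* no new interrupts will fire */
	synchronize_irq(dev->pdev->irq); /* wait out in-flight handlers */
	/* now safe to free resources the handler may touch */
}
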
@@ -1373,11 +1424,13 @@ static int enic_open(struct net_device *netdev)
 	}
 
 	for (i = 0; i < enic->rq_count; i++) {
-		err = vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
-		if (err) {
+		vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
+		/* Need at least one buffer on ring to get going */
+		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
 			printk(KERN_ERR PFX
 				"%s: Unable to alloc receive buffers.\n",
 				netdev->name);
+			err = -ENOMEM;
 			goto err_out_notify_unset;
 		}
 	}
@@ -1422,16 +1475,19 @@ static int enic_stop(struct net_device *netdev)
 	unsigned int i;
 	int err;
 
+	for (i = 0; i < enic->intr_count; i++)
+		vnic_intr_mask(&enic->intr[i]);
+
+	enic_synchronize_irqs(enic);
+
 	del_timer_sync(&enic->notify_timer);
 
 	spin_lock(&enic->devcmd_lock);
 	vnic_dev_disable(enic->vdev);
 	spin_unlock(&enic->devcmd_lock);
 	napi_disable(&enic->napi);
-	netif_stop_queue(netdev);
-
-	for (i = 0; i < enic->intr_count; i++)
-		vnic_intr_mask(&enic->intr[i]);
+	netif_carrier_off(netdev);
+	netif_tx_disable(netdev);
 
 	for (i = 0; i < enic->wq_count; i++) {
 		err = vnic_wq_disable(&enic->wq[i]);
@@ -1449,11 +1505,6 @@ static int enic_stop(struct net_device *netdev)
 	spin_unlock(&enic->devcmd_lock);
 	enic_free_intr(enic);
 
-	(void)vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
-		-1, enic_rq_service_drop, NULL);
-	(void)vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
-		-1, enic_wq_service, NULL);
-
 	for (i = 0; i < enic->wq_count; i++)
 		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
 	for (i = 0; i < enic->rq_count; i++)
@@ -1775,7 +1826,8 @@ int enic_dev_init(struct enic *enic)
 	err = enic_set_intr_mode(enic);
 	if (err) {
 		printk(KERN_ERR PFX
-			"Failed to set intr mode, aborting.\n");
+			"Failed to set intr mode based on resource "
+			"counts and system capabilities, aborting.\n");
 		return err;
 	}
 
@@ -1999,6 +2051,9 @@ static int __devinit enic_probe(struct pci_dev *pdev,
 		goto err_out_dev_deinit;
 	}
 
+	enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
+	enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
+
 	netdev->netdev_ops = &enic_netdev_ops;
 	netdev->watchdog_timeo = 2 * HZ;
 	netdev->ethtool_ops = &enic_ethtool_ops;
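
With .get_coalesce/.set_coalesce wired into enic_ethtool_ops and the defaults seeded from enic->config.intr_timer_usec at probe time, the settings become reachable through the standard ethtool interface (ethtool -c / ethtool -C). A small userspace sketch reading them back via the ETHTOOL_GCOALESCE ioctl; the interface name "eth0" is an assumption.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_coalesce ecmd = { .cmd = ETHTOOL_GCOALESCE };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */
	ifr.ifr_data = (void *)&ecmd;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("SIOCETHTOOL");
		close(fd);
		return 1;
	}

	/* On an enic device these round-trip through enic_get_coalesce() */
	printf("tx-usecs: %u, rx-usecs: %u\n",
		ecmd.tx_coalesce_usecs, ecmd.rx_coalesce_usecs);
	close(fd);
	return 0;
}
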