author	Scott Feldman <scofeldm@cisco.com>	2009-12-23 08:27:38 -0500
committer	David S. Miller <davem@davemloft.net>	2009-12-24 00:03:40 -0500
commit	2d6ddced5c99cf79c06b9b6ec1366ab63b970ea9 (patch)
tree	b79d03cfecf1100fd6e4eb8598493df2c8269af1 /drivers
parent	b3d18d191bb805f3effdfc083c4ce79789470b46 (diff)
enic: Bug fix: try harder to fill Rx ring on skb allocation failures
During dev->open(), make sure we get at least one skb on the Rx ring.
Otherwise abort the interface load. Also, if we get skb allocation
failures in NAPI poll while trying to replenish the ring, try again
later so we don't end up starving out the Rx ring completely.

Signed-off-by: Vasanthy Kolluri <vkolluri@cisco.com>
Signed-off-by: Scott Feldman <scofeldm@cisco.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
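In NAPI terms, the poll-path half of the fix is: always try to replenish the Rx ring, and if the skb allocation fails, report the full budget as consumed so the driver stays in polling mode and retries the refill on the next poll; only when work_done ends up below the budget does the driver complete the poll and unmask the interrupt. A minimal sketch of that pattern follows, with hypothetical names (my_ctx, my_service_rx(), my_refill_rx_ring(), my_unmask_irq()) standing in for the enic/vnic helpers shown in the diff below.

#include <linux/netdevice.h>

/* Hypothetical driver context and helpers, not the enic structures. */
struct my_ctx {
	struct napi_struct napi;
	/* ... rx ring and irq state ... */
};

static int my_service_rx(struct my_ctx *ctx, int budget);	/* packets handled */
static int my_refill_rx_ring(struct my_ctx *ctx);		/* 0 or -ENOMEM */
static void my_unmask_irq(struct my_ctx *ctx);

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_ctx *ctx = container_of(napi, struct my_ctx, napi);
	int work_done = my_service_rx(ctx, budget);

	/* Always try to replenish the ring, even if no packets were seen. */
	if (my_refill_rx_ring(ctx))
		/* skb allocation failed: claim the full budget so NAPI keeps
		 * polling us and the refill is retried on the next cycle.
		 */
		work_done = budget;

	if (work_done < budget) {
		/* Ring replenished and budget not exhausted: leave polling
		 * mode and re-enable the device interrupt.
		 */
		napi_complete(napi);
		my_unmask_irq(ctx);
	}

	return work_done;
}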
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/enic/enic_main.c	54
1 file changed, 33 insertions(+), 21 deletions(-)
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index b4a11befb3b3..452a6b747e3e 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -1091,6 +1091,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
 	unsigned int rq_work_to_do = budget;
 	unsigned int wq_work_to_do = -1; /* no limit */
 	unsigned int work_done, rq_work_done, wq_work_done;
+	int err;
 
 	/* Service RQ (first) and WQ
 	 */
@@ -1114,16 +1115,19 @@ static int enic_poll(struct napi_struct *napi, int budget)
 			0 /* don't unmask intr */,
 			0 /* don't reset intr timer */);
 
-	if (rq_work_done > 0) {
+	err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
 
-		/* Replenish RQ
-		 */
+	/* Buffer allocation failed. Stay in polling
+	 * mode so we can try to fill the ring again.
+	 */
 
-		vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
+	if (err)
+		rq_work_done = rq_work_to_do;
 
-	} else {
+	if (rq_work_done < rq_work_to_do) {
 
-		/* If no work done, flush all LROs and exit polling
+		/* Some work done, but not enough to stay in polling,
+		 * flush all LROs and exit polling
 		 */
 
 		if (netdev->features & NETIF_F_LRO)
@@ -1142,6 +1146,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
 	struct net_device *netdev = enic->netdev;
 	unsigned int work_to_do = budget;
 	unsigned int work_done;
+	int err;
 
 	/* Service RQ
 	 */
@@ -1149,25 +1154,30 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
 	work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
 		work_to_do, enic_rq_service, NULL);
 
-	if (work_done > 0) {
-
-		/* Replenish RQ
-		 */
-
-		vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
-
-		/* Return intr event credits for this polling
-		 * cycle. An intr event is the completion of a
-		 * RQ packet.
-		 */
+	/* Return intr event credits for this polling
+	 * cycle. An intr event is the completion of a
+	 * RQ packet.
+	 */
 
+	if (work_done > 0)
 		vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ],
 			work_done,
 			0 /* don't unmask intr */,
 			0 /* don't reset intr timer */);
-	} else {
 
-		/* If no work done, flush all LROs and exit polling
+	err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
+
+	/* Buffer allocation failed. Stay in polling mode
+	 * so we can try to fill the ring again.
+	 */
+
+	if (err)
+		work_done = work_to_do;
+
+	if (work_done < work_to_do) {
+
+		/* Some work done, but not enough to stay in polling,
+		 * flush all LROs and exit polling
 		 */
 
 		if (netdev->features & NETIF_F_LRO)
@@ -1350,11 +1360,13 @@ static int enic_open(struct net_device *netdev)
 	}
 
 	for (i = 0; i < enic->rq_count; i++) {
-		err = vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
-		if (err) {
+		vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
+		/* Need at least one buffer on ring to get going */
+		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
 			printk(KERN_ERR PFX
 				"%s: Unable to alloc receive buffers.\n",
 				netdev->name);
+			err = -ENOMEM;
 			goto err_out_notify_unset;
 		}
 	}
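The enic_open() hunk above is the other half of the fix: the initial ring fill becomes best-effort, and the open fails only if not even one receive buffer could be posted. Roughly, the check looks like the sketch below, where my_rx_ring, my_refill_rx_ring() and my_ring_posted() are hypothetical stand-ins for the enic rq, vnic_rq_fill() and vnic_rq_desc_used().

#include <linux/errno.h>
#include <linux/netdevice.h>

struct my_rx_ring;						/* hypothetical ring type */
static void my_refill_rx_ring(struct my_rx_ring *ring);		/* best-effort fill */
static unsigned int my_ring_posted(const struct my_rx_ring *ring);	/* buffers currently posted */

/* Called from ndo_open(): post receive buffers, requiring at least one. */
static int my_open_fill_rx(struct net_device *netdev, struct my_rx_ring *ring)
{
	/* Individual skb allocations may fail; the NAPI poll path keeps
	 * topping the ring up later (see the poll sketch above).
	 */
	my_refill_rx_ring(ring);

	/* Need at least one buffer on the ring to get going; otherwise
	 * abort the open instead of bringing up a dead receiver.
	 */
	if (my_ring_posted(ring) == 0) {
		netdev_err(netdev, "unable to alloc receive buffers\n");
		return -ENOMEM;
	}

	return 0;
}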