aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ixgbe
diff options
context:
space:
mode:
authorAlexander Duyck <alexander.h.duyck@intel.com>2009-06-04 12:00:27 -0400
committerDavid S. Miller <davem@davemloft.net>2009-06-07 08:20:21 -0400
commit91281fd36c7670904e0b315e273e896d907adc36 (patch)
tree61a1912e6cd560c39bd300565f13aa16c9ae8b89 /drivers/net/ixgbe
parentfe49f04aa8c0f74c363cbb1e9852a0d7769b5a99 (diff)
ixgbe: move tx processing into NAPI context
This patch moves the tx cleanup processing out of the MSI-X interrupt processing and gives it its own napi routine. This allows the driver to process TX cleanup in a polling context instead of in an interrupt context, which prevents TX from starving RX. Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c141
1 files changed, 107 insertions, 34 deletions
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 2500e8b236c6..f81fff5a3d34 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1178,17 +1178,16 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
1178 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 1178 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1179 for (i = 0; i < q_vector->txr_count; i++) { 1179 for (i = 0; i < q_vector->txr_count; i++) {
1180 tx_ring = &(adapter->tx_ring[r_idx]); 1180 tx_ring = &(adapter->tx_ring[r_idx]);
1181#ifdef CONFIG_IXGBE_DCA
1182 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1183 ixgbe_update_tx_dca(adapter, tx_ring);
1184#endif
1185 tx_ring->total_bytes = 0; 1181 tx_ring->total_bytes = 0;
1186 tx_ring->total_packets = 0; 1182 tx_ring->total_packets = 0;
1187 ixgbe_clean_tx_irq(q_vector, tx_ring);
1188 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 1183 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
1189 r_idx + 1); 1184 r_idx + 1);
1190 } 1185 }
1191 1186
1187 /* disable interrupts on this vector only */
1188 ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
1189 napi_schedule(&q_vector->napi);
1190
1192 return IRQ_HANDLED; 1191 return IRQ_HANDLED;
1193} 1192}
1194 1193
@@ -1228,8 +1227,36 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
1228 1227
1229static irqreturn_t ixgbe_msix_clean_many(int irq, void *data) 1228static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
1230{ 1229{
1231 ixgbe_msix_clean_rx(irq, data); 1230 struct ixgbe_q_vector *q_vector = data;
1232 ixgbe_msix_clean_tx(irq, data); 1231 struct ixgbe_adapter *adapter = q_vector->adapter;
1232 struct ixgbe_ring *ring;
1233 int r_idx;
1234 int i;
1235
1236 if (!q_vector->txr_count && !q_vector->rxr_count)
1237 return IRQ_HANDLED;
1238
1239 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1240 for (i = 0; i < q_vector->txr_count; i++) {
1241 ring = &(adapter->tx_ring[r_idx]);
1242 ring->total_bytes = 0;
1243 ring->total_packets = 0;
1244 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
1245 r_idx + 1);
1246 }
1247
1248 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1249 for (i = 0; i < q_vector->rxr_count; i++) {
1250 ring = &(adapter->rx_ring[r_idx]);
1251 ring->total_bytes = 0;
1252 ring->total_packets = 0;
1253 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1254 r_idx + 1);
1255 }
1256
1257 /* disable interrupts on this vector only */
1258 ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
1259 napi_schedule(&q_vector->napi);
1233 1260
1234 return IRQ_HANDLED; 1261 return IRQ_HANDLED;
1235} 1262}
@@ -1274,21 +1301,34 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
1274} 1301}
1275 1302
1276/** 1303/**
1277 * ixgbe_clean_rxonly_many - msix (aka one shot) rx clean routine 1304 * ixgbe_clean_rxtx_many - msix (aka one shot) rx clean routine
1278 * @napi: napi struct with our devices info in it 1305 * @napi: napi struct with our devices info in it
1279 * @budget: amount of work driver is allowed to do this pass, in packets 1306 * @budget: amount of work driver is allowed to do this pass, in packets
1280 * 1307 *
1281 * This function will clean more than one rx queue associated with a 1308 * This function will clean more than one rx queue associated with a
1282 * q_vector. 1309 * q_vector.
1283 **/ 1310 **/
1284static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget) 1311static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
1285{ 1312{
1286 struct ixgbe_q_vector *q_vector = 1313 struct ixgbe_q_vector *q_vector =
1287 container_of(napi, struct ixgbe_q_vector, napi); 1314 container_of(napi, struct ixgbe_q_vector, napi);
1288 struct ixgbe_adapter *adapter = q_vector->adapter; 1315 struct ixgbe_adapter *adapter = q_vector->adapter;
1289 struct ixgbe_ring *rx_ring = NULL; 1316 struct ixgbe_ring *ring = NULL;
1290 int work_done = 0, i; 1317 int work_done = 0, i;
1291 long r_idx; 1318 long r_idx;
1319 bool tx_clean_complete = true;
1320
1321 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1322 for (i = 0; i < q_vector->txr_count; i++) {
1323 ring = &(adapter->tx_ring[r_idx]);
1324#ifdef CONFIG_IXGBE_DCA
1325 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1326 ixgbe_update_tx_dca(adapter, ring);
1327#endif
1328 tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
1329 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
1330 r_idx + 1);
1331 }
1292 1332
1293 /* attempt to distribute budget to each queue fairly, but don't allow 1333 /* attempt to distribute budget to each queue fairly, but don't allow
1294 * the budget to go below 1 because we'll exit polling */ 1334 * the budget to go below 1 because we'll exit polling */
@@ -1296,18 +1336,18 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
1296 budget = max(budget, 1); 1336 budget = max(budget, 1);
1297 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 1337 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1298 for (i = 0; i < q_vector->rxr_count; i++) { 1338 for (i = 0; i < q_vector->rxr_count; i++) {
1299 rx_ring = &(adapter->rx_ring[r_idx]); 1339 ring = &(adapter->rx_ring[r_idx]);
1300#ifdef CONFIG_IXGBE_DCA 1340#ifdef CONFIG_IXGBE_DCA
1301 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 1341 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1302 ixgbe_update_rx_dca(adapter, rx_ring); 1342 ixgbe_update_rx_dca(adapter, ring);
1303#endif 1343#endif
1304 ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget); 1344 ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
1305 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, 1345 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1306 r_idx + 1); 1346 r_idx + 1);
1307 } 1347 }
1308 1348
1309 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 1349 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1310 rx_ring = &(adapter->rx_ring[r_idx]); 1350 ring = &(adapter->rx_ring[r_idx]);
1311 /* If all Rx work done, exit the polling mode */ 1351 /* If all Rx work done, exit the polling mode */
1312 if (work_done < budget) { 1352 if (work_done < budget) {
1313 napi_complete(napi); 1353 napi_complete(napi);
@@ -1321,6 +1361,46 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
1321 1361
1322 return work_done; 1362 return work_done;
1323} 1363}
1364
1365/**
1366 * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
1367 * @napi: napi struct with our devices info in it
1368 * @budget: amount of work driver is allowed to do this pass, in packets
1369 *
1370 * This function is optimized for cleaning one queue only on a single
1371 * q_vector!!!
1372 **/
1373static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
1374{
1375 struct ixgbe_q_vector *q_vector =
1376 container_of(napi, struct ixgbe_q_vector, napi);
1377 struct ixgbe_adapter *adapter = q_vector->adapter;
1378 struct ixgbe_ring *tx_ring = NULL;
1379 int work_done = 0;
1380 long r_idx;
1381
1382 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1383 tx_ring = &(adapter->tx_ring[r_idx]);
1384#ifdef CONFIG_IXGBE_DCA
1385 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1386 ixgbe_update_tx_dca(adapter, tx_ring);
1387#endif
1388
1389 if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
1390 work_done = budget;
1391
1392 /* If all Rx work done, exit the polling mode */
1393 if (work_done < budget) {
1394 napi_complete(napi);
1395 if (adapter->itr_setting & 1)
1396 ixgbe_set_itr_msix(q_vector);
1397 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1398 ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
1399 }
1400
1401 return work_done;
1402}
1403
1324static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, 1404static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
1325 int r_idx) 1405 int r_idx)
1326{ 1406{
@@ -2213,12 +2293,15 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
2213 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 2293 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
2214 struct napi_struct *napi; 2294 struct napi_struct *napi;
2215 q_vector = adapter->q_vector[q_idx]; 2295 q_vector = adapter->q_vector[q_idx];
2216 if (!q_vector->rxr_count)
2217 continue;
2218 napi = &q_vector->napi; 2296 napi = &q_vector->napi;
2219 if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) && 2297 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2220 (q_vector->rxr_count > 1)) 2298 if (!q_vector->rxr_count || !q_vector->txr_count) {
2221 napi->poll = &ixgbe_clean_rxonly_many; 2299 if (q_vector->txr_count == 1)
2300 napi->poll = &ixgbe_clean_txonly;
2301 else if (q_vector->rxr_count == 1)
2302 napi->poll = &ixgbe_clean_rxonly;
2303 }
2304 }
2222 2305
2223 napi_enable(napi); 2306 napi_enable(napi);
2224 } 2307 }
@@ -2236,8 +2319,6 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
2236 2319
2237 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 2320 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
2238 q_vector = adapter->q_vector[q_idx]; 2321 q_vector = adapter->q_vector[q_idx];
2239 if (!q_vector->rxr_count)
2240 continue;
2241 napi_disable(&q_vector->napi); 2322 napi_disable(&q_vector->napi);
2242 } 2323 }
2243} 2324}
@@ -3321,7 +3402,7 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
3321 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 3402 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3322 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 3403 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3323 napi_vectors = adapter->num_rx_queues; 3404 napi_vectors = adapter->num_rx_queues;
3324 poll = &ixgbe_clean_rxonly; 3405 poll = &ixgbe_clean_rxtx_many;
3325 } else { 3406 } else {
3326 num_q_vectors = 1; 3407 num_q_vectors = 1;
3327 napi_vectors = 1; 3408 napi_vectors = 1;
@@ -3335,9 +3416,7 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
3335 q_vector->adapter = adapter; 3416 q_vector->adapter = adapter;
3336 q_vector->eitr = adapter->eitr_param; 3417 q_vector->eitr = adapter->eitr_param;
3337 q_vector->v_idx = q_idx; 3418 q_vector->v_idx = q_idx;
3338 if (q_idx < napi_vectors) 3419 netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
3339 netif_napi_add(adapter->netdev, &q_vector->napi,
3340 (*poll), 64);
3341 adapter->q_vector[q_idx] = q_vector; 3420 adapter->q_vector[q_idx] = q_vector;
3342 } 3421 }
3343 3422
@@ -3365,22 +3444,16 @@ err_out:
3365static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) 3444static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
3366{ 3445{
3367 int q_idx, num_q_vectors; 3446 int q_idx, num_q_vectors;
3368 int napi_vectors;
3369 3447
3370 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 3448 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3371 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 3449 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3372 napi_vectors = adapter->num_rx_queues; 3450 else
3373 } else {
3374 num_q_vectors = 1; 3451 num_q_vectors = 1;
3375 napi_vectors = 1;
3376 }
3377 3452
3378 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 3453 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
3379 struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx]; 3454 struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
3380
3381 adapter->q_vector[q_idx] = NULL; 3455 adapter->q_vector[q_idx] = NULL;
3382 if (q_idx < napi_vectors) 3456 netif_napi_del(&q_vector->napi);
3383 netif_napi_del(&q_vector->napi);
3384 kfree(q_vector); 3457 kfree(q_vector);
3385 } 3458 }
3386} 3459}