path: root/drivers/net/ehea/ehea_main.c
Diffstat (limited to 'drivers/net/ehea/ehea_main.c')
-rw-r--r--  drivers/net/ehea/ehea_main.c  129
1 file changed, 65 insertions(+), 64 deletions(-)
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 717b12984d1..5ebd545ab04 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -393,9 +393,9 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
 	return 0;
 }
 
-static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev,
-					struct ehea_port_res *pr,
-					int *budget)
+static int ehea_proc_rwqes(struct net_device *dev,
+			   struct ehea_port_res *pr,
+			   int budget)
 {
 	struct ehea_port *port = pr->port;
 	struct ehea_qp *qp = pr->qp;
@@ -408,18 +408,16 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev,
 	int skb_arr_rq2_len = pr->rq2_skba.len;
 	int skb_arr_rq3_len = pr->rq3_skba.len;
 	int processed, processed_rq1, processed_rq2, processed_rq3;
-	int wqe_index, last_wqe_index, rq, my_quota, port_reset;
+	int wqe_index, last_wqe_index, rq, port_reset;
 
 	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
 	last_wqe_index = 0;
-	my_quota = min(*budget, dev->quota);
 
 	cqe = ehea_poll_rq1(qp, &wqe_index);
-	while ((my_quota > 0) && cqe) {
+	while ((processed < budget) && cqe) {
 		ehea_inc_rq1(qp);
 		processed_rq1++;
 		processed++;
-		my_quota--;
 		if (netif_msg_rx_status(port))
 			ehea_dump(cqe, sizeof(*cqe), "CQE");
 
@@ -434,14 +432,14 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev,
 					if (netif_msg_rx_err(port))
 						ehea_error("LL rq1: skb=NULL");
 
-					skb = netdev_alloc_skb(port->netdev,
+					skb = netdev_alloc_skb(dev,
 							       EHEA_L_PKT_SIZE);
 					if (!skb)
 						break;
 				}
 				skb_copy_to_linear_data(skb, ((char*)cqe) + 64,
 						cqe->num_bytes_transfered - 4);
-				ehea_fill_skb(port->netdev, skb, cqe);
+				ehea_fill_skb(dev, skb, cqe);
 			} else if (rq == 2) {  /* RQ2 */
 				skb = get_skb_by_index(skb_arr_rq2,
 						       skb_arr_rq2_len, cqe);
@@ -450,7 +448,7 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev,
 						ehea_error("rq2: skb=NULL");
 					break;
 				}
-				ehea_fill_skb(port->netdev, skb, cqe);
+				ehea_fill_skb(dev, skb, cqe);
 				processed_rq2++;
 			} else {  /* RQ3 */
 				skb = get_skb_by_index(skb_arr_rq3,
@@ -460,7 +458,7 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev,
 						ehea_error("rq3: skb=NULL");
 					break;
 				}
-				ehea_fill_skb(port->netdev, skb, cqe);
+				ehea_fill_skb(dev, skb, cqe);
 				processed_rq3++;
 			}
 
@@ -471,7 +469,7 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev,
 			else
 				netif_receive_skb(skb);
 
-			port->netdev->last_rx = jiffies;
+			dev->last_rx = jiffies;
 		} else {
 			pr->p_stats.poll_receive_errors++;
 			port_reset = ehea_treat_poll_error(pr, rq, cqe,
@@ -484,14 +482,12 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev,
 	}
 
 	pr->rx_packets += processed;
-	*budget -= processed;
 
 	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
 	ehea_refill_rq2(pr, processed_rq2);
 	ehea_refill_rq3(pr, processed_rq3);
 
-	cqe = ehea_poll_rq1(qp, &wqe_index);
-	return cqe;
+	return processed;
 }
 
 static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
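
Note: the hunks above switch ehea_proc_rwqes() from the old two-level quota scheme (*budget plus dev->quota) to the plain NAPI convention: take an int budget, consume at most that many completions, and return how many were actually processed. A minimal sketch of that convention follows; my_rx_queue, completion_pending() and handle_one_completion() are hypothetical stand-ins, not ehea code.

	/* Hedged sketch of the budget convention; every name prefixed "my_"
	 * or suffixed "_completion" is hypothetical, not part of the driver. */
	static int my_proc_rwqes(struct my_rx_queue *rxq, int budget)
	{
		int processed = 0;

		/* Never handle more than the budget granted for this poll round. */
		while (processed < budget && completion_pending(rxq)) {
			handle_one_completion(rxq);
			processed++;
		}

		/* The caller (the napi poll handler) compares this against budget
		 * to decide whether to stay in polled mode. */
		return processed;
	}
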
@@ -554,22 +550,27 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 }
 
 #define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16
+#define EHEA_POLL_MAX_CQES 65535
 
-static int ehea_poll(struct net_device *dev, int *budget)
+static int ehea_poll(struct napi_struct *napi, int budget)
 {
-	struct ehea_port_res *pr = dev->priv;
+	struct ehea_port_res *pr = container_of(napi, struct ehea_port_res, napi);
+	struct net_device *dev = pr->port->netdev;
 	struct ehea_cqe *cqe;
 	struct ehea_cqe *cqe_skb = NULL;
 	int force_irq, wqe_index;
-
-	cqe = ehea_poll_rq1(pr->qp, &wqe_index);
-	cqe_skb = ehea_poll_cq(pr->send_cq);
+	int rx = 0;
 
 	force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ);
+	cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
+
+	if (!force_irq)
+		rx += ehea_proc_rwqes(dev, pr, budget - rx);
 
-	if ((!cqe && !cqe_skb) || force_irq) {
+	while ((rx != budget) || force_irq) {
 		pr->poll_counter = 0;
-		netif_rx_complete(dev);
+		force_irq = 0;
+		netif_rx_complete(dev, napi);
 		ehea_reset_cq_ep(pr->recv_cq);
 		ehea_reset_cq_ep(pr->send_cq);
 		ehea_reset_cq_n1(pr->recv_cq);
@@ -578,43 +579,35 @@ static int ehea_poll(struct net_device *dev, int *budget)
 		cqe_skb = ehea_poll_cq(pr->send_cq);
 
 		if (!cqe && !cqe_skb)
-			return 0;
+			return rx;
 
-		if (!netif_rx_reschedule(dev, dev->quota))
-			return 0;
-	}
-
-	cqe = ehea_proc_rwqes(dev, pr, budget);
-	cqe_skb = ehea_proc_cqes(pr, 300);
+		if (!netif_rx_reschedule(dev, napi))
+			return rx;
 
-	if (cqe || cqe_skb)
-		pr->poll_counter++;
+		cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
+		rx += ehea_proc_rwqes(dev, pr, budget - rx);
+	}
 
-	return 1;
+	pr->poll_counter++;
+	return rx;
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void ehea_netpoll(struct net_device *dev)
 {
 	struct ehea_port *port = netdev_priv(dev);
+	int i;
 
-	netif_rx_schedule(port->port_res[0].d_netdev);
+	for (i = 0; i < port->num_def_qps; i++)
+		netif_rx_schedule(dev, &port->port_res[i].napi);
 }
 #endif
 
-static int ehea_poll_firstqueue(struct net_device *dev, int *budget)
-{
-	struct ehea_port *port = netdev_priv(dev);
-	struct net_device *d_dev = port->port_res[0].d_netdev;
-
-	return ehea_poll(d_dev, budget);
-}
-
 static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
 {
 	struct ehea_port_res *pr = param;
 
-	netif_rx_schedule(pr->d_netdev);
+	netif_rx_schedule(pr->port->netdev, &pr->napi);
 
 	return IRQ_HANDLED;
 }
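
Note: the two hunks above carry the core of the conversion: ehea_poll() now receives a struct napi_struct and an int budget instead of a pseudo net_device and int *budget, and completion and rescheduling go through netif_rx_complete(dev, napi) / netif_rx_schedule(dev, napi), the NAPI API of this kernel generation as shown in the patch. A hedged sketch of that callback shape, using only calls that appear in the patch; my_queue, my_rx_work() and my_reenable_irqs() are illustrative stand-ins.

	/* Sketch only: the general napi_struct poll shape this patch adopts.
	 * my_queue, my_rx_work() and my_reenable_irqs() are hypothetical. */
	struct my_queue {
		struct napi_struct napi;	/* embedded per-queue NAPI context */
		struct net_device *netdev;	/* the real device it serves       */
		/* ... rings, counters ... */
	};

	static int my_poll(struct napi_struct *napi, int budget)
	{
		/* Recover the per-queue context the napi_struct is embedded in. */
		struct my_queue *q = container_of(napi, struct my_queue, napi);
		int done = my_rx_work(q, budget);	/* never exceeds budget */

		if (done < budget) {
			/* Budget not exhausted: leave polled mode and let the
			 * device interrupt for this queue fire again. */
			netif_rx_complete(q->netdev, napi);
			my_reenable_irqs(q);
		}
		return done;
	}
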
@@ -1236,14 +1229,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
 
 	kfree(init_attr);
 
-	pr->d_netdev = alloc_netdev(0, "", ether_setup);
-	if (!pr->d_netdev)
-		goto out_free;
-	pr->d_netdev->priv = pr;
-	pr->d_netdev->weight = 64;
-	pr->d_netdev->poll = ehea_poll;
-	set_bit(__LINK_STATE_START, &pr->d_netdev->state);
-	strcpy(pr->d_netdev->name, port->netdev->name);
+	netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);
 
 	ret = 0;
 	goto out;
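
Note: instead of allocating a dummy net_device per receive queue just to obtain a ->poll hook, each ehea_port_res now embeds a napi_struct (the member is added in ehea.h, which is not part of this file's diff) and registers it against the real netdev with netif_napi_add(), keeping the old weight of 64. An illustrative sketch of the embed-and-register pattern; my_port_res and my_poll stand in for the real structures.

	/* Sketch of the embed-and-register pattern; my_port_res stands in for
	 * the driver's per-queue structure and my_poll for its poll handler. */
	static int my_poll(struct napi_struct *napi, int budget);

	struct my_port_res {
		struct napi_struct napi;	/* replaces the dummy pr->d_netdev */
		/* ... queues, stats ... */
	};

	static void my_init_port_res(struct net_device *netdev,
				     struct my_port_res *pr)
	{
		/* Bind this queue's NAPI context to the real net_device with the
		 * same polling weight (64) the old dummy device used. */
		netif_napi_add(netdev, &pr->napi, my_poll, 64);
	}
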
@@ -1266,8 +1252,6 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
 {
 	int ret, i;
 
-	free_netdev(pr->d_netdev);
-
 	ret = ehea_destroy_qp(pr->qp);
 
 	if (!ret) {
@@ -2248,6 +2232,22 @@ out:
 	return ret;
 }
 
+static void port_napi_disable(struct ehea_port *port)
+{
+	int i;
+
+	for (i = 0; i < port->num_def_qps; i++)
+		napi_disable(&port->port_res[i].napi);
+}
+
+static void port_napi_enable(struct ehea_port *port)
+{
+	int i;
+
+	for (i = 0; i < port->num_def_qps; i++)
+		napi_enable(&port->port_res[i].napi);
+}
+
 static int ehea_open(struct net_device *dev)
 {
 	int ret;
@@ -2259,8 +2259,10 @@ static int ehea_open(struct net_device *dev)
 	ehea_info("enabling port %s", dev->name);
 
 	ret = ehea_up(dev);
-	if (!ret)
+	if (!ret) {
+		port_napi_enable(port);
 		netif_start_queue(dev);
+	}
 
 	up(&port->port_lock);
 
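
Note: with per-queue napi_structs there is no longer a single netif_poll_enable/netif_poll_disable on the netdev; the new port_napi_enable()/port_napi_disable() helpers walk the default queues and call napi_enable()/napi_disable(), and napi_disable() itself waits for a poll in progress to finish, which is why the msleep() loop on __LINK_STATE_RX_SCHED in ehea_down() can go away (next hunks). A hedged sketch of that open/down pairing for a single queue; my_port, my_up() and my_teardown() are hypothetical.

	/* Sketch of the open/down pairing; my_port, my_up() and my_teardown()
	 * are hypothetical stand-ins for the driver's own helpers. */
	struct my_port {
		struct napi_struct napi;	/* one per queue in the real driver */
	};

	static int my_open(struct net_device *dev)
	{
		struct my_port *port = netdev_priv(dev);
		int ret = my_up(dev);

		if (!ret) {
			napi_enable(&port->napi);	/* polling allowed again */
			netif_start_queue(dev);
		}
		return ret;
	}

	static int my_down(struct net_device *dev)
	{
		struct my_port *port = netdev_priv(dev);

		/* napi_disable() blocks until any poll in flight has completed,
		 * replacing the old busy-wait on __LINK_STATE_RX_SCHED. */
		napi_disable(&port->napi);
		my_teardown(dev);
		return 0;
	}
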
@@ -2269,7 +2271,7 @@ static int ehea_open(struct net_device *dev)
 
 static int ehea_down(struct net_device *dev)
 {
-	int ret, i;
+	int ret;
 	struct ehea_port *port = netdev_priv(dev);
 
 	if (port->state == EHEA_PORT_DOWN)
@@ -2278,10 +2280,7 @@ static int ehea_down(struct net_device *dev)
 	ehea_drop_multicast_list(dev);
 	ehea_free_interrupts(dev);
 
-	for (i = 0; i < port->num_def_qps; i++)
-		while (test_bit(__LINK_STATE_RX_SCHED,
-				&port->port_res[i].d_netdev->state))
-			msleep(1);
+	port_napi_disable(port);
 
 	port->state = EHEA_PORT_DOWN;
 
@@ -2319,7 +2318,8 @@ static void ehea_reset_port(struct work_struct *work)
 	port->resets++;
 	down(&port->port_lock);
 	netif_stop_queue(dev);
-	netif_poll_disable(dev);
+
+	port_napi_disable(port);
 
 	ehea_down(dev);
 
@@ -2330,7 +2330,8 @@ static void ehea_reset_port(struct work_struct *work)
 	if (netif_msg_timer(port))
 		ehea_info("Device %s resetted successfully", dev->name);
 
-	netif_poll_enable(dev);
+	port_napi_enable(port);
+
 	netif_wake_queue(dev);
 out:
 	up(&port->port_lock);
@@ -2358,7 +2359,9 @@ static void ehea_rereg_mrs(struct work_struct *work)
 						dev->name);
 			down(&port->port_lock);
 			netif_stop_queue(dev);
-			netif_poll_disable(dev);
+
+			port_napi_disable(port);
+
 			ehea_down(dev);
 			up(&port->port_lock);
 		}
@@ -2406,7 +2409,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
 
 			ret = ehea_up(dev);
 			if (!ret) {
-				netif_poll_enable(dev);
+				port_napi_enable(port);
 				netif_wake_queue(dev);
 			}
 
@@ -2644,11 +2647,9 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
 	memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
 
 	dev->open = ehea_open;
-	dev->poll = ehea_poll_firstqueue;
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	dev->poll_controller = ehea_netpoll;
 #endif
-	dev->weight = 64;
 	dev->stop = ehea_stop;
 	dev->hard_start_xmit = ehea_start_xmit;
 	dev->get_stats = ehea_get_stats;