author     Hayes Wang <hayeswang@realtek.com>             2019-08-12 23:42:06 -0400
committer  Jakub Kicinski <jakub.kicinski@netronome.com>  2019-08-13 21:12:08 -0400
commit     252df8b86667fe4640a2d9fb5cfc705ad285d578 (patch)
tree       165b4670b0bc1090adc730f2096795eaea1e2eb9 /drivers/net/usb
parent     ec5791c202aca90c1b3b99dff268a995cf2d6aa1 (diff)
r8152: replace array with linking list for rx information
The original method uses an array to store the rx information. The
new one uses a list to link each rx structure, which makes it possible
to increase or decrease the number of rx structures dynamically.
Signed-off-by: Hayes Wang <hayeswang@realtek.com>
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Diffstat (limited to 'drivers/net/usb')
-rw-r--r--  drivers/net/usb/r8152.c | 182
1 file changed, 125 insertions(+), 57 deletions(-)
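The core idea of the patch, in isolation: instead of a fixed-size array whose length is decided at compile time, each rx buffer carries its own list linkage and is added to or removed from a list at runtime, with a counter tracking how many buffers currently exist. The standalone userspace sketch below illustrates that pattern only; it is not the driver code, and the names (rx_node, rx_pool, pool_add, pool_remove) are invented for the example.

/* Illustrative userspace sketch only -- not the driver code. */
#include <stdio.h>
#include <stdlib.h>

struct rx_node {
	struct rx_node *prev, *next;	/* intrusive list linkage */
	void *buffer;			/* per-buffer payload */
};

struct rx_pool {
	struct rx_node head;		/* sentinel of a circular list */
	int count;			/* buffers currently linked */
};

static void pool_init(struct rx_pool *p)
{
	p->head.prev = p->head.next = &p->head;
	p->count = 0;
}

/* Grow by one buffer: with an array this would mean re-sizing and
 * re-indexing; with a list it is a constant-time link at the tail. */
static struct rx_node *pool_add(struct rx_pool *p, size_t buf_sz)
{
	struct rx_node *n = malloc(sizeof(*n));

	if (!n)
		return NULL;
	n->buffer = malloc(buf_sz);
	if (!n->buffer) {
		free(n);
		return NULL;
	}
	n->prev = p->head.prev;
	n->next = &p->head;
	p->head.prev->next = n;
	p->head.prev = n;
	p->count++;
	return n;
}

/* Shrink: unlink and free one node. */
static void pool_remove(struct rx_pool *p, struct rx_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	free(n->buffer);
	free(n);
	p->count--;
}

int main(void)
{
	struct rx_pool pool;
	int i;

	pool_init(&pool);
	for (i = 0; i < 10; i++)	/* start with ten buffers ... */
		pool_add(&pool, 2048);
	while (pool.count > 4)		/* ... shrink to four on demand */
		pool_remove(&pool, pool.head.next);
	printf("buffers in pool: %d\n", pool.count);
	while (pool.count > 0)		/* clean up */
		pool_remove(&pool, pool.head.next);
	return 0;
}

In the driver itself, the same roles are played by the info_list member added to struct rx_agg, the new tp->rx_info list head, and the atomic_t rx_count counter, as the diff below shows.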
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 94da79028a65..d063c9b358e5 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -22,6 +22,7 @@
 #include <linux/mdio.h>
 #include <linux/usb/cdc.h>
 #include <linux/suspend.h>
+#include <linux/atomic.h>
 #include <linux/acpi.h>
 
 /* Information for net-next */
@@ -694,7 +695,7 @@ struct tx_desc {
 struct r8152;
 
 struct rx_agg {
-	struct list_head list;
+	struct list_head list, info_list;
 	struct urb *urb;
 	struct r8152 *context;
 	void *buffer;
@@ -719,7 +720,7 @@ struct r8152 {
 	struct net_device *netdev;
 	struct urb *intr_urb;
 	struct tx_agg tx_info[RTL8152_MAX_TX];
-	struct rx_agg rx_info[RTL8152_MAX_RX];
+	struct list_head rx_info;
 	struct list_head rx_done, tx_free;
 	struct sk_buff_head tx_queue, rx_queue;
 	spinlock_t rx_lock, tx_lock;
@@ -744,6 +745,8 @@ struct r8152 {
 		void (*autosuspend_en)(struct r8152 *tp, bool enable);
 	} rtl_ops;
 
+	atomic_t rx_count;
+
 	int intr_interval;
 	u32 saved_wolopts;
 	u32 msg_enable;
@@ -1468,18 +1471,81 @@ static inline void *tx_agg_align(void *data)
 	return (void *)ALIGN((uintptr_t)data, TX_ALIGN);
 }
 
+static void free_rx_agg(struct r8152 *tp, struct rx_agg *agg)
+{
+	list_del(&agg->info_list);
+
+	usb_free_urb(agg->urb);
+	kfree(agg->buffer);
+	kfree(agg);
+
+	atomic_dec(&tp->rx_count);
+}
+
+static struct rx_agg *alloc_rx_agg(struct r8152 *tp, gfp_t mflags)
+{
+	struct net_device *netdev = tp->netdev;
+	int node = netdev->dev.parent ? dev_to_node(netdev->dev.parent) : -1;
+	struct rx_agg *rx_agg;
+	unsigned long flags;
+	u8 *buf;
+
+	rx_agg = kmalloc_node(sizeof(*rx_agg), mflags, node);
+	if (!rx_agg)
+		return NULL;
+
+	buf = kmalloc_node(tp->rx_buf_sz, mflags, node);
+	if (!buf)
+		goto free_rx;
+
+	if (buf != rx_agg_align(buf)) {
+		kfree(buf);
+		buf = kmalloc_node(tp->rx_buf_sz + RX_ALIGN, mflags,
+				   node);
+		if (!buf)
+			goto free_rx;
+	}
+
+	rx_agg->buffer = buf;
+	rx_agg->head = rx_agg_align(buf);
+
+	rx_agg->urb = usb_alloc_urb(0, mflags);
+	if (!rx_agg->urb)
+		goto free_buf;
+
+	rx_agg->context = tp;
+
+	INIT_LIST_HEAD(&rx_agg->list);
+	INIT_LIST_HEAD(&rx_agg->info_list);
+	spin_lock_irqsave(&tp->rx_lock, flags);
+	list_add_tail(&rx_agg->info_list, &tp->rx_info);
+	spin_unlock_irqrestore(&tp->rx_lock, flags);
+
+	atomic_inc(&tp->rx_count);
+
+	return rx_agg;
+
+free_buf:
+	kfree(rx_agg->buffer);
+free_rx:
+	kfree(rx_agg);
+	return NULL;
+}
+
 static void free_all_mem(struct r8152 *tp)
 {
+	struct rx_agg *agg, *agg_next;
+	unsigned long flags;
 	int i;
 
-	for (i = 0; i < RTL8152_MAX_RX; i++) {
-		usb_free_urb(tp->rx_info[i].urb);
-		tp->rx_info[i].urb = NULL;
+	spin_lock_irqsave(&tp->rx_lock, flags);
 
-		kfree(tp->rx_info[i].buffer);
-		tp->rx_info[i].buffer = NULL;
-		tp->rx_info[i].head = NULL;
-	}
+	list_for_each_entry_safe(agg, agg_next, &tp->rx_info, info_list)
+		free_rx_agg(tp, agg);
+
+	spin_unlock_irqrestore(&tp->rx_lock, flags);
+
+	WARN_ON(atomic_read(&tp->rx_count));
 
 	for (i = 0; i < RTL8152_MAX_TX; i++) {
 		usb_free_urb(tp->tx_info[i].urb);
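A side note on the alignment handling inside alloc_rx_agg() above: the buffer is first allocated at the exact size, and only if the returned pointer is not already aligned is it freed and re-allocated with RX_ALIGN bytes of headroom, so that an aligned pointer (stored in agg->head) can be carved out of the raw allocation (kept in agg->buffer for the later kfree()). Below is a minimal userspace analogue of that pattern; the names rx_align and alloc_aligned_buf are invented for the sketch, and 8-byte alignment stands in for RX_ALIGN.

/* Userspace sketch of the allocate-then-realign pattern; not driver code. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define RX_ALIGN_SKETCH 8	/* stand-in for the driver's RX_ALIGN */

static void *rx_align(void *p)
{
	/* round the pointer up to the next RX_ALIGN_SKETCH boundary */
	return (void *)(((uintptr_t)p + RX_ALIGN_SKETCH - 1) &
			~(uintptr_t)(RX_ALIGN_SKETCH - 1));
}

/* Returns an aligned pointer for use as the buffer start and stores the
 * raw allocation in *raw so it can later be passed to free(). */
static void *alloc_aligned_buf(size_t size, void **raw)
{
	void *buf = malloc(size);

	if (!buf)
		return NULL;

	if (buf != rx_align(buf)) {
		/* Unaligned: throw it away and over-allocate so an aligned
		 * pointer is guaranteed to fit inside the block. */
		free(buf);
		buf = malloc(size + RX_ALIGN_SKETCH);
		if (!buf)
			return NULL;
	}

	*raw = buf;		/* keep for free(), like agg->buffer */
	return rx_align(buf);	/* aligned start, like agg->head */
}

int main(void)
{
	void *raw;
	void *head = alloc_aligned_buf(2048, &raw);

	if (!head)
		return 1;
	printf("raw=%p aligned=%p\n", raw, head);
	free(raw);
	return 0;
}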
@@ -1503,46 +1569,28 @@ static int alloc_all_mem(struct r8152 *tp)
 	struct usb_interface *intf = tp->intf;
 	struct usb_host_interface *alt = intf->cur_altsetting;
 	struct usb_host_endpoint *ep_intr = alt->endpoint + 2;
-	struct urb *urb;
 	int node, i;
-	u8 *buf;
 
 	node = netdev->dev.parent ? dev_to_node(netdev->dev.parent) : -1;
 
 	spin_lock_init(&tp->rx_lock);
 	spin_lock_init(&tp->tx_lock);
+	INIT_LIST_HEAD(&tp->rx_info);
 	INIT_LIST_HEAD(&tp->tx_free);
 	INIT_LIST_HEAD(&tp->rx_done);
 	skb_queue_head_init(&tp->tx_queue);
 	skb_queue_head_init(&tp->rx_queue);
+	atomic_set(&tp->rx_count, 0);
 
 	for (i = 0; i < RTL8152_MAX_RX; i++) {
-		buf = kmalloc_node(tp->rx_buf_sz, GFP_KERNEL, node);
-		if (!buf)
+		if (!alloc_rx_agg(tp, GFP_KERNEL))
 			goto err1;
-
-		if (buf != rx_agg_align(buf)) {
-			kfree(buf);
-			buf = kmalloc_node(tp->rx_buf_sz + RX_ALIGN, GFP_KERNEL,
-					   node);
-			if (!buf)
-				goto err1;
-		}
-
-		urb = usb_alloc_urb(0, GFP_KERNEL);
-		if (!urb) {
-			kfree(buf);
-			goto err1;
-		}
-
-		INIT_LIST_HEAD(&tp->rx_info[i].list);
-		tp->rx_info[i].context = tp;
-		tp->rx_info[i].urb = urb;
-		tp->rx_info[i].buffer = buf;
-		tp->rx_info[i].head = rx_agg_align(buf);
 	}
 
 	for (i = 0; i < RTL8152_MAX_TX; i++) {
+		struct urb *urb;
+		u8 *buf;
+
 		buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node);
 		if (!buf)
 			goto err1;
@@ -2331,44 +2379,64 @@ static void rxdy_gated_en(struct r8152 *tp, bool enable)
 
 static int rtl_start_rx(struct r8152 *tp)
 {
-	int i, ret = 0;
+	struct rx_agg *agg, *agg_next;
+	struct list_head tmp_list;
+	unsigned long flags;
+	int ret = 0;
 
-	INIT_LIST_HEAD(&tp->rx_done);
-	for (i = 0; i < RTL8152_MAX_RX; i++) {
-		INIT_LIST_HEAD(&tp->rx_info[i].list);
-		ret = r8152_submit_rx(tp, &tp->rx_info[i], GFP_KERNEL);
-		if (ret)
-			break;
-	}
+	INIT_LIST_HEAD(&tmp_list);
 
-	if (ret && ++i < RTL8152_MAX_RX) {
-		struct list_head rx_queue;
-		unsigned long flags;
+	spin_lock_irqsave(&tp->rx_lock, flags);
 
-		INIT_LIST_HEAD(&rx_queue);
+	INIT_LIST_HEAD(&tp->rx_done);
 
-		do {
-			struct rx_agg *agg = &tp->rx_info[i++];
-			struct urb *urb = agg->urb;
+	list_splice_init(&tp->rx_info, &tmp_list);
 
-			urb->actual_length = 0;
-			list_add_tail(&agg->list, &rx_queue);
-		} while (i < RTL8152_MAX_RX);
+	spin_unlock_irqrestore(&tp->rx_lock, flags);
 
-		spin_lock_irqsave(&tp->rx_lock, flags);
-		list_splice_tail(&rx_queue, &tp->rx_done);
-		spin_unlock_irqrestore(&tp->rx_lock, flags);
+	list_for_each_entry_safe(agg, agg_next, &tmp_list, info_list) {
+		INIT_LIST_HEAD(&agg->list);
+
+		if (ret < 0)
+			list_add_tail(&agg->list, &tp->rx_done);
+		else
+			ret = r8152_submit_rx(tp, agg, GFP_KERNEL);
 	}
 
+	spin_lock_irqsave(&tp->rx_lock, flags);
+	WARN_ON(!list_empty(&tp->rx_info));
+	list_splice(&tmp_list, &tp->rx_info);
+	spin_unlock_irqrestore(&tp->rx_lock, flags);
+
 	return ret;
 }
 
 static int rtl_stop_rx(struct r8152 *tp)
 {
-	int i;
+	struct rx_agg *agg, *agg_next;
+	struct list_head tmp_list;
+	unsigned long flags;
+
+	INIT_LIST_HEAD(&tmp_list);
+
+	/* The usb_kill_urb() couldn't be used in atomic.
+	 * Therefore, move the list of rx_info to a tmp one.
+	 * Then, list_for_each_entry_safe could be used without
+	 * spin lock.
+	 */
+
+	spin_lock_irqsave(&tp->rx_lock, flags);
+	list_splice_init(&tp->rx_info, &tmp_list);
+	spin_unlock_irqrestore(&tp->rx_lock, flags);
+
+	list_for_each_entry_safe(agg, agg_next, &tmp_list, info_list)
+		usb_kill_urb(agg->urb);
 
-	for (i = 0; i < RTL8152_MAX_RX; i++)
-		usb_kill_urb(tp->rx_info[i].urb);
+	/* Move back the list of temp to the rx_info */
+	spin_lock_irqsave(&tp->rx_lock, flags);
+	WARN_ON(!list_empty(&tp->rx_info));
+	list_splice(&tmp_list, &tp->rx_info);
+	spin_unlock_irqrestore(&tp->rx_lock, flags);
 
 	while (!skb_queue_empty(&tp->rx_queue))
 		dev_kfree_skb(__skb_dequeue(&tp->rx_queue));
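The splice-to-a-temporary-list pattern in rtl_stop_rx() above deserves a closing note: usb_kill_urb() may sleep, so it cannot run under the rx_lock spinlock; the whole rx_info list is therefore detached onto a private list while the lock is held, walked without the lock, and spliced back afterwards. The sketch below is a rough userspace analogue of that locking pattern only; the names (struct node, rx_info, kill_one, stop_all) are invented, and a pthread mutex stands in for the spinlock.

/* Userspace sketch of "splice under the lock, work outside it"; not driver code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int id;
};

static struct node *rx_info;	/* shared list, protected by the lock */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for usb_kill_urb(): may block, so it must not run under the lock. */
static void kill_one(struct node *n)
{
	printf("killing %d\n", n->id);
}

static void stop_all(void)
{
	struct node *tmp_list, *n;

	/* Detach the whole list while holding the lock ... */
	pthread_mutex_lock(&lock);
	tmp_list = rx_info;
	rx_info = NULL;
	pthread_mutex_unlock(&lock);

	/* ... walk it without the lock, since kill_one() may block ... */
	for (n = tmp_list; n; n = n->next)
		kill_one(n);

	/* ... then put it back so rx_info still owns every node. */
	pthread_mutex_lock(&lock);
	rx_info = tmp_list;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->id = i;
		n->next = rx_info;
		rx_info = n;
	}

	stop_all();

	while (rx_info) {	/* clean up */
		struct node *n = rx_info;

		rx_info = n->next;
		free(n);
	}
	return 0;
}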