path: root/drivers/net/ethernet/tile/tilegx.c
author	Chris Metcalf <cmetcalf@tilera.com>	2013-08-01 11:36:42 -0400
committer	David S. Miller <davem@davemloft.net>	2013-08-01 17:35:50 -0400
commit	2628e8af31a0ee4d28304d96a72fdf4d7822508c (patch)
tree	384c3f522e5510d2706472545a6cbae387bbb803 /drivers/net/ethernet/tile/tilegx.c
parent	48f2a4e1e83992af6c721c6c93a6b012910e255f (diff)
tile: support jumbo frames in the tilegx network driver
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/tile/tilegx.c')
-rw-r--r--	drivers/net/ethernet/tile/tilegx.c	347
1 file changed, 188 insertions, 159 deletions
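
A brief usage sketch, based on the new "tile_net.jumbo" parameter and the relaxed MTU check added below (the interface name "gbe0" is hypothetical and the buffer count is only an example):

    tile_net.jumbo=900       # kernel command line / module option: reserve 900 jumbo buffers
    ip link set dev gbe0 mtu 9000

With jumbo buffers reserved, tile_net_change_mtu() accepts MTUs up to 9000 instead of 1500.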
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 60855717c5df..39c1e9e83845 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -76,6 +76,9 @@
 
 #define MAX_FRAGS (MAX_SKB_FRAGS + 1)
 
+/* The "kinds" of buffer stacks (small/large/jumbo). */
+#define MAX_KINDS 3
+
 /* Size of completions data to allocate.
  * ISSUE: Probably more than needed since we don't use all the channels.
  */
@@ -141,10 +144,8 @@ struct tile_net_info {
 	/* NAPI flags. */
 	bool napi_added;
 	bool napi_enabled;
-	/* Number of small sk_buffs which must still be provided. */
-	unsigned int num_needed_small_buffers;
-	/* Number of large sk_buffs which must still be provided. */
-	unsigned int num_needed_large_buffers;
+	/* Number of buffers (by kind) which must still be provided. */
+	unsigned int num_needed_buffers[MAX_KINDS];
 	/* A timer for handling egress completions. */
 	struct hrtimer egress_timer;
 	/* True if "egress_timer" is scheduled. */
@@ -200,24 +201,25 @@ static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info);
 /* The "context" for all devices. */
 static gxio_mpipe_context_t context;
 
-/* Buffer sizes and mpipe enum codes for buffer stacks.
+/* The buffer size enums for each buffer stack.
  * See arch/tile/include/gxio/mpipe.h for the set of possible values.
+ * We avoid the "10384" size because it can induce "false chaining"
+ * on "cut-through" jumbo packets.
  */
-#define BUFFER_SIZE_SMALL_ENUM GXIO_MPIPE_BUFFER_SIZE_128
-#define BUFFER_SIZE_SMALL 128
-#define BUFFER_SIZE_LARGE_ENUM GXIO_MPIPE_BUFFER_SIZE_1664
-#define BUFFER_SIZE_LARGE 1664
+static gxio_mpipe_buffer_size_enum_t buffer_size_enums[MAX_KINDS] = {
+	GXIO_MPIPE_BUFFER_SIZE_128,
+	GXIO_MPIPE_BUFFER_SIZE_1664,
+	GXIO_MPIPE_BUFFER_SIZE_16384
+};
 
-/* The small/large "buffer stacks". */
-static int small_buffer_stack = -1;
-static int large_buffer_stack = -1;
+/* The actual memory allocated for the buffer stacks. */
+static void *buffer_stack_vas[MAX_KINDS];
 
-/* Amount of memory allocated for each buffer stack. */
-static size_t buffer_stack_size;
+/* The amount of memory allocated for each buffer stack. */
+static size_t buffer_stack_bytes[MAX_KINDS];
 
-/* The actual memory allocated for the buffer stacks. */
-static void *small_buffer_stack_va;
-static void *large_buffer_stack_va;
+/* The first buffer stack index (small = +0, large = +1, jumbo = +2). */
+static int first_buffer_stack = -1;
 
 /* The buckets. */
 static int first_bucket = -1;
@@ -238,6 +240,9 @@ static char *loopify_link_name;
 /* If "tile_net.custom" was specified, this is non-NULL. */
 static char *custom_str;
 
+/* If "tile_net.jumbo=NUM" was specified, this is "NUM". */
+static uint jumbo_num;
+
 /* The "tile_net.cpus" argument specifies the cpus that are dedicated
  * to handle ingress packets.
  *
@@ -292,6 +297,12 @@ MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress");
 module_param_named(custom, custom_str, charp, 0444);
 MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier");
 
+/* The "tile_net.jumbo" argument causes us to support "jumbo" packets,
+ * and to allocate the given number of "jumbo" buffers.
+ */
+module_param_named(jumbo, jumbo_num, uint, 0444);
+MODULE_PARM_DESC(jumbo, "the number of buffers to support jumbo packets");
+
 /* Atomically update a statistics field.
  * Note that on TILE-Gx, this operation is fire-and-forget on the
  * issuing core (single-cycle dispatch) and takes only a few cycles
@@ -305,15 +316,15 @@ static void tile_net_stats_add(unsigned long value, unsigned long *field)
 }
 
 /* Allocate and push a buffer. */
-static bool tile_net_provide_buffer(bool small)
+static bool tile_net_provide_buffer(int kind)
 {
-	int stack = small ? small_buffer_stack : large_buffer_stack;
+	gxio_mpipe_buffer_size_enum_t bse = buffer_size_enums[kind];
+	size_t bs = gxio_mpipe_buffer_size_enum_to_buffer_size(bse);
 	const unsigned long buffer_alignment = 128;
 	struct sk_buff *skb;
 	int len;
 
-	len = sizeof(struct sk_buff **) + buffer_alignment;
-	len += (small ? BUFFER_SIZE_SMALL : BUFFER_SIZE_LARGE);
+	len = sizeof(struct sk_buff **) + buffer_alignment + bs;
 	skb = dev_alloc_skb(len);
 	if (skb == NULL)
 		return false;
@@ -328,7 +339,7 @@ static bool tile_net_provide_buffer(bool small)
 	/* Make sure "skb" and the back-pointer have been flushed. */
 	wmb();
 
-	gxio_mpipe_push_buffer(&context, stack,
+	gxio_mpipe_push_buffer(&context, first_buffer_stack + kind,
 			       (void *)va_to_tile_io_addr(skb->data));
 
 	return true;
@@ -369,24 +380,19 @@ static void tile_net_pop_all_buffers(int stack)
 static void tile_net_provide_needed_buffers(void)
 {
 	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+	int kind;
 
-	while (info->num_needed_small_buffers != 0) {
-		if (!tile_net_provide_buffer(true))
-			goto oops;
-		info->num_needed_small_buffers--;
-	}
-
-	while (info->num_needed_large_buffers != 0) {
-		if (!tile_net_provide_buffer(false))
-			goto oops;
-		info->num_needed_large_buffers--;
+	for (kind = 0; kind < MAX_KINDS; kind++) {
+		while (info->num_needed_buffers[kind] != 0) {
+			if (!tile_net_provide_buffer(kind)) {
+				/* Add info to the allocation failure dump. */
+				pr_notice("Tile %d still needs some buffers\n",
+					  info->my_cpu);
+				return;
+			}
+			info->num_needed_buffers[kind]--;
+		}
 	}
-
-	return;
-
-oops:
-	/* Add a description to the page allocation failure dump. */
-	pr_notice("Tile %d still needs some buffers\n", info->my_cpu);
 }
 
 static inline bool filter_packet(struct net_device *dev, void *buf)
@@ -426,10 +432,12 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
 	tile_net_stats_add(len, &priv->stats.rx_bytes);
 
 	/* Need a new buffer. */
-	if (idesc->size == BUFFER_SIZE_SMALL_ENUM)
-		info->num_needed_small_buffers++;
+	if (idesc->size == buffer_size_enums[0])
+		info->num_needed_buffers[0]++;
+	else if (idesc->size == buffer_size_enums[1])
+		info->num_needed_buffers[1]++;
 	else
-		info->num_needed_large_buffers++;
+		info->num_needed_buffers[2]++;
 }
 
 /* Handle a packet. Return true if "processed", false if "filtered". */
@@ -437,29 +445,29 @@ static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
 {
 	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
 	struct net_device *dev = tile_net_devs_for_channel[idesc->channel];
+	struct tile_net_priv *priv = netdev_priv(dev);
 	uint8_t l2_offset;
 	void *va;
 	void *buf;
 	unsigned long len;
 	bool filter;
 
-	/* Drop packets for which no buffer was available.
-	 * NOTE: This happens under heavy load.
+	/* Drop packets for which no buffer was available (which can
+	 * happen under heavy load), or for which the me/tr/ce flags
+	 * are set (which can happen for jumbo cut-through packets,
+	 * or with a customized classifier).
 	 */
-	if (idesc->be) {
-		struct tile_net_priv *priv = netdev_priv(dev);
-		tile_net_stats_add(1, &priv->stats.rx_dropped);
-		gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
-		if (net_ratelimit())
-			pr_info("Dropping packet (insufficient buffers).\n");
-		return false;
+	if (idesc->be || idesc->me || idesc->tr || idesc->ce) {
+		if (dev)
+			tile_net_stats_add(1, &priv->stats.rx_errors);
+		goto drop;
 	}
 
 	/* Get the "l2_offset", if allowed. */
 	l2_offset = custom_str ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc);
 
-	/* Get the raw buffer VA (includes "headroom"). */
-	va = tile_io_addr_to_va((unsigned long)(long)idesc->va);
+	/* Get the VA (including NET_IP_ALIGN bytes of "headroom"). */
+	va = tile_io_addr_to_va((unsigned long)idesc->va);
 
 	/* Get the actual packet start/length. */
 	buf = va + l2_offset;
@@ -470,6 +478,9 @@ static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
 
 	filter = filter_packet(dev, buf);
 	if (filter) {
+		if (dev)
+			tile_net_stats_add(1, &priv->stats.rx_dropped);
+drop:
 		gxio_mpipe_iqueue_drop(&info->iqueue, idesc);
 	} else {
 		struct sk_buff *skb = mpipe_buf_to_skb(va);
@@ -722,86 +733,95 @@ static int tile_net_update(struct net_device *dev)
 	return 0;
 }
 
-/* Allocate and initialize mpipe buffer stacks, and register them in
- * the mPIPE TLBs, for both small and large packet sizes.
- * This routine supports tile_net_init_mpipe(), below.
- */
-static int init_buffer_stacks(struct net_device *dev, int num_buffers)
+/* Initialize a buffer stack. */
+static int create_buffer_stack(struct net_device *dev,
+			       int kind, size_t num_buffers)
 {
 	pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH);
-	int rc;
+	size_t needed = gxio_mpipe_calc_buffer_stack_bytes(num_buffers);
+	int stack_idx = first_buffer_stack + kind;
+	void *va;
+	int i, rc;
 
-	/* Compute stack bytes; we round up to 64KB and then use
-	 * alloc_pages() so we get the required 64KB alignment as well.
+	/* Round up to 64KB and then use alloc_pages() so we get the
+	 * required 64KB alignment.
 	 */
-	buffer_stack_size =
-		ALIGN(gxio_mpipe_calc_buffer_stack_bytes(num_buffers),
-		      64 * 1024);
-
-	/* Allocate two buffer stack indices. */
-	rc = gxio_mpipe_alloc_buffer_stacks(&context, 2, 0, 0);
-	if (rc < 0) {
-		netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks failed: %d\n",
-			   rc);
-		return rc;
-	}
-	small_buffer_stack = rc;
-	large_buffer_stack = rc + 1;
+	buffer_stack_bytes[kind] = ALIGN(needed, 64 * 1024);
 
-	/* Allocate the small memory stack. */
-	small_buffer_stack_va =
-		alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
-	if (small_buffer_stack_va == NULL) {
+	va = alloc_pages_exact(buffer_stack_bytes[kind], GFP_KERNEL);
+	if (va == NULL) {
 		netdev_err(dev,
-			   "Could not alloc %zd bytes for buffer stacks\n",
-			   buffer_stack_size);
+			   "Could not alloc %zd bytes for buffer stack %d\n",
+			   buffer_stack_bytes[kind], kind);
 		return -ENOMEM;
 	}
-	rc = gxio_mpipe_init_buffer_stack(&context, small_buffer_stack,
-					  BUFFER_SIZE_SMALL_ENUM,
-					  small_buffer_stack_va,
-					  buffer_stack_size, 0);
+
+	/* Initialize the buffer stack. */
+	rc = gxio_mpipe_init_buffer_stack(&context, stack_idx,
+					  buffer_size_enums[kind],
+					  va, buffer_stack_bytes[kind], 0);
 	if (rc != 0) {
 		netdev_err(dev, "gxio_mpipe_init_buffer_stack: %d\n", rc);
+		free_pages_exact(va, buffer_stack_bytes[kind]);
 		return rc;
 	}
-	rc = gxio_mpipe_register_client_memory(&context, small_buffer_stack,
+
+	buffer_stack_vas[kind] = va;
+
+	rc = gxio_mpipe_register_client_memory(&context, stack_idx,
 					       hash_pte, 0);
 	if (rc != 0) {
-		netdev_err(dev,
-			   "gxio_mpipe_register_buffer_memory failed: %d\n",
-			   rc);
+		netdev_err(dev, "gxio_mpipe_register_client_memory: %d\n", rc);
 		return rc;
 	}
 
-	/* Allocate the large buffer stack. */
-	large_buffer_stack_va =
-		alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
-	if (large_buffer_stack_va == NULL) {
-		netdev_err(dev,
-			   "Could not alloc %zd bytes for buffer stacks\n",
-			   buffer_stack_size);
-		return -ENOMEM;
-	}
-	rc = gxio_mpipe_init_buffer_stack(&context, large_buffer_stack,
-					  BUFFER_SIZE_LARGE_ENUM,
-					  large_buffer_stack_va,
-					  buffer_stack_size, 0);
-	if (rc != 0) {
-		netdev_err(dev, "gxio_mpipe_init_buffer_stack failed: %d\n",
-			   rc);
-		return rc;
+	/* Provide initial buffers. */
+	for (i = 0; i < num_buffers; i++) {
+		if (!tile_net_provide_buffer(kind)) {
+			netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
+			return -ENOMEM;
+		}
 	}
-	rc = gxio_mpipe_register_client_memory(&context, large_buffer_stack,
-					       hash_pte, 0);
-	if (rc != 0) {
-		netdev_err(dev,
-			   "gxio_mpipe_register_buffer_memory failed: %d\n",
-			   rc);
+
+	return 0;
+}
+
+/* Allocate and initialize mpipe buffer stacks, and register them in
+ * the mPIPE TLBs, for small, large, and (possibly) jumbo packet sizes.
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int init_buffer_stacks(struct net_device *dev,
+			      int network_cpus_count)
+{
+	int num_kinds = MAX_KINDS - (jumbo_num == 0);
+	size_t num_buffers;
+	int rc;
+
+	/* Allocate the buffer stacks. */
+	rc = gxio_mpipe_alloc_buffer_stacks(&context, num_kinds, 0, 0);
+	if (rc < 0) {
+		netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks: %d\n", rc);
 		return rc;
 	}
+	first_buffer_stack = rc;
 
-	return 0;
+	/* Enough small/large buffers to (normally) avoid buffer errors. */
+	num_buffers =
+		network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH);
+
+	/* Allocate the small memory stack. */
+	if (rc >= 0)
+		rc = create_buffer_stack(dev, 0, num_buffers);
+
+	/* Allocate the large buffer stack. */
+	if (rc >= 0)
+		rc = create_buffer_stack(dev, 1, num_buffers);
+
+	/* Allocate the jumbo buffer stack if needed. */
+	if (rc >= 0 && jumbo_num != 0)
+		rc = create_buffer_stack(dev, 2, jumbo_num);
+
+	return rc;
 }
 
 /* Allocate per-cpu resources (memory for completions and idescs).
@@ -940,13 +960,14 @@ static int tile_net_setup_interrupts(struct net_device *dev)
 /* Undo any state set up partially by a failed call to tile_net_init_mpipe. */
 static void tile_net_init_mpipe_fail(void)
 {
-	int cpu;
+	int kind, cpu;
 
 	/* Do cleanups that require the mpipe context first. */
-	if (small_buffer_stack >= 0)
-		tile_net_pop_all_buffers(small_buffer_stack);
-	if (large_buffer_stack >= 0)
-		tile_net_pop_all_buffers(large_buffer_stack);
+	for (kind = 0; kind < MAX_KINDS; kind++) {
+		if (buffer_stack_vas[kind] != NULL) {
+			tile_net_pop_all_buffers(first_buffer_stack + kind);
+		}
+	}
 
 	/* Destroy mpipe context so the hardware no longer owns any memory. */
 	gxio_mpipe_destroy(&context);
@@ -961,15 +982,15 @@ static void tile_net_init_mpipe_fail(void)
 		info->iqueue.idescs = NULL;
 	}
 
-	if (small_buffer_stack_va)
-		free_pages_exact(small_buffer_stack_va, buffer_stack_size);
-	if (large_buffer_stack_va)
-		free_pages_exact(large_buffer_stack_va, buffer_stack_size);
+	for (kind = 0; kind < MAX_KINDS; kind++) {
+		if (buffer_stack_vas[kind] != NULL) {
+			free_pages_exact(buffer_stack_vas[kind],
+					 buffer_stack_bytes[kind]);
+			buffer_stack_vas[kind] = NULL;
+		}
+	}
 
-	small_buffer_stack_va = NULL;
-	large_buffer_stack_va = NULL;
-	large_buffer_stack = -1;
-	small_buffer_stack = -1;
+	first_buffer_stack = -1;
 	first_bucket = -1;
 }
 
@@ -984,7 +1005,7 @@ static void tile_net_init_mpipe_fail(void)
  */
 static int tile_net_init_mpipe(struct net_device *dev)
 {
-	int i, num_buffers, rc;
+	int rc;
 	int cpu;
 	int first_ring, ring;
 	int network_cpus_count = cpus_weight(network_cpus_map);
@@ -1001,27 +1022,10 @@ static int tile_net_init_mpipe(struct net_device *dev)
 	}
 
 	/* Set up the buffer stacks. */
-	num_buffers =
-		network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH);
-	rc = init_buffer_stacks(dev, num_buffers);
+	rc = init_buffer_stacks(dev, network_cpus_count);
 	if (rc != 0)
 		goto fail;
 
-	/* Provide initial buffers. */
-	rc = -ENOMEM;
-	for (i = 0; i < num_buffers; i++) {
-		if (!tile_net_provide_buffer(true)) {
-			netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
-			goto fail;
-		}
-	}
-	for (i = 0; i < num_buffers; i++) {
-		if (!tile_net_provide_buffer(false)) {
-			netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
-			goto fail;
-		}
-	}
-
 	/* Allocate one NotifRing for each network cpu. */
 	rc = gxio_mpipe_alloc_notif_rings(&context, network_cpus_count, 0, 0);
 	if (rc < 0) {
@@ -1063,13 +1067,13 @@ fail:
  */
 static int tile_net_init_egress(struct net_device *dev, int echannel)
 {
+	static int ering = -1;
 	struct page *headers_page, *edescs_page, *equeue_page;
 	gxio_mpipe_edesc_t *edescs;
 	gxio_mpipe_equeue_t *equeue;
 	unsigned char *headers;
 	int headers_order, edescs_order, equeue_order;
 	size_t edescs_size;
-	int edma;
 	int rc = -ENOMEM;
 
 	/* Only initialize once. */
@@ -1110,25 +1114,37 @@ static int tile_net_init_egress(struct net_device *dev, int echannel)
 	}
 	equeue = pfn_to_kaddr(page_to_pfn(equeue_page));
 
-	/* Allocate an edma ring. Note that in practice this can't
-	 * fail, which is good, because we will leak an edma ring if so.
-	 */
-	rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0);
-	if (rc < 0) {
-		netdev_warn(dev, "gxio_mpipe_alloc_edma_rings failed: %d\n",
-			    rc);
-		goto fail_equeue;
+	/* Allocate an edma ring (using a one entry "free list"). */
+	if (ering < 0) {
+		rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0);
+		if (rc < 0) {
+			netdev_warn(dev, "gxio_mpipe_alloc_edma_rings: %d\n",
+				    rc);
+			goto fail_equeue;
+		}
+		ering = rc;
 	}
-	edma = rc;
 
 	/* Initialize the equeue. */
-	rc = gxio_mpipe_equeue_init(equeue, &context, edma, echannel,
+	rc = gxio_mpipe_equeue_init(equeue, &context, ering, echannel,
 				    edescs, edescs_size, 0);
 	if (rc != 0) {
 		netdev_err(dev, "gxio_mpipe_equeue_init failed: %d\n", rc);
 		goto fail_equeue;
 	}
 
+	/* Don't reuse the ering later. */
+	ering = -1;
+
+	if (jumbo_num != 0) {
+		/* Make sure "jumbo" packets can be egressed safely. */
+		if (gxio_mpipe_equeue_set_snf_size(equeue, 10368) < 0) {
+			/* ISSUE: There is no "gxio_mpipe_equeue_destroy()". */
+			netdev_warn(dev, "Jumbo packets may not be egressed"
+				    " properly on channel %d\n", echannel);
+		}
+	}
+
 	/* Done. */
 	egress_for_echannel[echannel].equeue = equeue;
 	egress_for_echannel[echannel].headers = headers;
@@ -1156,6 +1172,17 @@ static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
 		netdev_err(dev, "Failed to open '%s'\n", link_name);
 		return rc;
 	}
+	if (jumbo_num != 0) {
+		u32 attr = GXIO_MPIPE_LINK_RECEIVE_JUMBO;
+		rc = gxio_mpipe_link_set_attr(link, attr, 1);
+		if (rc != 0) {
+			netdev_err(dev,
+				   "Cannot receive jumbo packets on '%s'\n",
+				   link_name);
+			gxio_mpipe_link_close(link);
+			return rc;
+		}
+	}
 	rc = gxio_mpipe_link_channel(link);
 	if (rc < 0 || rc >= TILE_NET_CHANNELS) {
 		netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc);
@@ -1499,8 +1526,8 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
 	edesc_head.xfer_size = sh_len;
 
 	/* This is only used to specify the TLB. */
-	edesc_head.stack_idx = large_buffer_stack;
-	edesc_body.stack_idx = large_buffer_stack;
+	edesc_head.stack_idx = first_buffer_stack;
+	edesc_body.stack_idx = first_buffer_stack;
 
 	/* Egress all the edescs. */
 	for (segment = 0; segment < sh->gso_segs; segment++) {
@@ -1660,7 +1687,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
 	num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));
 
 	/* This is only used to specify the TLB. */
-	edesc.stack_idx = large_buffer_stack;
+	edesc.stack_idx = first_buffer_stack;
 
 	/* Prepare the edescs. */
 	for (i = 0; i < num_edescs; i++) {
@@ -1740,7 +1767,9 @@ static struct net_device_stats *tile_net_get_stats(struct net_device *dev)
 /* Change the MTU. */
 static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
 {
-	if ((new_mtu < 68) || (new_mtu > 1500))
+	if (new_mtu < 68)
+		return -EINVAL;
+	if (new_mtu > ((jumbo_num != 0) ? 9000 : 1500))
 		return -EINVAL;
 	dev->mtu = new_mtu;
 	return 0;