author	Chris Metcalf <cmetcalf@tilera.com>	2013-08-01 11:36:42 -0400
committer	David S. Miller <davem@davemloft.net>	2013-08-01 17:35:50 -0400
commit	f3286a3af89d6db7a488f3e8f02b98d67d50f00c (patch)
tree	ebe37457c3676eb8cb06ba17597134975958845d
parent	6ab4ae9aadef65e2f7aca44fd963c302dcb5849e (diff)
tile: support multiple mPIPE shims in tilegx network driver
The initial driver support was for a single mPIPE shim on the chip (as is
the case for the Gx36 hardware).  The Gx72 chip has two mPIPE shims, so we
extend the driver to handle that case.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
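The new gxio_mpipe_link_instance() call lets a client discover which mPIPE
shim a link is attached to before initializing a context.  A minimal usage
sketch, following the pattern documented in gxio/mpipe.h in this patch (the
link name "gbe0" and the error handling below are illustrative only, not
part of the patch):

	gxio_mpipe_context_t context;
	gxio_mpipe_link_t link;

	/* Ask which mPIPE shim the named link is attached to. */
	int instance = gxio_mpipe_link_instance("gbe0");
	if (instance < 0)
		return instance;	/* e.g. GXIO_ERR_NO_DEVICE */

	/* Initialize that shim's context, then open the link against it. */
	if (gxio_mpipe_init(&context, instance) != 0)
		return -ENODEV;
	return gxio_mpipe_link_open(&link, &context, "gbe0", 0);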
-rw-r--r--	arch/tile/gxio/iorpc_mpipe_info.c         |  18
-rw-r--r--	arch/tile/gxio/mpipe.c                    |  25
-rw-r--r--	arch/tile/include/gxio/iorpc_mpipe_info.h |   4
-rw-r--r--	arch/tile/include/gxio/mpipe.h            |  28
-rw-r--r--	arch/tile/include/hv/drv_mpipe_intf.h     |   3
-rw-r--r--	drivers/net/ethernet/tile/tilegx.c        | 551
6 files changed, 417 insertions(+), 212 deletions(-)
diff --git a/arch/tile/gxio/iorpc_mpipe_info.c b/arch/tile/gxio/iorpc_mpipe_info.c
index d0254aa60cba..64883aabeb9c 100644
--- a/arch/tile/gxio/iorpc_mpipe_info.c
+++ b/arch/tile/gxio/iorpc_mpipe_info.c
@@ -16,6 +16,24 @@
 #include "gxio/iorpc_mpipe_info.h"
 
 
+struct instance_aux_param {
+	_gxio_mpipe_link_name_t name;
+};
+
+int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t * context,
+				 _gxio_mpipe_link_name_t name)
+{
+	struct instance_aux_param temp;
+	struct instance_aux_param *params = &temp;
+
+	params->name = name;
+
+	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+			     sizeof(*params), GXIO_MPIPE_INFO_OP_INSTANCE_AUX);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_info_instance_aux);
+
 struct enumerate_aux_param {
 	_gxio_mpipe_link_name_t name;
 	_gxio_mpipe_link_mac_t mac;
diff --git a/arch/tile/gxio/mpipe.c b/arch/tile/gxio/mpipe.c
index 0567cf0cd29e..5301a9ffbae1 100644
--- a/arch/tile/gxio/mpipe.c
+++ b/arch/tile/gxio/mpipe.c
@@ -36,8 +36,14 @@ int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
 	int fd;
 	int i;
 
+	if (mpipe_index >= GXIO_MPIPE_INSTANCE_MAX)
+		return -EINVAL;
+
 	snprintf(file, sizeof(file), "mpipe/%d/iorpc", mpipe_index);
 	fd = hv_dev_open((HV_VirtAddr) file, 0);
+
+	context->fd = fd;
+
 	if (fd < 0) {
 		if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
 			return fd;
@@ -45,8 +51,6 @@ int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
 		return -ENODEV;
 	}
 
-	context->fd = fd;
-
 	/* Map in the MMIO space. */
 	context->mmio_cfg_base = (void __force *)
 		iorpc_ioremap(fd, HV_MPIPE_CONFIG_MMIO_OFFSET,
@@ -64,12 +68,15 @@ int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
 	for (i = 0; i < 8; i++)
 		context->__stacks.stacks[i] = 255;
 
+	context->instance = mpipe_index;
+
 	return 0;
 
  fast_failed:
 	iounmap((void __force __iomem *)(context->mmio_cfg_base));
  cfg_failed:
 	hv_dev_close(context->fd);
+	context->fd = -1;
 	return -ENODEV;
 }
 
@@ -496,6 +503,20 @@ static gxio_mpipe_context_t *_gxio_get_link_context(void)
 	return contextp;
 }
 
+int gxio_mpipe_link_instance(const char *link_name)
+{
+	_gxio_mpipe_link_name_t name;
+	gxio_mpipe_context_t *context = _gxio_get_link_context();
+
+	if (!context)
+		return GXIO_ERR_NO_DEVICE;
+
+	strncpy(name.name, link_name, sizeof(name.name));
+	name.name[GXIO_MPIPE_LINK_NAME_LEN - 1] = '\0';
+
+	return gxio_mpipe_info_instance_aux(context, name);
+}
+
 int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac)
 {
 	int rv;
diff --git a/arch/tile/include/gxio/iorpc_mpipe_info.h b/arch/tile/include/gxio/iorpc_mpipe_info.h
index 0bcf3f71ce8b..476c5e5ca22c 100644
--- a/arch/tile/include/gxio/iorpc_mpipe_info.h
+++ b/arch/tile/include/gxio/iorpc_mpipe_info.h
@@ -27,11 +27,15 @@
 #include <asm/pgtable.h>
 
 
+#define GXIO_MPIPE_INFO_OP_INSTANCE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1250)
 #define GXIO_MPIPE_INFO_OP_ENUMERATE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1251)
 #define GXIO_MPIPE_INFO_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
 #define GXIO_MPIPE_INFO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
 
 
+int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t * context,
+				 _gxio_mpipe_link_name_t name);
+
 int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t * context,
 				  unsigned int idx,
 				  _gxio_mpipe_link_name_t * name,
diff --git a/arch/tile/include/gxio/mpipe.h b/arch/tile/include/gxio/mpipe.h
index ed742e3f9562..eb7fee41c9b6 100644
--- a/arch/tile/include/gxio/mpipe.h
+++ b/arch/tile/include/gxio/mpipe.h
@@ -220,6 +220,13 @@ typedef MPIPE_PDESC_t gxio_mpipe_idesc_t;
  */
 typedef MPIPE_EDMA_DESC_t gxio_mpipe_edesc_t;
 
+/*
+ * Max # of mpipe instances. 2 currently.
+ */
+#define GXIO_MPIPE_INSTANCE_MAX  HV_MPIPE_INSTANCE_MAX
+
+#define NR_MPIPE_MAX   GXIO_MPIPE_INSTANCE_MAX
+
 /* Get the "va" field from an "idesc".
  *
  * This is the address at which the ingress hardware copied the first
@@ -311,6 +318,9 @@ typedef struct {
 	/* File descriptor for calling up to Linux (and thus the HV). */
 	int fd;
 
+	/* Corresponding mpipe instance #. */
+	int instance;
+
 	/* The VA at which configuration registers are mapped. */
 	char *mmio_cfg_base;
 
@@ -1716,6 +1726,24 @@ typedef struct {
 	uint8_t mac;
 } gxio_mpipe_link_t;
 
+/* Translate a link name to the instance number of the mPIPE shim which is
+ * connected to that link.  This call does not verify whether the link is
+ * currently available, and does not reserve any link resources;
+ * gxio_mpipe_link_open() must be called to perform those functions.
+ *
+ * Typically applications will call this function to translate a link name
+ * to an mPIPE instance number; call gxio_mpipe_init(), passing it that
+ * instance number, to initialize the mPIPE shim; and then call
+ * gxio_mpipe_link_open(), passing it the same link name plus the mPIPE
+ * context, to configure the link.
+ *
+ * @param link_name Name of the link; see @ref gxio_mpipe_link_names.
+ * @return The mPIPE instance number which is associated with the named
+ * link, or a negative error code (::GXIO_ERR_NO_DEVICE) if the link does
+ * not exist.
+ */
+extern int gxio_mpipe_link_instance(const char *link_name);
+
 /* Retrieve one of this system's legal link names, and its MAC address.
  *
  * @param index Link name index.  If a system supports N legal link names,
diff --git a/arch/tile/include/hv/drv_mpipe_intf.h b/arch/tile/include/hv/drv_mpipe_intf.h
index 6cdae3bf046e..c97e416dd963 100644
--- a/arch/tile/include/hv/drv_mpipe_intf.h
+++ b/arch/tile/include/hv/drv_mpipe_intf.h
@@ -23,6 +23,9 @@
 #include <arch/mpipe_constants.h>
 
 
+/** Number of mPIPE instances supported */
+#define HV_MPIPE_INSTANCE_MAX   (2)
+
 /** Number of buffer stacks (32). */
 #define HV_MPIPE_NUM_BUFFER_STACKS \
   (MPIPE_MMIO_INIT_DAT_GX36_1__BUFFER_STACK_MASK_WIDTH)
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 2b1c31f51b92..b80a91f0561f 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -133,27 +133,31 @@ struct tile_net_tx_wake {
 
 /* Info for a specific cpu. */
 struct tile_net_info {
-	/* The NAPI struct. */
-	struct napi_struct napi;
-	/* Packet queue. */
-	gxio_mpipe_iqueue_t iqueue;
 	/* Our cpu. */
 	int my_cpu;
-	/* True if iqueue is valid. */
-	bool has_iqueue;
-	/* NAPI flags. */
-	bool napi_added;
-	bool napi_enabled;
-	/* Number of buffers (by kind) which must still be provided. */
-	unsigned int num_needed_buffers[MAX_KINDS];
 	/* A timer for handling egress completions. */
 	struct hrtimer egress_timer;
 	/* True if "egress_timer" is scheduled. */
 	bool egress_timer_scheduled;
-	/* Comps for each egress channel. */
-	struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS];
-	/* Transmit wake timer for each egress channel. */
-	struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS];
+	struct info_mpipe {
+		/* Packet queue. */
+		gxio_mpipe_iqueue_t iqueue;
+		/* The NAPI struct. */
+		struct napi_struct napi;
+		/* Number of buffers (by kind) which must still be provided. */
+		unsigned int num_needed_buffers[MAX_KINDS];
+		/* instance id. */
+		int instance;
+		/* True if iqueue is valid. */
+		bool has_iqueue;
+		/* NAPI flags. */
+		bool napi_added;
+		bool napi_enabled;
+		/* Comps for each egress channel. */
+		struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS];
+		/* Transmit wake timer for each egress channel. */
+		struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS];
+	} mpipe[NR_MPIPE_MAX];
 };
 
 /* Info for egress on a particular egress channel. */
@@ -178,17 +182,54 @@ struct tile_net_priv {
 	int loopify_channel;
 	/* The egress channel (channel or loopify_channel). */
 	int echannel;
+	/* mPIPE instance, 0 or 1. */
+	int instance;
 };
 
-/* Egress info, indexed by "priv->echannel" (lazily created as needed). */
-static struct tile_net_egress egress_for_echannel[TILE_NET_CHANNELS];
-
-/* Devices currently associated with each channel.
- * NOTE: The array entry can become NULL after ifconfig down, but
- * we do not free the underlying net_device structures, so it is
- * safe to use a pointer after reading it from this array.
- */
-static struct net_device *tile_net_devs_for_channel[TILE_NET_CHANNELS];
+static struct mpipe_data {
+	/* The ingress irq. */
+	int ingress_irq;
+
+	/* The "context" for all devices. */
+	gxio_mpipe_context_t context;
+
+	/* Egress info, indexed by "priv->echannel"
+	 * (lazily created as needed).
+	 */
+	struct tile_net_egress
+	egress_for_echannel[TILE_NET_CHANNELS];
+
+	/* Devices currently associated with each channel.
+	 * NOTE: The array entry can become NULL after ifconfig down, but
+	 * we do not free the underlying net_device structures, so it is
+	 * safe to use a pointer after reading it from this array.
+	 */
+	struct net_device
+	*tile_net_devs_for_channel[TILE_NET_CHANNELS];
+
+	/* The actual memory allocated for the buffer stacks. */
+	void *buffer_stack_vas[MAX_KINDS];
+
+	/* The amount of memory allocated for each buffer stack. */
+	size_t buffer_stack_bytes[MAX_KINDS];
+
+	/* The first buffer stack index
+	 * (small = +0, large = +1, jumbo = +2).
+	 */
+	int first_buffer_stack;
+
+	/* The buckets. */
+	int first_bucket;
+	int num_buckets;
+
+} mpipe_data[NR_MPIPE_MAX] = {
+	[0 ... (NR_MPIPE_MAX - 1)] {
+		.ingress_irq = -1,
+		.first_buffer_stack = -1,
+		.first_bucket = -1,
+		.num_buckets = 1
+	}
+};
 
 /* A mutex for "tile_net_devs_for_channel". */
 static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);
@@ -196,8 +237,6 @@ static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);
 /* The per-cpu info. */
 static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info);
 
-/* The "context" for all devices. */
-static gxio_mpipe_context_t context;
 
 /* The buffer size enums for each buffer stack.
  * See arch/tile/include/gxio/mpipe.h for the set of possible values.
@@ -210,22 +249,6 @@ static gxio_mpipe_buffer_size_enum_t buffer_size_enums[MAX_KINDS] = {
 	GXIO_MPIPE_BUFFER_SIZE_16384
 };
 
-/* The actual memory allocated for the buffer stacks. */
-static void *buffer_stack_vas[MAX_KINDS];
-
-/* The amount of memory allocated for each buffer stack. */
-static size_t buffer_stack_bytes[MAX_KINDS];
-
-/* The first buffer stack index (small = +0, large = +1, jumbo = +2). */
-static int first_buffer_stack = -1;
-
-/* The buckets. */
-static int first_bucket = -1;
-static int num_buckets = 1;
-
-/* The ingress irq. */
-static int ingress_irq = -1;
-
 /* Text value of tile_net.cpus if passed as a module parameter. */
 static char *network_cpus_string;
 
@@ -241,6 +264,13 @@ static char *custom_str;
 /* If "tile_net.jumbo=NUM" was specified, this is "NUM". */
 static uint jumbo_num;
 
+/* Obtain mpipe instance from struct tile_net_priv given struct net_device. */
+static inline int mpipe_instance(struct net_device *dev)
+{
+	struct tile_net_priv *priv = netdev_priv(dev);
+	return priv->instance;
+}
+
 /* The "tile_net.cpus" argument specifies the cpus that are dedicated
  * to handle ingress packets.
  *
@@ -314,8 +344,9 @@ static void tile_net_stats_add(unsigned long value, unsigned long *field)
 }
 
 /* Allocate and push a buffer. */
-static bool tile_net_provide_buffer(int kind)
+static bool tile_net_provide_buffer(int instance, int kind)
 {
+	struct mpipe_data *md = &mpipe_data[instance];
 	gxio_mpipe_buffer_size_enum_t bse = buffer_size_enums[kind];
 	size_t bs = gxio_mpipe_buffer_size_enum_to_buffer_size(bse);
 	const unsigned long buffer_alignment = 128;
@@ -337,7 +368,7 @@ static bool tile_net_provide_buffer(int kind)
 	/* Make sure "skb" and the back-pointer have been flushed. */
 	wmb();
 
-	gxio_mpipe_push_buffer(&context, first_buffer_stack + kind,
+	gxio_mpipe_push_buffer(&md->context, md->first_buffer_stack + kind,
 			       (void *)va_to_tile_io_addr(skb->data));
 
 	return true;
@@ -363,11 +394,14 @@ static struct sk_buff *mpipe_buf_to_skb(void *va)
 	return skb;
 }
 
-static void tile_net_pop_all_buffers(int stack)
+static void tile_net_pop_all_buffers(int instance, int stack)
 {
+	struct mpipe_data *md = &mpipe_data[instance];
+
 	for (;;) {
 		tile_io_addr_t addr =
-			(tile_io_addr_t)gxio_mpipe_pop_buffer(&context, stack);
+			(tile_io_addr_t)gxio_mpipe_pop_buffer(&md->context,
+							      stack);
 		if (addr == 0)
 			break;
 		dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr)));
@@ -378,17 +412,21 @@ static void tile_net_pop_all_buffers(int stack)
 static void tile_net_provide_needed_buffers(void)
 {
 	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
-	int kind;
-
-	for (kind = 0; kind < MAX_KINDS; kind++) {
-		while (info->num_needed_buffers[kind] != 0) {
-			if (!tile_net_provide_buffer(kind)) {
-				/* Add info to the allocation failure dump. */
-				pr_notice("Tile %d still needs some buffers\n",
-					  info->my_cpu);
-				return;
+	int instance, kind;
+	for (instance = 0; instance < NR_MPIPE_MAX &&
+		     info->mpipe[instance].has_iqueue; instance++) {
+		for (kind = 0; kind < MAX_KINDS; kind++) {
+			while (info->mpipe[instance].num_needed_buffers[kind]
+			       != 0) {
+				if (!tile_net_provide_buffer(instance, kind)) {
+					pr_notice("Tile %d still needs"
+						  " some buffers\n",
+						  info->my_cpu);
+					return;
+				}
+				info->mpipe[instance].
+					num_needed_buffers[kind]--;
 			}
-			info->num_needed_buffers[kind]--;
 		}
 	}
 }
@@ -412,6 +450,7 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
 				 gxio_mpipe_idesc_t *idesc, unsigned long len)
 {
 	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+	int instance = mpipe_instance(dev);
 
 	/* Encode the actual packet length. */
 	skb_put(skb, len);
@@ -422,7 +461,7 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
 	if (idesc->cs && idesc->csum_seed_val == 0xFFFF)
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-	napi_gro_receive(&info->napi, skb);
+	napi_gro_receive(&info->mpipe[instance].napi, skb);
 
 	/* Update stats. */
 	tile_net_stats_add(1, &dev->stats.rx_packets);
@@ -430,18 +469,19 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
 
 	/* Need a new buffer. */
 	if (idesc->size == buffer_size_enums[0])
-		info->num_needed_buffers[0]++;
+		info->mpipe[instance].num_needed_buffers[0]++;
 	else if (idesc->size == buffer_size_enums[1])
-		info->num_needed_buffers[1]++;
+		info->mpipe[instance].num_needed_buffers[1]++;
 	else
-		info->num_needed_buffers[2]++;
+		info->mpipe[instance].num_needed_buffers[2]++;
 }
 
 /* Handle a packet.  Return true if "processed", false if "filtered". */
-static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
+static bool tile_net_handle_packet(int instance, gxio_mpipe_idesc_t *idesc)
 {
 	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
-	struct net_device *dev = tile_net_devs_for_channel[idesc->channel];
+	struct mpipe_data *md = &mpipe_data[instance];
+	struct net_device *dev = md->tile_net_devs_for_channel[idesc->channel];
 	uint8_t l2_offset;
 	void *va;
 	void *buf;
@@ -477,7 +517,7 @@ static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
 		if (dev)
 			tile_net_stats_add(1, &dev->stats.rx_dropped);
 drop:
-		gxio_mpipe_iqueue_drop(&info->iqueue, idesc);
+		gxio_mpipe_iqueue_drop(&info->mpipe[instance].iqueue, idesc);
 	} else {
 		struct sk_buff *skb = mpipe_buf_to_skb(va);
 
@@ -487,7 +527,7 @@ drop:
 		tile_net_receive_skb(dev, skb, idesc, len);
 	}
 
-	gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
+	gxio_mpipe_iqueue_consume(&info->mpipe[instance].iqueue, idesc);
 	return !filter;
 }
 
@@ -508,14 +548,20 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
 	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
 	unsigned int work = 0;
 	gxio_mpipe_idesc_t *idesc;
-	int i, n;
-
-	/* Process packets. */
-	while ((n = gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc)) > 0) {
+	int instance, i, n;
+	struct mpipe_data *md;
+	struct info_mpipe *info_mpipe =
+		container_of(napi, struct info_mpipe, napi);
+
+	instance = info_mpipe->instance;
+	while ((n = gxio_mpipe_iqueue_try_peek(
+			&info_mpipe->iqueue,
+			&idesc)) > 0) {
 		for (i = 0; i < n; i++) {
 			if (i == TILE_NET_BATCH)
 				goto done;
-			if (tile_net_handle_packet(idesc + i)) {
+			if (tile_net_handle_packet(instance,
+						   idesc + i)) {
 				if (++work >= budget)
 					goto done;
 			}
@@ -523,14 +569,16 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
 	}
 
 	/* There are no packets left. */
-	napi_complete(&info->napi);
+	napi_complete(&info_mpipe->napi);
 
+	md = &mpipe_data[instance];
 	/* Re-enable hypervisor interrupts. */
-	gxio_mpipe_enable_notif_ring_interrupt(&context, info->iqueue.ring);
+	gxio_mpipe_enable_notif_ring_interrupt(
+		&md->context, info->mpipe[instance].iqueue.ring);
 
 	/* HACK: Avoid the "rotting packet" problem. */
-	if (gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc) > 0)
-		napi_schedule(&info->napi);
+	if (gxio_mpipe_iqueue_try_peek(&info_mpipe->iqueue, &idesc) > 0)
+		napi_schedule(&info_mpipe->napi);
 
 	/* ISSUE: Handle completions? */
 
@@ -540,11 +588,11 @@ done:
 	return work;
 }
 
-/* Handle an ingress interrupt on the current cpu. */
-static irqreturn_t tile_net_handle_ingress_irq(int irq, void *unused)
+/* Handle an ingress interrupt from an instance on the current cpu. */
+static irqreturn_t tile_net_handle_ingress_irq(int irq, void *id)
 {
 	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
-	napi_schedule(&info->napi);
+	napi_schedule(&info->mpipe[(uint64_t)id].napi);
 	return IRQ_HANDLED;
 }
 
@@ -586,7 +634,9 @@ static void tile_net_schedule_tx_wake_timer(struct net_device *dev,
 {
 	struct tile_net_info *info = &per_cpu(per_cpu_info, tx_queue_idx);
 	struct tile_net_priv *priv = netdev_priv(dev);
-	struct tile_net_tx_wake *tx_wake = &info->tx_wake[priv->echannel];
+	int instance = priv->instance;
+	struct tile_net_tx_wake *tx_wake =
+		&info->mpipe[instance].tx_wake[priv->echannel];
 
 	hrtimer_start(&tx_wake->timer,
 		      ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL),
@@ -624,7 +674,7 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
 	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
 	unsigned long irqflags;
 	bool pending = false;
-	int i;
+	int i, instance;
 
 	local_irq_save(irqflags);
 
@@ -632,13 +682,19 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
 	info->egress_timer_scheduled = false;
 
 	/* Free all possible comps for this tile. */
-	for (i = 0; i < TILE_NET_CHANNELS; i++) {
-		struct tile_net_egress *egress = &egress_for_echannel[i];
-		struct tile_net_comps *comps = info->comps_for_echannel[i];
-		if (comps->comp_last >= comps->comp_next)
-			continue;
-		tile_net_free_comps(egress->equeue, comps, -1, true);
-		pending = pending || (comps->comp_last < comps->comp_next);
+	for (instance = 0; instance < NR_MPIPE_MAX &&
+		     info->mpipe[instance].has_iqueue; instance++) {
+		for (i = 0; i < TILE_NET_CHANNELS; i++) {
+			struct tile_net_egress *egress =
+				&mpipe_data[instance].egress_for_echannel[i];
+			struct tile_net_comps *comps =
+				info->mpipe[instance].comps_for_echannel[i];
+			if (!egress || comps->comp_last >= comps->comp_next)
+				continue;
+			tile_net_free_comps(egress->equeue, comps, -1, true);
+			pending = pending ||
+				(comps->comp_last < comps->comp_next);
+		}
 	}
 
 	/* Reschedule timer if needed. */
@@ -650,13 +706,15 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
 	return HRTIMER_NORESTART;
 }
 
-/* Helper function for "tile_net_update()". */
-static void manage_ingress_irq(void *enable)
+/* Helper functions for "tile_net_update()". */
+static void enable_ingress_irq(void *irq)
 {
-	if (enable)
-		enable_percpu_irq(ingress_irq, 0);
-	else
-		disable_percpu_irq(ingress_irq);
+	enable_percpu_irq((long)irq, 0);
+}
+
+static void disable_ingress_irq(void *irq)
+{
+	disable_percpu_irq((long)irq);
 }
 
 /* Helper function for tile_net_open() and tile_net_stop().
@@ -666,19 +724,22 @@ static int tile_net_update(struct net_device *dev)
 {
 	static gxio_mpipe_rules_t rules;  /* too big to fit on the stack */
 	bool saw_channel = false;
+	int instance = mpipe_instance(dev);
+	struct mpipe_data *md = &mpipe_data[instance];
 	int channel;
 	int rc;
 	int cpu;
 
-	gxio_mpipe_rules_init(&rules, &context);
+	saw_channel = false;
+	gxio_mpipe_rules_init(&rules, &md->context);
 
 	for (channel = 0; channel < TILE_NET_CHANNELS; channel++) {
-		if (tile_net_devs_for_channel[channel] == NULL)
+		if (md->tile_net_devs_for_channel[channel] == NULL)
 			continue;
 		if (!saw_channel) {
 			saw_channel = true;
-			gxio_mpipe_rules_begin(&rules, first_bucket,
-					       num_buckets, NULL);
+			gxio_mpipe_rules_begin(&rules, md->first_bucket,
+					       md->num_buckets, NULL);
 			gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN);
 		}
 		gxio_mpipe_rules_add_channel(&rules, channel);
@@ -689,7 +750,8 @@ static int tile_net_update(struct net_device *dev)
 	 */
 	rc = gxio_mpipe_rules_commit(&rules);
 	if (rc != 0) {
-		netdev_warn(dev, "gxio_mpipe_rules_commit failed: %d\n", rc);
+		netdev_warn(dev, "gxio_mpipe_rules_commit: mpipe[%d] %d\n",
+			    instance, rc);
 		return -EIO;
 	}
 
@@ -697,35 +759,38 @@ static int tile_net_update(struct net_device *dev)
 	 * We use on_each_cpu to handle the IPI mask or unmask.
 	 */
 	if (!saw_channel)
-		on_each_cpu(manage_ingress_irq, (void *)0, 1);
+		on_each_cpu(disable_ingress_irq,
+			    (void *)(long)(md->ingress_irq), 1);
 	for_each_online_cpu(cpu) {
 		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
-		if (!info->has_iqueue)
+
+		if (!info->mpipe[instance].has_iqueue)
 			continue;
 		if (saw_channel) {
-			if (!info->napi_added) {
-				netif_napi_add(dev, &info->napi,
+			if (!info->mpipe[instance].napi_added) {
+				netif_napi_add(dev, &info->mpipe[instance].napi,
 					       tile_net_poll, TILE_NET_WEIGHT);
-				info->napi_added = true;
+				info->mpipe[instance].napi_added = true;
 			}
-			if (!info->napi_enabled) {
-				napi_enable(&info->napi);
-				info->napi_enabled = true;
+			if (!info->mpipe[instance].napi_enabled) {
+				napi_enable(&info->mpipe[instance].napi);
+				info->mpipe[instance].napi_enabled = true;
 			}
 		} else {
-			if (info->napi_enabled) {
-				napi_disable(&info->napi);
-				info->napi_enabled = false;
+			if (info->mpipe[instance].napi_enabled) {
+				napi_disable(&info->mpipe[instance].napi);
+				info->mpipe[instance].napi_enabled = false;
 			}
 			/* FIXME: Drain the iqueue. */
 		}
 	}
 	if (saw_channel)
-		on_each_cpu(manage_ingress_irq, (void *)1, 1);
+		on_each_cpu(enable_ingress_irq,
+			    (void *)(long)(md->ingress_irq), 1);
 
 	/* HACK: Allow packets to flow in the simulator. */
 	if (saw_channel)
-		sim_enable_mpipe_links(0, -1);
+		sim_enable_mpipe_links(instance, -1);
 
 	return 0;
 }
@@ -735,46 +800,52 @@ static int create_buffer_stack(struct net_device *dev,
 			       int kind, size_t num_buffers)
 {
 	pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH);
+	int instance = mpipe_instance(dev);
+	struct mpipe_data *md = &mpipe_data[instance];
 	size_t needed = gxio_mpipe_calc_buffer_stack_bytes(num_buffers);
-	int stack_idx = first_buffer_stack + kind;
+	int stack_idx = md->first_buffer_stack + kind;
 	void *va;
 	int i, rc;
 
 	/* Round up to 64KB and then use alloc_pages() so we get the
 	 * required 64KB alignment.
 	 */
-	buffer_stack_bytes[kind] = ALIGN(needed, 64 * 1024);
+	md->buffer_stack_bytes[kind] =
+		ALIGN(needed, 64 * 1024);
 
-	va = alloc_pages_exact(buffer_stack_bytes[kind], GFP_KERNEL);
+	va = alloc_pages_exact(md->buffer_stack_bytes[kind], GFP_KERNEL);
 	if (va == NULL) {
 		netdev_err(dev,
 			   "Could not alloc %zd bytes for buffer stack %d\n",
-			   buffer_stack_bytes[kind], kind);
+			   md->buffer_stack_bytes[kind], kind);
 		return -ENOMEM;
 	}
 
 	/* Initialize the buffer stack. */
-	rc = gxio_mpipe_init_buffer_stack(&context, stack_idx,
-					  buffer_size_enums[kind],
-					  va, buffer_stack_bytes[kind], 0);
+	rc = gxio_mpipe_init_buffer_stack(&md->context, stack_idx,
+					  buffer_size_enums[kind], va,
+					  md->buffer_stack_bytes[kind], 0);
 	if (rc != 0) {
-		netdev_err(dev, "gxio_mpipe_init_buffer_stack: %d\n", rc);
-		free_pages_exact(va, buffer_stack_bytes[kind]);
+		netdev_err(dev, "gxio_mpipe_init_buffer_stack: mpipe[%d] %d\n",
+			   instance, rc);
+		free_pages_exact(va, md->buffer_stack_bytes[kind]);
 		return rc;
 	}
 
-	buffer_stack_vas[kind] = va;
+	md->buffer_stack_vas[kind] = va;
 
-	rc = gxio_mpipe_register_client_memory(&context, stack_idx,
+	rc = gxio_mpipe_register_client_memory(&md->context, stack_idx,
 					       hash_pte, 0);
 	if (rc != 0) {
-		netdev_err(dev, "gxio_mpipe_register_client_memory: %d\n", rc);
+		netdev_err(dev,
+			   "gxio_mpipe_register_client_memory: mpipe[%d] %d\n",
+			   instance, rc);
 		return rc;
 	}
 
 	/* Provide initial buffers. */
 	for (i = 0; i < num_buffers; i++) {
-		if (!tile_net_provide_buffer(kind)) {
+		if (!tile_net_provide_buffer(instance, kind)) {
 			netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
 			return -ENOMEM;
 		}
@@ -793,14 +864,18 @@ static int init_buffer_stacks(struct net_device *dev,
 	int num_kinds = MAX_KINDS - (jumbo_num == 0);
 	size_t num_buffers;
 	int rc;
+	int instance = mpipe_instance(dev);
+	struct mpipe_data *md = &mpipe_data[instance];
 
 	/* Allocate the buffer stacks. */
-	rc = gxio_mpipe_alloc_buffer_stacks(&context, num_kinds, 0, 0);
+	rc = gxio_mpipe_alloc_buffer_stacks(&md->context, num_kinds, 0, 0);
 	if (rc < 0) {
-		netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks: %d\n", rc);
+		netdev_err(dev,
+			   "gxio_mpipe_alloc_buffer_stacks: mpipe[%d] %d\n",
+			   instance, rc);
 		return rc;
 	}
-	first_buffer_stack = rc;
+	md->first_buffer_stack = rc;
 
 	/* Enough small/large buffers to (normally) avoid buffer errors. */
 	num_buffers =
@@ -829,6 +904,8 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev,
 {
 	struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
 	int order, i, rc;
+	int instance = mpipe_instance(dev);
+	struct mpipe_data *md = &mpipe_data[instance];
 	struct page *page;
 	void *addr;
 
@@ -843,7 +920,7 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev,
 	addr = pfn_to_kaddr(page_to_pfn(page));
 	memset(addr, 0, COMPS_SIZE);
 	for (i = 0; i < TILE_NET_CHANNELS; i++)
-		info->comps_for_echannel[i] =
+		info->mpipe[instance].comps_for_echannel[i] =
 			addr + i * sizeof(struct tile_net_comps);
 
 	/* If this is a network cpu, create an iqueue. */
@@ -857,14 +934,15 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev,
 			return -ENOMEM;
 		}
 		addr = pfn_to_kaddr(page_to_pfn(page));
-		rc = gxio_mpipe_iqueue_init(&info->iqueue, &context, ring++,
-					    addr, NOTIF_RING_SIZE, 0);
+		rc = gxio_mpipe_iqueue_init(&info->mpipe[instance].iqueue,
+					    &md->context, ring++, addr,
+					    NOTIF_RING_SIZE, 0);
 		if (rc < 0) {
 			netdev_err(dev,
 				   "gxio_mpipe_iqueue_init failed: %d\n", rc);
 			return rc;
 		}
-		info->has_iqueue = true;
+		info->mpipe[instance].has_iqueue = true;
 	}
 
 	return ring;
@@ -877,40 +955,41 @@ static int init_notif_group_and_buckets(struct net_device *dev,
 					int ring, int network_cpus_count)
 {
 	int group, rc;
+	int instance = mpipe_instance(dev);
+	struct mpipe_data *md = &mpipe_data[instance];
 
 	/* Allocate one NotifGroup. */
-	rc = gxio_mpipe_alloc_notif_groups(&context, 1, 0, 0);
+	rc = gxio_mpipe_alloc_notif_groups(&md->context, 1, 0, 0);
 	if (rc < 0) {
-		netdev_err(dev, "gxio_mpipe_alloc_notif_groups failed: %d\n",
-			   rc);
+		netdev_err(dev, "gxio_mpipe_alloc_notif_groups: mpipe[%d] %d\n",
+			   instance, rc);
 		return rc;
 	}
 	group = rc;
 
 	/* Initialize global num_buckets value. */
 	if (network_cpus_count > 4)
-		num_buckets = 256;
+		md->num_buckets = 256;
 	else if (network_cpus_count > 1)
-		num_buckets = 16;
+		md->num_buckets = 16;
 
 	/* Allocate some buckets, and set global first_bucket value. */
-	rc = gxio_mpipe_alloc_buckets(&context, num_buckets, 0, 0);
+	rc = gxio_mpipe_alloc_buckets(&md->context, md->num_buckets, 0, 0);
 	if (rc < 0) {
-		netdev_err(dev, "gxio_mpipe_alloc_buckets failed: %d\n", rc);
+		netdev_err(dev, "gxio_mpipe_alloc_buckets: mpipe[%d] %d\n",
+			   instance, rc);
 		return rc;
 	}
-	first_bucket = rc;
+	md->first_bucket = rc;
 
 	/* Init group and buckets. */
 	rc = gxio_mpipe_init_notif_group_and_buckets(
-		&context, group, ring, network_cpus_count,
-		first_bucket, num_buckets,
+		&md->context, group, ring, network_cpus_count,
+		md->first_bucket, md->num_buckets,
 		GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY);
 	if (rc != 0) {
-		netdev_err(
-			dev,
-			"gxio_mpipe_init_notif_group_and_buckets failed: %d\n",
-			rc);
+		netdev_err(dev, "gxio_mpipe_init_notif_group_and_buckets: "
+			   "mpipe[%d] %d\n", instance, rc);
 		return rc;
 	}
 
@@ -924,30 +1003,39 @@ static int init_notif_group_and_buckets(struct net_device *dev,
  */
 static int tile_net_setup_interrupts(struct net_device *dev)
 {
-	int cpu, rc;
+	int cpu, rc, irq;
+	int instance = mpipe_instance(dev);
+	struct mpipe_data *md = &mpipe_data[instance];
+
+	irq = md->ingress_irq;
+	if (irq < 0) {
+		irq = create_irq();
+		if (irq < 0) {
+			netdev_err(dev,
+				   "create_irq failed: mpipe[%d] %d\n",
+				   instance, irq);
+			return irq;
+		}
+		tile_irq_activate(irq, TILE_IRQ_PERCPU);
 
-	rc = create_irq();
-	if (rc < 0) {
-		netdev_err(dev, "create_irq failed: %d\n", rc);
-		return rc;
-	}
-	ingress_irq = rc;
-	tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU);
-	rc = request_irq(ingress_irq, tile_net_handle_ingress_irq,
-			 0, "tile_net", NULL);
-	if (rc != 0) {
-		netdev_err(dev, "request_irq failed: %d\n", rc);
-		destroy_irq(ingress_irq);
-		ingress_irq = -1;
-		return rc;
+		rc = request_irq(irq, tile_net_handle_ingress_irq,
+				 0, "tile_net", (void *)((uint64_t)instance));
+
+		if (rc != 0) {
+			netdev_err(dev, "request_irq failed: mpipe[%d] %d\n",
+				   instance, rc);
+			destroy_irq(irq);
+			return rc;
+		}
+		md->ingress_irq = irq;
 	}
 
 	for_each_online_cpu(cpu) {
 		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
-		if (info->has_iqueue) {
-			gxio_mpipe_request_notif_ring_interrupt(
-				&context, cpu_x(cpu), cpu_y(cpu),
-				KERNEL_PL, ingress_irq, info->iqueue.ring);
+		if (info->mpipe[instance].has_iqueue) {
+			gxio_mpipe_request_notif_ring_interrupt(&md->context,
+				cpu_x(cpu), cpu_y(cpu), KERNEL_PL, irq,
+				info->mpipe[instance].iqueue.ring);
 		}
 	}
 
@@ -955,40 +1043,45 @@ static int tile_net_setup_interrupts(struct net_device *dev)
 }
 
 /* Undo any state set up partially by a failed call to tile_net_init_mpipe. */
-static void tile_net_init_mpipe_fail(void)
+static void tile_net_init_mpipe_fail(int instance)
 {
 	int kind, cpu;
+	struct mpipe_data *md = &mpipe_data[instance];
 
 	/* Do cleanups that require the mpipe context first. */
 	for (kind = 0; kind < MAX_KINDS; kind++) {
-		if (buffer_stack_vas[kind] != NULL) {
-			tile_net_pop_all_buffers(first_buffer_stack + kind);
+		if (md->buffer_stack_vas[kind] != NULL) {
+			tile_net_pop_all_buffers(instance,
+						 md->first_buffer_stack +
+						 kind);
 		}
 	}
 
 	/* Destroy mpipe context so the hardware no longer owns any memory. */
-	gxio_mpipe_destroy(&context);
+	gxio_mpipe_destroy(&md->context);
 
 	for_each_online_cpu(cpu) {
 		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
-		free_pages((unsigned long)(info->comps_for_echannel[0]),
-			   get_order(COMPS_SIZE));
-		info->comps_for_echannel[0] = NULL;
-		free_pages((unsigned long)(info->iqueue.idescs),
+		free_pages(
+			(unsigned long)(
+				info->mpipe[instance].comps_for_echannel[0]),
+			get_order(COMPS_SIZE));
+		info->mpipe[instance].comps_for_echannel[0] = NULL;
+		free_pages((unsigned long)(info->mpipe[instance].iqueue.idescs),
 			   get_order(NOTIF_RING_SIZE));
-		info->iqueue.idescs = NULL;
+		info->mpipe[instance].iqueue.idescs = NULL;
 	}
 
 	for (kind = 0; kind < MAX_KINDS; kind++) {
-		if (buffer_stack_vas[kind] != NULL) {
-			free_pages_exact(buffer_stack_vas[kind],
-					 buffer_stack_bytes[kind]);
-			buffer_stack_vas[kind] = NULL;
+		if (md->buffer_stack_vas[kind] != NULL) {
+			free_pages_exact(md->buffer_stack_vas[kind],
+					 md->buffer_stack_bytes[kind]);
+			md->buffer_stack_vas[kind] = NULL;
 		}
 	}
 
-	first_buffer_stack = -1;
-	first_bucket = -1;
+	md->first_buffer_stack = -1;
+	md->first_bucket = -1;
 }
 
 /* The first time any tilegx network device is opened, we initialize
@@ -1005,6 +1098,8 @@ static int tile_net_init_mpipe(struct net_device *dev)
 	int rc;
 	int cpu;
 	int first_ring, ring;
+	int instance = mpipe_instance(dev);
+	struct mpipe_data *md = &mpipe_data[instance];
 	int network_cpus_count = cpus_weight(network_cpus_map);
 
 	if (!hash_default) {
@@ -1012,9 +1107,10 @@ static int tile_net_init_mpipe(struct net_device *dev)
 		return -EIO;
 	}
 
-	rc = gxio_mpipe_init(&context, 0);
+	rc = gxio_mpipe_init(&md->context, instance);
 	if (rc != 0) {
-		netdev_err(dev, "gxio_mpipe_init failed: %d\n", rc);
+		netdev_err(dev, "gxio_mpipe_init: mpipe[%d] %d\n",
+			   instance, rc);
 		return -EIO;
 	}
 
@@ -1024,7 +1120,8 @@ static int tile_net_init_mpipe(struct net_device *dev)
 		goto fail;
 
 	/* Allocate one NotifRing for each network cpu. */
-	rc = gxio_mpipe_alloc_notif_rings(&context, network_cpus_count, 0, 0);
+	rc = gxio_mpipe_alloc_notif_rings(&md->context,
+					  network_cpus_count, 0, 0);
 	if (rc < 0) {
 		netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n",
 			   rc);
@@ -1054,7 +1151,7 @@ static int tile_net_init_mpipe(struct net_device *dev)
 	return 0;
 
 fail:
-	tile_net_init_mpipe_fail();
+	tile_net_init_mpipe_fail(instance);
 	return rc;
 }
 
@@ -1072,9 +1169,11 @@ static int tile_net_init_egress(struct net_device *dev, int echannel)
 	int headers_order, edescs_order, equeue_order;
 	size_t edescs_size;
 	int rc = -ENOMEM;
+	int instance = mpipe_instance(dev);
+	struct mpipe_data *md = &mpipe_data[instance];
 
 	/* Only initialize once. */
-	if (egress_for_echannel[echannel].equeue != NULL)
+	if (md->egress_for_echannel[echannel].equeue != NULL)
 		return 0;
 
 	/* Allocate memory for the "headers". */
@@ -1113,20 +1212,21 @@ static int tile_net_init_egress(struct net_device *dev, int echannel)
 
 	/* Allocate an edma ring (using a one entry "free list"). */
 	if (ering < 0) {
-		rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0);
+		rc = gxio_mpipe_alloc_edma_rings(&md->context, 1, 0, 0);
 		if (rc < 0) {
-			netdev_warn(dev, "gxio_mpipe_alloc_edma_rings: %d\n",
-				    rc);
+			netdev_warn(dev, "gxio_mpipe_alloc_edma_rings: "
+				    "mpipe[%d] %d\n", instance, rc);
 			goto fail_equeue;
 		}
 		ering = rc;
 	}
 
 	/* Initialize the equeue. */
-	rc = gxio_mpipe_equeue_init(equeue, &context, ering, echannel,
+	rc = gxio_mpipe_equeue_init(equeue, &md->context, ering, echannel,
 				    edescs, edescs_size, 0);
 	if (rc != 0) {
-		netdev_err(dev, "gxio_mpipe_equeue_init failed: %d\n", rc);
+		netdev_err(dev, "gxio_mpipe_equeue_init: mpipe[%d] %d\n",
+			   instance, rc);
 		goto fail_equeue;
 	}
 
@@ -1143,8 +1243,8 @@ static int tile_net_init_egress(struct net_device *dev, int echannel)
 	}
 
 	/* Done. */
-	egress_for_echannel[echannel].equeue = equeue;
-	egress_for_echannel[echannel].headers = headers;
+	md->egress_for_echannel[echannel].equeue = equeue;
+	md->egress_for_echannel[echannel].headers = headers;
 	return 0;
 
 fail_equeue:
@@ -1164,9 +1264,12 @@ fail:
 static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
 			      const char *link_name)
 {
-	int rc = gxio_mpipe_link_open(link, &context, link_name, 0);
+	int instance = mpipe_instance(dev);
+	struct mpipe_data *md = &mpipe_data[instance];
+	int rc = gxio_mpipe_link_open(link, &md->context, link_name, 0);
 	if (rc < 0) {
-		netdev_err(dev, "Failed to open '%s'\n", link_name);
+		netdev_err(dev, "Failed to open '%s', mpipe[%d], %d\n",
+			   link_name, instance, rc);
 		return rc;
 	}
 	if (jumbo_num != 0) {
@@ -1193,12 +1296,21 @@ static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
 static int tile_net_open(struct net_device *dev)
 {
 	struct tile_net_priv *priv = netdev_priv(dev);
-	int cpu, rc;
+	int cpu, rc, instance;
 
 	mutex_lock(&tile_net_devs_for_channel_mutex);
 
-	/* Do one-time initialization the first time any device is opened. */
-	if (ingress_irq < 0) {
+	/* Get the instance info. */
+	rc = gxio_mpipe_link_instance(dev->name);
+	if (rc < 0 || rc >= NR_MPIPE_MAX)
+		return -EIO;
+
+	priv->instance = rc;
+	instance = rc;
+	if (!mpipe_data[rc].context.mmio_fast_base) {
+		/* Do one-time initialization per instance the first time
+		 * any device is opened.
+		 */
 		rc = tile_net_init_mpipe(dev);
 		if (rc != 0)
 			goto fail;
@@ -1229,7 +1341,7 @@ static int tile_net_open(struct net_device *dev)
 	if (rc != 0)
 		goto fail;
 
-	tile_net_devs_for_channel[priv->channel] = dev;
+	mpipe_data[instance].tile_net_devs_for_channel[priv->channel] = dev;
 
 	rc = tile_net_update(dev);
 	if (rc != 0)
@@ -1241,7 +1353,7 @@ static int tile_net_open(struct net_device *dev)
 	for_each_online_cpu(cpu) {
 		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
 		struct tile_net_tx_wake *tx_wake =
-			&info->tx_wake[priv->echannel];
+			&info->mpipe[instance].tx_wake[priv->echannel];
 
 		hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC,
 			     HRTIMER_MODE_REL);
@@ -1267,7 +1379,7 @@ fail:
 		priv->channel = -1;
 	}
 	priv->echannel = -1;
-	tile_net_devs_for_channel[priv->channel] = NULL;
+	mpipe_data[instance].tile_net_devs_for_channel[priv->channel] = NULL;
 	mutex_unlock(&tile_net_devs_for_channel_mutex);
 
 	/* Don't return raw gxio error codes to generic Linux. */
@@ -1279,18 +1391,20 @@ static int tile_net_stop(struct net_device *dev)
 {
 	struct tile_net_priv *priv = netdev_priv(dev);
 	int cpu;
+	int instance = priv->instance;
+	struct mpipe_data *md = &mpipe_data[instance];
 
 	for_each_online_cpu(cpu) {
 		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
 		struct tile_net_tx_wake *tx_wake =
-			&info->tx_wake[priv->echannel];
+			&info->mpipe[instance].tx_wake[priv->echannel];
 
 		hrtimer_cancel(&tx_wake->timer);
 		netif_stop_subqueue(dev, cpu);
 	}
 
 	mutex_lock(&tile_net_devs_for_channel_mutex);
-	tile_net_devs_for_channel[priv->channel] = NULL;
+	md->tile_net_devs_for_channel[priv->channel] = NULL;
 	(void)tile_net_update(dev);
 	if (priv->loopify_channel >= 0) {
 		if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
@@ -1500,6 +1614,8 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
 		       struct sk_buff *skb, unsigned char *headers, s64 slot)
 {
 	struct skb_shared_info *sh = skb_shinfo(skb);
+	int instance = mpipe_instance(dev);
+	struct mpipe_data *md = &mpipe_data[instance];
 	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 	unsigned int data_len = skb->len - sh_len;
 	unsigned int p_len = sh->gso_size;
@@ -1522,8 +1638,8 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
 	edesc_head.xfer_size = sh_len;
 
 	/* This is only used to specify the TLB. */
-	edesc_head.stack_idx = first_buffer_stack;
-	edesc_body.stack_idx = first_buffer_stack;
+	edesc_head.stack_idx = md->first_buffer_stack;
+	edesc_body.stack_idx = md->first_buffer_stack;
 
 	/* Egress all the edescs. */
 	for (segment = 0; segment < sh->gso_segs; segment++) {
@@ -1598,8 +1714,11 @@ static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
 	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
 	struct tile_net_priv *priv = netdev_priv(dev);
 	int channel = priv->echannel;
-	struct tile_net_egress *egress = &egress_for_echannel[channel];
-	struct tile_net_comps *comps = info->comps_for_echannel[channel];
+	int instance = priv->instance;
+	struct mpipe_data *md = &mpipe_data[instance];
+	struct tile_net_egress *egress = &md->egress_for_echannel[channel];
+	struct tile_net_comps *comps =
+		info->mpipe[instance].comps_for_echannel[channel];
 	gxio_mpipe_equeue_t *equeue = egress->equeue;
 	unsigned long irqflags;
 	int num_edescs;
@@ -1663,10 +1782,13 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
 {
 	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
 	struct tile_net_priv *priv = netdev_priv(dev);
-	struct tile_net_egress *egress = &egress_for_echannel[priv->echannel];
+	int instance = priv->instance;
+	struct mpipe_data *md = &mpipe_data[instance];
+	struct tile_net_egress *egress =
+		&md->egress_for_echannel[priv->echannel];
 	gxio_mpipe_equeue_t *equeue = egress->equeue;
 	struct tile_net_comps *comps =
-		info->comps_for_echannel[priv->echannel];
+		info->mpipe[instance].comps_for_echannel[priv->echannel];
 	unsigned int len = skb->len;
 	unsigned char *data = skb->data;
 	unsigned int num_edescs;
@@ -1683,7 +1805,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
 	num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));
 
 	/* This is only used to specify the TLB. */
-	edesc.stack_idx = first_buffer_stack;
+	edesc.stack_idx = md->first_buffer_stack;
 
 	/* Prepare the edescs. */
 	for (i = 0; i < num_edescs; i++) {
@@ -1790,9 +1912,13 @@ static int tile_net_set_mac_address(struct net_device *dev, void *p)
  */
 static void tile_net_netpoll(struct net_device *dev)
 {
-	disable_percpu_irq(ingress_irq);
-	tile_net_handle_ingress_irq(ingress_irq, NULL);
-	enable_percpu_irq(ingress_irq, 0);
+	int instance = mpipe_instance(dev);
+	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+	struct mpipe_data *md = &mpipe_data[instance];
+
+	disable_percpu_irq(md->ingress_irq);
+	napi_schedule(&info->mpipe[instance].napi);
+	enable_percpu_irq(md->ingress_irq, 0);
 }
 #endif
 
@@ -1895,9 +2021,12 @@ static void tile_net_init_module_percpu(void *unused)
 {
 	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
 	int my_cpu = smp_processor_id();
+	int instance;
 
-	info->has_iqueue = false;
-
+	for (instance = 0; instance < NR_MPIPE_MAX; instance++) {
+		info->mpipe[instance].has_iqueue = false;
+		info->mpipe[instance].instance = instance;
+	}
 	info->my_cpu = my_cpu;
 
 	/* Initialize the egress timer. */
@@ -1914,6 +2043,8 @@ static int __init tile_net_init_module(void)
 
 	pr_info("Tilera Network Driver\n");
 
+	BUILD_BUG_ON(NR_MPIPE_MAX != 2);
+
 	mutex_init(&tile_net_devs_for_channel_mutex);
 
 	/* Initialize each CPU. */