Diffstat (limited to 'drivers/net/niu.c')
 -rw-r--r--  drivers/net/niu.c | 203
 1 file changed, 183 insertions(+), 20 deletions(-)
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 918f802fe089..8ee7d7bb951b 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -3236,10 +3236,14 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
 
 static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
 {
+        struct netdev_queue *txq;
         u16 pkt_cnt, tmp;
-        int cons;
+        int cons, index;
         u64 cs;
 
+        index = (rp - np->tx_rings);
+        txq = netdev_get_tx_queue(np->dev, index);
+
         cs = rp->tx_cs;
         if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
                 goto out;
@@ -3262,13 +3266,13 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
         smp_mb();
 
 out:
-        if (unlikely(netif_queue_stopped(np->dev) &&
+        if (unlikely(netif_tx_queue_stopped(txq) &&
                      (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
-                netif_tx_lock(np->dev);
-                if (netif_queue_stopped(np->dev) &&
+                __netif_tx_lock(txq, smp_processor_id());
+                if (netif_tx_queue_stopped(txq) &&
                     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
-                        netif_wake_queue(np->dev);
-                netif_tx_unlock(np->dev);
+                        netif_tx_wake_queue(txq);
+                __netif_tx_unlock(txq);
         }
 }
 
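The out: path above is the standard per-queue stop/wake idiom: the cheap unlocked check is repeated under __netif_tx_lock() so a niu_start_xmit() racing on the same queue cannot stop it again between the check and the wake. As a standalone sketch of that idiom only (struct example_ring, example_tx_avail() and EXAMPLE_WAKE_THRESH() are hypothetical stand-ins, not part of this patch; only the netif_tx_* calls are real kernel API):

/* Sketch of the double-check wake pattern used by niu_tx_work() above. */
static void example_tx_done(struct net_device *dev, struct example_ring *rp,
                            int index)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, index);

        /* ... reclaim completed descriptors for ring 'index' here ... */

        if (unlikely(netif_tx_queue_stopped(txq) &&
                     example_tx_avail(rp) > EXAMPLE_WAKE_THRESH(rp))) {
                __netif_tx_lock(txq, smp_processor_id());
                /* re-check under the queue lock to close the race with xmit */
                if (netif_tx_queue_stopped(txq) &&
                    example_tx_avail(rp) > EXAMPLE_WAKE_THRESH(rp))
                        netif_tx_wake_queue(txq);
                __netif_tx_unlock(txq);
        }
}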
@@ -4061,6 +4065,8 @@ static int niu_alloc_channels(struct niu *np)
         np->num_rx_rings = parent->rxchan_per_port[port];
         np->num_tx_rings = parent->txchan_per_port[port];
 
+        np->dev->real_num_tx_queues = np->num_tx_rings;
+
         np->rx_rings = kzalloc(np->num_rx_rings * sizeof(struct rx_ring_info),
                                GFP_KERNEL);
         err = -ENOMEM;
@@ -5686,7 +5692,7 @@ static int niu_open(struct net_device *dev)
                 goto out_free_irq;
         }
 
-        netif_start_queue(dev);
+        netif_tx_start_all_queues(dev);
 
         if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
                 netif_carrier_on(dev);
@@ -5710,7 +5716,7 @@ static void niu_full_shutdown(struct niu *np, struct net_device *dev)
         cancel_work_sync(&np->reset_task);
 
         niu_disable_napi(np);
-        netif_stop_queue(dev);
+        netif_tx_stop_all_queues(dev);
 
         del_timer_sync(&np->timer);
 
@@ -5971,7 +5977,7 @@ static void niu_netif_start(struct niu *np)
          * so long as all callers are assured to have free tx slots
          * (such as after niu_init_hw).
          */
-        netif_wake_queue(np->dev);
+        netif_tx_wake_all_queues(np->dev);
 
         niu_enable_napi(np);
 
@@ -6097,15 +6103,11 @@ static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
         return ret;
 }
 
-static struct tx_ring_info *tx_ring_select(struct niu *np, struct sk_buff *skb)
-{
-        return &np->tx_rings[0];
-}
-
 static int niu_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
         struct niu *np = netdev_priv(dev);
         unsigned long align, headroom;
+        struct netdev_queue *txq;
         struct tx_ring_info *rp;
         struct tx_pkt_hdr *tp;
         unsigned int len, nfg;
@@ -6113,10 +6115,12 @@ static int niu_start_xmit(struct sk_buff *skb, struct net_device *dev)
         int prod, i, tlen;
         u64 mapping, mrk;
 
-        rp = tx_ring_select(np, skb);
+        i = skb_get_queue_mapping(skb);
+        rp = &np->tx_rings[i];
+        txq = netdev_get_tx_queue(dev, i);
 
         if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
-                netif_stop_queue(dev);
+                netif_tx_stop_queue(txq);
                 dev_err(np->device, PFX "%s: BUG! Tx ring full when "
                         "queue awake!\n", dev->name);
                 rp->tx_errors++;
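skb_get_queue_mapping() above simply reads back the TX queue index that the core transmit path chose and stored in the skb before calling the driver; because the driver keeps one tx_ring_info per netdev queue, the same index selects both the ring and its netdev_queue. Conceptually the core's choice looks like the sketch below. This is an assumption-labelled illustration, not code from the patch: the real selection of that era lived in dev_pick_tx()/simple_tx_hash() in net/core/dev.c, and skb_get_hash() here merely stands in for a stable flow hash.

/* Illustrative only: fold a flow hash onto the device's active TX queues.
 * The chosen index is what skb_get_queue_mapping() later returns. */
static u16 example_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
        u32 hash = skb_get_hash(skb);

        /* map the 32-bit hash onto [0, real_num_tx_queues) without a modulo */
        return (u16)(((u64)hash * dev->real_num_tx_queues) >> 32);
}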
@@ -6215,9 +6219,9 @@ static int niu_start_xmit(struct sk_buff *skb, struct net_device *dev)
         nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3));
 
         if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) {
-                netif_stop_queue(dev);
+                netif_tx_stop_queue(txq);
                 if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))
-                        netif_wake_queue(dev);
+                        netif_tx_wake_queue(txq);
         }
 
         dev->trans_start = jiffies;
@@ -6275,7 +6279,7 @@ static int niu_change_mtu(struct net_device *dev, int new_mtu)
         spin_unlock_irq(&np->lock);
 
         if (!err) {
-                netif_start_queue(dev);
+                netif_tx_start_all_queues(dev);
                 if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
                         netif_carrier_on(dev);
 
@@ -6385,6 +6389,162 @@ static int niu_get_eeprom(struct net_device *dev,
         return 0;
 }
 
+static int niu_ethflow_to_class(int flow_type, u64 *class)
+{
+        switch (flow_type) {
+        case TCP_V4_FLOW:
+                *class = CLASS_CODE_TCP_IPV4;
+                break;
+        case UDP_V4_FLOW:
+                *class = CLASS_CODE_UDP_IPV4;
+                break;
+        case AH_ESP_V4_FLOW:
+                *class = CLASS_CODE_AH_ESP_IPV4;
+                break;
+        case SCTP_V4_FLOW:
+                *class = CLASS_CODE_SCTP_IPV4;
+                break;
+        case TCP_V6_FLOW:
+                *class = CLASS_CODE_TCP_IPV6;
+                break;
+        case UDP_V6_FLOW:
+                *class = CLASS_CODE_UDP_IPV6;
+                break;
+        case AH_ESP_V6_FLOW:
+                *class = CLASS_CODE_AH_ESP_IPV6;
+                break;
+        case SCTP_V6_FLOW:
+                *class = CLASS_CODE_SCTP_IPV6;
+                break;
+        default:
+                return -1;
+        }
+
+        return 1;
+}
+
+static u64 niu_flowkey_to_ethflow(u64 flow_key)
+{
+        u64 ethflow = 0;
+
+        if (flow_key & FLOW_KEY_PORT)
+                ethflow |= RXH_DEV_PORT;
+        if (flow_key & FLOW_KEY_L2DA)
+                ethflow |= RXH_L2DA;
+        if (flow_key & FLOW_KEY_VLAN)
+                ethflow |= RXH_VLAN;
+        if (flow_key & FLOW_KEY_IPSA)
+                ethflow |= RXH_IP_SRC;
+        if (flow_key & FLOW_KEY_IPDA)
+                ethflow |= RXH_IP_DST;
+        if (flow_key & FLOW_KEY_PROTO)
+                ethflow |= RXH_L3_PROTO;
+        if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT))
+                ethflow |= RXH_L4_B_0_1;
+        if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT))
+                ethflow |= RXH_L4_B_2_3;
+
+        return ethflow;
+
+}
+
+static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key)
+{
+        u64 key = 0;
+
+        if (ethflow & RXH_DEV_PORT)
+                key |= FLOW_KEY_PORT;
+        if (ethflow & RXH_L2DA)
+                key |= FLOW_KEY_L2DA;
+        if (ethflow & RXH_VLAN)
+                key |= FLOW_KEY_VLAN;
+        if (ethflow & RXH_IP_SRC)
+                key |= FLOW_KEY_IPSA;
+        if (ethflow & RXH_IP_DST)
+                key |= FLOW_KEY_IPDA;
+        if (ethflow & RXH_L3_PROTO)
+                key |= FLOW_KEY_PROTO;
+        if (ethflow & RXH_L4_B_0_1)
+                key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT);
+        if (ethflow & RXH_L4_B_2_3)
+                key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT);
+
+        *flow_key = key;
+
+        return 1;
+
+}
+
+static int niu_get_hash_opts(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+        struct niu *np = netdev_priv(dev);
+        u64 class;
+
+        cmd->data = 0;
+
+        if (!niu_ethflow_to_class(cmd->flow_type, &class))
+                return -EINVAL;
+
+        if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
+            TCAM_KEY_DISC)
+                cmd->data = RXH_DISCARD;
+        else
+
+                cmd->data = niu_flowkey_to_ethflow(np->parent->flow_key[class -
+                                                   CLASS_CODE_USER_PROG1]);
+        return 0;
+}
+
+static int niu_set_hash_opts(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+        struct niu *np = netdev_priv(dev);
+        u64 class;
+        u64 flow_key = 0;
+        unsigned long flags;
+
+        if (!niu_ethflow_to_class(cmd->flow_type, &class))
+                return -EINVAL;
+
+        if (class < CLASS_CODE_USER_PROG1 ||
+            class > CLASS_CODE_SCTP_IPV6)
+                return -EINVAL;
+
+        if (cmd->data & RXH_DISCARD) {
+                niu_lock_parent(np, flags);
+                flow_key = np->parent->tcam_key[class -
+                                                CLASS_CODE_USER_PROG1];
+                flow_key |= TCAM_KEY_DISC;
+                nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
+                np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key;
+                niu_unlock_parent(np, flags);
+                return 0;
+        } else {
+                /* Discard was set before, but is not set now */
+                if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
+                    TCAM_KEY_DISC) {
+                        niu_lock_parent(np, flags);
+                        flow_key = np->parent->tcam_key[class -
+                                                        CLASS_CODE_USER_PROG1];
+                        flow_key &= ~TCAM_KEY_DISC;
+                        nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1),
+                             flow_key);
+                        np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] =
+                                flow_key;
+                        niu_unlock_parent(np, flags);
+                }
+        }
+
+        if (!niu_ethflow_to_flowkey(cmd->data, &flow_key))
+                return -EINVAL;
+
+        niu_lock_parent(np, flags);
+        nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
+        np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key;
+        niu_unlock_parent(np, flags);
+
+        return 0;
+}
+
 static const struct {
         const char string[ETH_GSTRING_LEN];
 } niu_xmac_stat_keys[] = {
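The two handlers added above implement the rx flow hash side of the ethtool rxnfc interface: the get path reports, and the set path programs, which header fields feed the NIU flow key (or whether matching traffic is discarded via TCAM_KEY_DISC). Purely as an illustration of the request they consume, a user-space caller might fill a struct ethtool_rxnfc roughly as follows; the ETHTOOL_SRXFH/SIOCETHTOOL plumbing shown is assumed from the generic ethtool ABI and is not part of this patch.

/* Hypothetical user-space sketch: hash TCP/IPv4 flows on both IP
 * addresses and both TCP ports.  Error handling kept minimal. */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int example_set_tcp4_hash(int sock, const char *ifname)
{
        struct ethtool_rxnfc cmd;
        struct ifreq ifr;

        memset(&cmd, 0, sizeof(cmd));
        cmd.cmd = ETHTOOL_SRXFH;              /* reaches the driver's set_rxhash */
        cmd.flow_type = TCP_V4_FLOW;
        cmd.data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = (char *)&cmd;

        return ioctl(sock, SIOCETHTOOL, &ifr);        /* 0 on success */
}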
@@ -6615,6 +6775,8 @@ static const struct ethtool_ops niu_ethtool_ops = {
         .get_stats_count = niu_get_stats_count,
         .get_ethtool_stats = niu_get_ethtool_stats,
         .phys_id = niu_phys_id,
+        .get_rxhash = niu_get_hash_opts,
+        .set_rxhash = niu_set_hash_opts,
 };
 
 static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
@@ -8374,9 +8536,10 @@ static struct net_device * __devinit niu_alloc_and_init(
         struct of_device *op, const struct niu_ops *ops,
         u8 port)
 {
-        struct net_device *dev = alloc_etherdev(sizeof(struct niu));
+        struct net_device *dev;
         struct niu *np;
 
+        dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
         if (!dev) {
                 dev_err(gen_dev, PFX "Etherdev alloc failed, aborting.\n");
                 return NULL;
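alloc_etherdev_mq() sizes the net_device for the hardware maximum of TX queues (NIU_NUM_TXCHAN); the niu_alloc_channels() hunk earlier then trims dev->real_num_tx_queues down to the channels actually assigned to this port. A minimal sketch of that allocate-the-maximum, expose-the-actual pattern, with hypothetical names (EXAMPLE_MAX_TXQ, struct example_priv are made up for illustration):

/* Illustrative only; not part of this patch. */
#define EXAMPLE_MAX_TXQ 16          /* arbitrary hardware maximum */

struct example_priv {
        int nr_tx_rings;
};

static struct net_device *example_alloc_netdev(unsigned int txq_in_use)
{
        struct net_device *dev;

        /* reserve queue state for the hardware maximum up front ... */
        dev = alloc_etherdev_mq(sizeof(struct example_priv), EXAMPLE_MAX_TXQ);
        if (!dev)
                return NULL;

        /* ... but only expose the queues this port really owns */
        dev->real_num_tx_queues = txq_in_use;
        return dev;
}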
