author		Anton Blanchard <anton@samba.org>	2011-10-14 01:30:59 -0400
committer	David S. Miller <davem@davemloft.net>	2011-10-17 19:00:54 -0400
commit		b95644685d530de5e9f9658bd8087e50840b831d (patch)
tree		42e51c1e3fbfd52063972c61691b62fb6f3197f0 /drivers/net/ethernet/ibm
parent		3f7947b9f069c125ffdedc75ac9c4e3101fc2c6a (diff)
ehea: Update multiqueue support
The ehea driver had some multiqueue support but was missing the last
few years of networking stack improvements:

- Use skb_record_rx_queue to record which queue an skb came in on.

- Remove the driver specific netif_queue lock and use the networking
  stack transmit lock instead.

- Remove the driver specific transmit queue hashing and use
  skb_get_queue_mapping instead.

- Use netif_tx_{start|stop|wake}_queue where appropriate. We can also
  remove pr->queue_stopped and just check the queue status directly.

- Print all 16 queues in the ethtool stats.

We now enable multiqueue by default since it is a clear win on all my
testing so far.

v3: [cascardo] fixed use_mcs parameter description
    [cascardo] set ehea_ethtool_stats_keys as const

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
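For readers unfamiliar with the multiqueue API the patch adopts, the
allocation side looks roughly like the following. This is a minimal
sketch, not ehea code; my_priv, NUM_QUEUES, and the *_in_use counts are
placeholders, and error handling is omitted:

	/* Allocate a netdev with NUM_QUEUES TX/RX queues, then tell the
	 * stack how many of them are actually in use (possibly fewer
	 * than the allocation maximum). */
	struct net_device *dev;

	dev = alloc_etherdev_mq(sizeof(struct my_priv), NUM_QUEUES);
	if (!dev)
		return -ENOMEM;

	netif_set_real_num_rx_queues(dev, rx_queues_in_use);
	netif_set_real_num_tx_queues(dev, tx_queues_in_use);

The patch does exactly this in ehea_setup_single_port() with
EHEA_MAX_PORT_RES queues; see the last hunk below.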
Diffstat (limited to 'drivers/net/ethernet/ibm')
-rw-r--r--	drivers/net/ethernet/ibm/ehea/ehea.h		|  2
-rw-r--r--	drivers/net/ethernet/ibm/ehea/ehea_ethtool.c	| 17
-rw-r--r--	drivers/net/ethernet/ibm/ehea/ehea_main.c	| 92
3 files changed, 49 insertions(+), 62 deletions(-)
diff --git a/drivers/net/ethernet/ibm/ehea/ehea.h b/drivers/net/ethernet/ibm/ehea/ehea.h
index 5b5c1b5ce31f..e247927139ba 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea.h
@@ -375,8 +375,6 @@ struct ehea_port_res {
 	struct ehea_q_skb_arr rq3_skba;
 	struct ehea_q_skb_arr sq_skba;
 	int sq_skba_size;
-	spinlock_t netif_queue;
-	int queue_stopped;
 	int swqe_refill_th;
 	atomic_t swqe_avail;
 	int swqe_ll_count;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
index 7f642aef5e82..d185016c79ef 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
@@ -180,7 +180,7 @@ static void ehea_set_msglevel(struct net_device *dev, u32 value)
 	port->msg_enable = value;
 }
 
-static char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = {
+static const char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = {
 	{"sig_comp_iv"},
 	{"swqe_refill_th"},
 	{"port resets"},
@@ -189,7 +189,6 @@ static char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = {
 	{"IP cksum errors"},
 	{"Frame cksum errors"},
 	{"num SQ stopped"},
-	{"SQ stopped"},
 	{"PR0 free_swqes"},
 	{"PR1 free_swqes"},
 	{"PR2 free_swqes"},
@@ -198,6 +197,14 @@ static char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = {
 	{"PR5 free_swqes"},
 	{"PR6 free_swqes"},
 	{"PR7 free_swqes"},
+	{"PR8 free_swqes"},
+	{"PR9 free_swqes"},
+	{"PR10 free_swqes"},
+	{"PR11 free_swqes"},
+	{"PR12 free_swqes"},
+	{"PR13 free_swqes"},
+	{"PR14 free_swqes"},
+	{"PR15 free_swqes"},
 	{"LRO aggregated"},
 	{"LRO flushed"},
 	{"LRO no_desc"},
@@ -255,11 +262,7 @@ static void ehea_get_ethtool_stats(struct net_device *dev,
 		tmp += port->port_res[k].p_stats.queue_stopped;
 	data[i++] = tmp;
 
-	for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
-		tmp |= port->port_res[k].queue_stopped;
-	data[i++] = tmp;
-
-	for (k = 0; k < 8; k++)
+	for (k = 0; k < 16; k++)
 		data[i++] = atomic_read(&port->port_res[k].swqe_avail);
 
 	for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index ce9a67032724..a6c4192e12f4 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -61,7 +61,7 @@ static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
 static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
 static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
 static int sq_entries = EHEA_DEF_ENTRIES_SQ;
-static int use_mcs;
+static int use_mcs = 1;
 static int use_lro;
 static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
 static int num_tx_qps = EHEA_NUM_TX_QP;
@@ -94,7 +94,8 @@ MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
 MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue  "
 		 "[2^x - 1], x = [6..14]. Default = "
 		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
-MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 0 ");
+MODULE_PARM_DESC(use_mcs, " Multiple receive queues, 1: enable, 0: disable, "
+		 "Default = 1");
 
 MODULE_PARM_DESC(lro_max_aggr, " LRO: Max packets to be aggregated. Default = "
 		 __MODULE_STRING(EHEA_LRO_MAX_AGGR));
@@ -551,7 +552,8 @@ static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
 }
 
 static inline void ehea_fill_skb(struct net_device *dev,
-				 struct sk_buff *skb, struct ehea_cqe *cqe)
+				 struct sk_buff *skb, struct ehea_cqe *cqe,
+				 struct ehea_port_res *pr)
 {
 	int length = cqe->num_bytes_transfered - 4;	/*remove CRC */
 
@@ -565,6 +567,8 @@ static inline void ehea_fill_skb(struct net_device *dev,
 		skb->csum = csum_unfold(~cqe->inet_checksum_value);
 	} else
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+	skb_record_rx_queue(skb, pr - &pr->port->port_res[0]);
 }
 
 static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
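The second argument to skb_record_rx_queue() above is computed with
pointer arithmetic: pr points at one element of the port's port_res[]
array, so subtracting the array base yields that element's index, i.e.
the receive queue number. In isolation:

	/* pr points at port->port_res[n]; pointer subtraction on the
	 * array recovers n, the index of this queue pair. */
	int qidx = pr - &pr->port->port_res[0];

	skb_record_rx_queue(skb, qidx);

For forwarded traffic, the recorded RX queue is consulted when the
stack later picks a TX queue, keeping a flow on matching queues.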
@@ -750,7 +754,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
 			}
 			skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
 					cqe->num_bytes_transfered - 4);
-			ehea_fill_skb(dev, skb, cqe);
+			ehea_fill_skb(dev, skb, cqe, pr);
 		} else if (rq == 2) {
 			/* RQ2 */
 			skb = get_skb_by_index(skb_arr_rq2,
@@ -760,7 +764,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
 					  "rq2: skb=NULL\n");
 				break;
 			}
-			ehea_fill_skb(dev, skb, cqe);
+			ehea_fill_skb(dev, skb, cqe, pr);
 			processed_rq2++;
 		} else {
 			/* RQ3 */
@@ -771,7 +775,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
 					  "rq3: skb=NULL\n");
 				break;
 			}
-			ehea_fill_skb(dev, skb, cqe);
+			ehea_fill_skb(dev, skb, cqe, pr);
 			processed_rq3++;
 		}
 
@@ -857,7 +861,8 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 	int cqe_counter = 0;
 	int swqe_av = 0;
 	int index;
-	unsigned long flags;
+	struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev,
+						pr - &pr->port->port_res[0]);
 
 	cqe = ehea_poll_cq(send_cq);
 	while (cqe && (quota > 0)) {
@@ -907,14 +912,15 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 	ehea_update_feca(send_cq, cqe_counter);
 	atomic_add(swqe_av, &pr->swqe_avail);
 
-	spin_lock_irqsave(&pr->netif_queue, flags);
-
-	if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
-				  >= pr->swqe_refill_th)) {
-		netif_wake_queue(pr->port->netdev);
-		pr->queue_stopped = 0;
+	if (unlikely(netif_tx_queue_stopped(txq) &&
+		     (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) {
+		__netif_tx_lock(txq, smp_processor_id());
+		if (netif_tx_queue_stopped(txq) &&
+		    (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))
+			netif_tx_wake_queue(txq);
+		__netif_tx_unlock(txq);
 	}
-	spin_unlock_irqrestore(&pr->netif_queue, flags);
+
 	wake_up(&pr->port->swqe_avail_wq);
 
 	return cqe;
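The wake path above is the standard double-checked pattern for
per-queue TX flow control: the queue state and ring occupancy are
tested once without the lock (cheap on the hot path), then re-tested
under __netif_tx_lock(), which is the same per-queue lock the stack
holds around ndo_start_xmit. The re-check closes the race where the
transmit path stops the queue between the first test and the wake. The
bare shape of the idiom, with ring_has_space() as a stand-in for the
swqe_avail/swqe_refill_th comparison:

	if (netif_tx_queue_stopped(txq) && ring_has_space(pr)) {
		__netif_tx_lock(txq, smp_processor_id());
		/* State is stable here: the xmit path runs under the
		 * same per-queue lock. */
		if (netif_tx_queue_stopped(txq) && ring_has_space(pr))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}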
@@ -1251,7 +1257,7 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
 			netif_info(port, link, dev,
 				   "Logical port down\n");
 			netif_carrier_off(dev);
-			netif_stop_queue(dev);
+			netif_tx_disable(dev);
 		}
 
 		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
@@ -1282,7 +1288,7 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
 	case EHEA_EC_PORT_MALFUNC:
 		netdev_info(dev, "Port malfunction\n");
 		netif_carrier_off(dev);
-		netif_stop_queue(dev);
+		netif_tx_disable(dev);
 		break;
 	default:
 		netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
@@ -1534,7 +1540,6 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
 	pr->rx_packets = rx_packets;
 
 	pr->port = port;
-	spin_lock_init(&pr->netif_queue);
 
 	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
 	if (!pr->eq) {
@@ -2226,35 +2231,17 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
 		dev_kfree_skb(skb);
 }
 
-static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
-{
-	struct tcphdr *tcp;
-	u32 tmp;
-
-	if ((skb->protocol == htons(ETH_P_IP)) &&
-	    (ip_hdr(skb)->protocol == IPPROTO_TCP)) {
-		tcp = (struct tcphdr *)(skb_network_header(skb) +
-					(ip_hdr(skb)->ihl * 4));
-		tmp = (tcp->source + (tcp->dest << 16)) % 31;
-		tmp += ip_hdr(skb)->daddr % 31;
-		return tmp % num_qps;
-	} else
-		return 0;
-}
-
 static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ehea_port *port = netdev_priv(dev);
 	struct ehea_swqe *swqe;
-	unsigned long flags;
 	u32 lkey;
 	int swqe_index;
 	struct ehea_port_res *pr;
+	struct netdev_queue *txq;
 
-	pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];
-
-	if (pr->queue_stopped)
-		return NETDEV_TX_BUSY;
+	pr = &port->port_res[skb_get_queue_mapping(skb)];
+	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
 	swqe = ehea_get_swqe(pr->qp, &swqe_index);
 	memset(swqe, 0, SWQE_HEADER_SIZE);
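With ehea_hash_skb() removed, TX queue selection happens in the
networking core before ndo_start_xmit is called (the default flow hash,
since the driver supplies no ndo_select_queue as far as this patch
shows); the driver only reads the decision back and maps it onto its
own structures:

	/* The stack has already picked the queue; recover its index
	 * and look up the matching queue pair and netdev_queue. */
	u16 qidx = skb_get_queue_mapping(skb);
	struct ehea_port_res *pr = &port->port_res[qidx];
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qidx);

One consequence is that the old pr->queue_stopped / NETDEV_TX_BUSY
check could be dropped: the core does not dispatch packets to a queue
it has seen stopped.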
@@ -2304,20 +2291,15 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		ehea_dump(swqe, 512, "swqe");
 
 	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
-		netif_stop_queue(dev);
+		netif_tx_stop_queue(txq);
 		swqe->tx_control |= EHEA_SWQE_PURGE;
 	}
 
 	ehea_post_swqe(pr->qp, swqe);
 
 	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
-		spin_lock_irqsave(&pr->netif_queue, flags);
-		if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
-			pr->p_stats.queue_stopped++;
-			netif_stop_queue(dev);
-			pr->queue_stopped = 1;
-		}
-		spin_unlock_irqrestore(&pr->netif_queue, flags);
+		pr->p_stats.queue_stopped++;
+		netif_tx_stop_queue(txq);
 	}
 
 	return NETDEV_TX_OK;
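On the stop side just above, the driver now stops only its own queue
instead of freezing the whole device, and the old nested lock/re-check
is gone: ehea_start_xmit already runs under the per-queue TX lock, so
the test-and-stop is atomic with respect to the wake path. Annotated,
the stop threshold reads:

	ehea_post_swqe(pr->qp, swqe);

	/* Stop when the send queue is essentially exhausted ... */
	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
		pr->p_stats.queue_stopped++;	/* "num SQ stopped" in ethtool -S */
		netif_tx_stop_queue(txq);	/* stop just this queue */
	}

Note the hysteresis: the queue stops at swqe_avail <= 1 but is woken in
ehea_proc_cqes() only once swqe_avail >= pr->swqe_refill_th, so it does
not flap between stopped and running on every completion.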
@@ -2642,7 +2624,7 @@ static int ehea_open(struct net_device *dev)
 	ret = ehea_up(dev);
 	if (!ret) {
 		port_napi_enable(port);
-		netif_start_queue(dev);
+		netif_tx_start_all_queues(dev);
 	}
 
 	mutex_unlock(&port->port_lock);
@@ -2688,7 +2670,7 @@ static int ehea_stop(struct net_device *dev)
 	cancel_work_sync(&port->reset_task);
 	cancel_delayed_work_sync(&port->stats_work);
 	mutex_lock(&port->port_lock);
-	netif_stop_queue(dev);
+	netif_tx_stop_all_queues(dev);
 	port_napi_disable(port);
 	ret = ehea_down(dev);
 	mutex_unlock(&port->port_lock);
@@ -2912,7 +2894,7 @@ static void ehea_reset_port(struct work_struct *work)
 	mutex_lock(&dlpar_mem_lock);
 	port->resets++;
 	mutex_lock(&port->port_lock);
-	netif_stop_queue(dev);
+	netif_tx_disable(dev);
 
 	port_napi_disable(port);
 
@@ -2928,7 +2910,7 @@ static void ehea_reset_port(struct work_struct *work)
 
 	port_napi_enable(port);
 
-	netif_wake_queue(dev);
+	netif_tx_wake_all_queues(dev);
 out:
 	mutex_unlock(&port->port_lock);
 	mutex_unlock(&dlpar_mem_lock);
@@ -2955,7 +2937,7 @@ static void ehea_rereg_mrs(void)
 
 		if (dev->flags & IFF_UP) {
 			mutex_lock(&port->port_lock);
-			netif_stop_queue(dev);
+			netif_tx_disable(dev);
 			ehea_flush_sq(port);
 			ret = ehea_stop_qps(dev);
 			if (ret) {
@@ -3000,7 +2982,7 @@ static void ehea_rereg_mrs(void)
 			if (!ret) {
 				check_sqs(port);
 				port_napi_enable(port);
-				netif_wake_queue(dev);
+				netif_tx_wake_all_queues(dev);
 			} else {
 				netdev_err(dev, "Unable to restart QPS\n");
 			}
@@ -3176,7 +3158,7 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
 	int jumbo;
 
 	/* allocate memory for the port structures */
-	dev = alloc_etherdev(sizeof(struct ehea_port));
+	dev = alloc_etherdev_mq(sizeof(struct ehea_port), EHEA_MAX_PORT_RES);
 
 	if (!dev) {
 		pr_err("no mem for net_device\n");
@@ -3208,6 +3190,10 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
 	if (ret)
 		goto out_free_mc_list;
 
+	netif_set_real_num_rx_queues(dev, port->num_def_qps);
+	netif_set_real_num_tx_queues(dev, port->num_def_qps +
+				     port->num_add_tx_qps);
+
 	port_dev = ehea_register_port(port, dn);
 	if (!port_dev)
 		goto out_free_mc_list;