Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c   | 249
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c   | 344
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 107
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.c      | 183
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.h      | 171
5 files changed, 1054 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
new file mode 100644
index 000000000000..ce1317cdabd7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -0,0 +1,249 @@
/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include "en.h"

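/* Allocate an skb for the RQ slot at index @ix, DMA-map it, and point
 * the WQE's data segment at the mapped buffer. The mapping is stashed
 * in skb->cb so completion processing can unmap it later.
 */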
static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
				     struct mlx5e_rx_wqe *wqe, u16 ix)
{
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	skb = netdev_alloc_skb(rq->netdev, rq->wqe_sz);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_reserve(skb, MLX5E_NET_IP_ALIGN);

	dma_addr = dma_map_single(rq->pdev,
				  /* hw start padding */
				  skb->data - MLX5E_NET_IP_ALIGN,
				  /* hw end padding */
				  rq->wqe_sz,
				  DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
		goto err_free_skb;

	*((dma_addr_t *)skb->cb) = dma_addr;
	wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN);

	rq->skb[ix] = skb;

	return 0;

err_free_skb:
	dev_kfree_skb(skb);

	return -ENOMEM;
}

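/* Refill the RQ: keep posting receive WQEs until the linked-list work
 * queue is full or an skb allocation fails, then ring the doorbell.
 * Returns true if the RQ still has room, i.e. another refill is needed.
 */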
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
	struct mlx5_wq_ll *wq = &rq->wq;

	if (unlikely(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state)))
		return false;

	while (!mlx5_wq_ll_is_full(wq)) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);

		if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, wq->head)))
			break;

		mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
	}

	/* ensure wqes are visible to device before updating doorbell record */
	dma_wmb();

	mlx5_wq_ll_update_db_record(wq);

	return !mlx5_wq_ll_is_full(wq);
}

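/* LRO merges several TCP segments into one skb; fix up the merged
 * packet's IP and TCP headers (total length, TTL/hop limit, checksum,
 * ACK/PSH flags, window) so the stack sees one consistent large packet.
 */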
static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe)
{
	struct ethhdr *eth = (struct ethhdr *)(skb->data);
	struct iphdr *ipv4 = (struct iphdr *)(skb->data + ETH_HLEN);
	struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
	struct tcphdr *tcp;

	u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
	int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
		       (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));

	u16 tot_len = be32_to_cpu(cqe->byte_cnt) - ETH_HLEN;

	if (eth->h_proto == htons(ETH_P_IP)) {
		tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
					sizeof(struct iphdr));
		ipv6 = NULL;
	} else {
		tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
					sizeof(struct ipv6hdr));
		ipv4 = NULL;
	}

	if (get_cqe_lro_tcppsh(cqe))
		tcp->psh = 1;

	if (tcp_ack) {
		tcp->ack = 1;
		tcp->ack_seq = cqe->lro_ack_seq_num;
		tcp->window = cqe->lro_tcp_win;
	}

	if (ipv4) {
		ipv4->ttl = cqe->lro_min_ttl;
		ipv4->tot_len = cpu_to_be16(tot_len);
		ipv4->check = 0;
		ipv4->check = ip_fast_csum((unsigned char *)ipv4,
					   ipv4->ihl);
	} else {
		ipv6->hop_limit = cqe->lro_min_ttl;
		ipv6->payload_len = cpu_to_be16(tot_len -
						sizeof(struct ipv6hdr));
	}
}

static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
				      struct sk_buff *skb)
{
	u8 cht = cqe->rss_hash_type;
	int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
		 (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
					    PKT_HASH_TYPE_NONE;
	skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
}

static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
				      struct mlx5e_rq *rq,
				      struct sk_buff *skb)
{
	struct net_device *netdev = rq->netdev;
	u32 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
	int lro_num_seg;

	skb_put(skb, cqe_bcnt);

	lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
	if (lro_num_seg > 1) {
		mlx5e_lro_update_hdr(skb, cqe);
		skb_shinfo(skb)->gso_size = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
		rq->stats.lro_packets++;
		rq->stats.lro_bytes += cqe_bcnt;
	}

	if (likely(netdev->features & NETIF_F_RXCSUM) &&
	    (cqe->hds_ip_ext & CQE_L2_OK) &&
	    (cqe->hds_ip_ext & CQE_L3_OK) &&
	    (cqe->hds_ip_ext & CQE_L4_OK)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb->ip_summed = CHECKSUM_NONE;
		rq->stats.csum_none++;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	skb_record_rx_queue(skb, rq->ix);

	if (likely(netdev->features & NETIF_F_RXHASH))
		mlx5e_skb_set_hash(cqe, skb);

	if (cqe_has_vlan(cqe))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       be16_to_cpu(cqe->vlan_info));
}

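/* Process up to @budget RX completions: unmap the buffer, build the skb
 * from the CQE, and hand it to GRO. Returns true if the whole budget
 * was consumed, i.e. more completions may be pending on the CQ.
 */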
bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
	struct mlx5e_rq *rq = cq->sqrq;
	int i;

	/* avoid accessing cq (dma coherent memory) if not needed */
	if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
		return false;

	for (i = 0; i < budget; i++) {
		struct mlx5e_rx_wqe *wqe;
		struct mlx5_cqe64 *cqe;
		struct sk_buff *skb;
		__be16 wqe_counter_be;
		u16 wqe_counter;

		cqe = mlx5e_get_cqe(cq);
		if (!cqe)
			break;

		wqe_counter_be = cqe->wqe_counter;
		wqe_counter = be16_to_cpu(wqe_counter_be);
		wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
		skb = rq->skb[wqe_counter];
		rq->skb[wqe_counter] = NULL;

		dma_unmap_single(rq->pdev,
				 *((dma_addr_t *)skb->cb),
				 skb_end_offset(skb),
				 DMA_FROM_DEVICE);

		if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
			rq->stats.wqe_err++;
			dev_kfree_skb(skb);
			goto wq_ll_pop;
		}

		mlx5e_build_rx_skb(cqe, rq, skb);
		rq->stats.packets++;
		napi_gro_receive(cq->napi, skb);

wq_ll_pop:
		mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
			       &wqe->next.next_wqe_index);
	}

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	if (i == budget) {
		set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
		return true;
	}

	return false;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
new file mode 100644
index 000000000000..8020986cdaf6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -0,0 +1,344 @@
/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include "en.h"

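/* The SQ records every DMA mapping it pushes in a small ring
 * (dma_fifo); TX completion pops the entries in order to unmap them,
 * and the error path pops the most recent ones to unwind a failed WQE.
 */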
static void mlx5e_dma_pop_last_pushed(struct mlx5e_sq *sq, dma_addr_t *addr,
				      u32 *size)
{
	sq->dma_fifo_pc--;
	*addr = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr;
	*size = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size;
}

static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
{
	dma_addr_t addr;
	u32 size;
	int i;

	for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
		mlx5e_dma_pop_last_pushed(sq, &addr, &size);
		dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
	}
}

static inline void mlx5e_dma_push(struct mlx5e_sq *sq, dma_addr_t addr,
				  u32 size)
{
	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
	sq->dma_fifo_pc++;
}

static inline void mlx5e_dma_get(struct mlx5e_sq *sq, u32 i, dma_addr_t *addr,
				 u32 *size)
{
	*addr = sq->dma_fifo[i & sq->dma_fifo_mask].addr;
	*size = sq->dma_fifo[i & sq->dma_fifo_mask].size;
}

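/* Select the TX queue: the fallback picks the channel, while the VLAN
 * priority (or the device default) picks the traffic class.
 */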
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       void *accel_priv, select_queue_fallback_t fallback)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int channel_ix = fallback(dev, skb);
	int up = skb_vlan_tag_present(skb) ?
		 skb->vlan_tci >> VLAN_PRIO_SHIFT :
		 priv->default_vlan_prio;
	int tc = netdev_get_prio_tc_map(dev, up);

	return (tc << priv->order_base_2_num_channels) | channel_ix;
}

static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
					    struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE 16 /* eth header with vlan (w/o next ethertype) */
	return MLX5E_MIN_INLINE;
}

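/* Build the WQE's inline header with an 802.1Q tag inserted in
 * software: copy the MAC addresses, write the VLAN tag, then copy the
 * rest of the inline header, pulling the copied bytes off the skb.
 */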
static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
{
	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
	int cpy1_sz = 2 * ETH_ALEN;
	int cpy2_sz = ihs - cpy1_sz - VLAN_HLEN;

	skb_copy_from_linear_data(skb, vhdr, cpy1_sz);
	skb_pull_inline(skb, cpy1_sz);
	vhdr->h_vlan_proto = skb->vlan_proto;
	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
	skb_copy_from_linear_data(skb, &vhdr->h_vlan_encapsulated_proto,
				  cpy2_sz);
	skb_pull_inline(skb, cpy2_sz);
}

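/* Fill one send WQE for @skb: control segment, eth segment with inline
 * headers (LSO or the minimal inline), and a data segment per
 * DMA-mapped piece; then advance the producer counter and ring the
 * doorbell unless more packets are about to follow (xmit_more).
 */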
static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
{
	struct mlx5_wq_cyc *wq = &sq->wq;

	u16 pi = sq->pc & wq->sz_m1;
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
	struct mlx5_wqe_data_seg *dseg;

	u8 opcode = MLX5_OPCODE_SEND;
	dma_addr_t dma_addr = 0;
	u16 headlen;
	u16 ds_cnt;
	u16 ihs;
	int i;

	memset(wqe, 0, sizeof(*wqe));

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
	else
		sq->stats.csum_offload_none++;

	if (skb_is_gso(skb)) {
		u32 payload_len;
		int num_pkts;

		eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		opcode = MLX5_OPCODE_LSO;
		ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
		payload_len = skb->len - ihs;
		num_pkts = (payload_len / skb_shinfo(skb)->gso_size) +
			   !!(payload_len % skb_shinfo(skb)->gso_size);
		MLX5E_TX_SKB_CB(skb)->num_bytes = skb->len +
						  (num_pkts - 1) * ihs;
		sq->stats.tso_packets++;
		sq->stats.tso_bytes += payload_len;
	} else {
		ihs = mlx5e_get_inline_hdr_size(sq, skb);
		MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len,
							ETH_ZLEN);
	}

	if (skb_vlan_tag_present(skb)) {
		mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs);
	} else {
		skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
		skb_pull_inline(skb, ihs);
	}

	eseg->inline_hdr_sz = cpu_to_be16(ihs);

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start),
			       MLX5_SEND_WQE_DS);
	dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;

	MLX5E_TX_SKB_CB(skb)->num_dma = 0;

	headlen = skb_headlen(skb);
	if (headlen) {
		dma_addr = dma_map_single(sq->pdev, skb->data, headlen,
					  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr = cpu_to_be64(dma_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(headlen);

		mlx5e_dma_push(sq, dma_addr, headlen);
		MLX5E_TX_SKB_CB(skb)->num_dma++;

		dseg++;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int fsz = skb_frag_size(frag);

		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
					    DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr = cpu_to_be64(dma_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(fsz);

		mlx5e_dma_push(sq, dma_addr, fsz);
		MLX5E_TX_SKB_CB(skb)->num_dma++;

		dseg++;
	}

	ds_cnt += MLX5E_TX_SKB_CB(skb)->num_dma;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;

	sq->skb[pi] = skb;

	MLX5E_TX_SKB_CB(skb)->num_wqebbs = DIV_ROUND_UP(ds_cnt,
							MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;

	netdev_tx_sent_queue(sq->txq, MLX5E_TX_SKB_CB(skb)->num_bytes);

	if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5_SEND_WQE_MAX_WQEBBS))) {
		netif_tx_stop_queue(sq->txq);
		sq->stats.stopped++;
	}

	if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
		mlx5e_tx_notify_hw(sq, wqe);

	sq->stats.packets++;
	return NETDEV_TX_OK;

dma_unmap_wqe_err:
	sq->stats.dropped++;
	mlx5e_dma_unmap_wqe_err(sq, skb);

	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int ix = skb->queue_mapping;
	int tc = 0;
	struct mlx5e_channel *c = priv->channel[ix];
	struct mlx5e_sq *sq = &c->sq[tc];

	return mlx5e_sq_xmit(sq, skb);
}

netdev_tx_t mlx5e_xmit_multi_tc(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int ix = skb->queue_mapping & priv->queue_mapping_channel_mask;
	int tc = skb->queue_mapping >> priv->order_base_2_num_channels;
	struct mlx5e_channel *c = priv->channel[ix];
	struct mlx5e_sq *sq = &c->sq[tc];

	return mlx5e_sq_xmit(sq, skb);
}

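/* Reclaim completed send WQEs: unmap each skb's DMA buffers, account
 * packets/bytes for byte queue limits, free the skbs, and wake the TX
 * queue if it was stopped and has room again. Returns true if the poll
 * budget was exhausted.
 */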
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
{
	struct mlx5e_sq *sq;
	u32 dma_fifo_cc;
	u32 nbytes;
	u16 npkts;
	u16 sqcc;
	int i;

	/* avoid accessing cq (dma coherent memory) if not needed */
	if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
		return false;

	sq = cq->sqrq;

	npkts = 0;
	nbytes = 0;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	/* avoid dirtying sq cache line every cqe */
	dma_fifo_cc = sq->dma_fifo_cc;

	for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
		struct mlx5_cqe64 *cqe;
		struct sk_buff *skb;
		u16 ci;
		int j;

		cqe = mlx5e_get_cqe(cq);
		if (!cqe)
			break;

		ci = sqcc & sq->wq.sz_m1;
		skb = sq->skb[ci];

		if (unlikely(!skb)) { /* nop */
			sq->stats.nop++;
			sqcc++;
			goto free_skb;
		}

		for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) {
			dma_addr_t addr;
			u32 size;

			mlx5e_dma_get(sq, dma_fifo_cc, &addr, &size);
			dma_fifo_cc++;
			dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
		}

		npkts++;
		nbytes += MLX5E_TX_SKB_CB(skb)->num_bytes;
		sqcc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;

free_skb:
		dev_kfree_skb(skb);
	}

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);

	if (netif_tx_queue_stopped(sq->txq) &&
	    mlx5e_sq_has_room_for(sq, MLX5_SEND_WQE_MAX_WQEBBS) &&
	    likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
		netif_tx_wake_queue(sq->txq);
		sq->stats.wake++;
	}
	if (i == MLX5E_TX_CQ_POLL_BUDGET) {
		set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
		return true;
	}

	return false;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
new file mode 100644
index 000000000000..088bc424157c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -0,0 +1,107 @@
/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "en.h"

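/* Return the current CQE if it is owned by software, i.e. its ownership
 * bit matches the low bit of the consumer's wrap counter; otherwise
 * return NULL.
 */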
struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
{
	struct mlx5_cqwq *wq = &cq->wq;
	u32 ci = mlx5_cqwq_get_ci(wq);
	struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
	int cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK;
	int sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1;

	if (cqe_ownership_bit != sw_ownership_val)
		return NULL;

	mlx5_cqwq_pop(wq);

	/* ensure cqe content is read after cqe ownership bit */
	rmb();

	return cqe;
}

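/* Per-channel NAPI handler: drain the TX CQs, poll the RX CQ against
 * the budget, refill the RQ, and re-arm the CQs once no work is left.
 * Returning the full budget keeps the channel scheduled for polling.
 */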
int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
	struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
					       napi);
	bool busy = false;
	int i;

	clear_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);

	for (i = 0; i < c->num_tc; i++)
		busy |= mlx5e_poll_tx_cq(&c->sq[i].cq);

	busy |= mlx5e_poll_rx_cq(&c->rq.cq, budget);

	busy |= mlx5e_post_rx_wqes(c->rq.cq.sqrq);

	if (busy)
		return budget;

	napi_complete(napi);

	/* avoid losing completion event during/after polling cqs */
	if (test_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags)) {
		napi_schedule(napi);
		return 0;
	}

	for (i = 0; i < c->num_tc; i++)
		mlx5e_cq_arm(&c->sq[i].cq);
	mlx5e_cq_arm(&c->rq.cq);

	return 0;
}

void mlx5e_completion_event(struct mlx5_core_cq *mcq)
{
	struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);

	set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
	set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags);
	barrier();
	napi_schedule(cq->napi);
}

void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
{
	struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
	struct mlx5e_channel *c = cq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct net_device *netdev = priv->netdev;

	netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n",
		   __func__, mcq->cqn, event);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
new file mode 100644
index 000000000000..8388411582cf
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -0,0 +1,183 @@
/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include "wq.h"
#include "mlx5_core.h"

u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
{
	return (u32)wq->sz_m1 + 1;
}

u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
{
	return wq->sz_m1 + 1;
}

u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
{
	return (u32)wq->sz_m1 + 1;
}

static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq)
{
	return mlx5_wq_cyc_get_size(wq) << wq->log_stride;
}

static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
{
	return mlx5_cqwq_get_size(wq) << wq->log_stride;
}

static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq)
{
	return mlx5_wq_ll_get_size(wq) << wq->log_stride;
}

int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		       void *wqc, struct mlx5_wq_cyc *wq,
		       struct mlx5_wq_ctrl *wq_ctrl)
{
	int err;

	wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
	wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;

	err = mlx5_db_alloc(mdev, &wq_ctrl->db);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
		return err;
	}

	err = mlx5_buf_alloc(mdev, mlx5_wq_cyc_get_byte_size(wq), &wq_ctrl->buf);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
		goto err_db_free;
	}

	wq->buf = wq_ctrl->buf.direct.buf;
	wq->db = wq_ctrl->db.db;

	wq_ctrl->mdev = mdev;

	return 0;

err_db_free:
	mlx5_db_free(mdev, &wq_ctrl->db);

	return err;
}

int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		     void *cqc, struct mlx5_cqwq *wq,
		     struct mlx5_wq_ctrl *wq_ctrl)
{
	int err;

	wq->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz);
	wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
	wq->sz_m1 = (1 << wq->log_sz) - 1;

	err = mlx5_db_alloc(mdev, &wq_ctrl->db);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
		return err;
	}

	err = mlx5_buf_alloc(mdev, mlx5_cqwq_get_byte_size(wq), &wq_ctrl->buf);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
		goto err_db_free;
	}

	wq->buf = wq_ctrl->buf.direct.buf;
	wq->db = wq_ctrl->db.db;

	wq_ctrl->mdev = mdev;

	return 0;

err_db_free:
	mlx5_db_free(mdev, &wq_ctrl->db);

	return err;
}

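/* A linked-list work queue chains its entries through next_wqe_index;
 * initialize the chain in index order and remember where the tail's
 * next pointer lives so pop can relink freed entries.
 */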
int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		      void *wqc, struct mlx5_wq_ll *wq,
		      struct mlx5_wq_ctrl *wq_ctrl)
{
	struct mlx5_wqe_srq_next_seg *next_seg;
	int err;
	int i;

	wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
	wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;

	err = mlx5_db_alloc(mdev, &wq_ctrl->db);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
		return err;
	}

	err = mlx5_buf_alloc(mdev, mlx5_wq_ll_get_byte_size(wq), &wq_ctrl->buf);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
		goto err_db_free;
	}

	wq->buf = wq_ctrl->buf.direct.buf;
	wq->db = wq_ctrl->db.db;

	for (i = 0; i < wq->sz_m1; i++) {
		next_seg = mlx5_wq_ll_get_wqe(wq, i);
		next_seg->next_wqe_index = cpu_to_be16(i + 1);
	}
	next_seg = mlx5_wq_ll_get_wqe(wq, i);
	wq->tail_next = &next_seg->next_wqe_index;

	wq_ctrl->mdev = mdev;

	return 0;

err_db_free:
	mlx5_db_free(mdev, &wq_ctrl->db);

	return err;
}

void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
{
	mlx5_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
	mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
new file mode 100644
index 000000000000..e0ddd69fb429
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -0,0 +1,171 @@
/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_WQ_H__
#define __MLX5_WQ_H__

#include <linux/mlx5/mlx5_ifc.h>

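/* Three work queue layouts share this API: a plain cyclic ring
 * (mlx5_wq_cyc, used here for SQs), a cyclic CQ ring whose consumer
 * counter doubles as the ownership wrap counter (mlx5_cqwq), and a
 * linked-list ring whose free entries are chained by index
 * (mlx5_wq_ll, used here for RQs).
 */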
struct mlx5_wq_param {
	int linear;
	int numa;
};

struct mlx5_wq_ctrl {
	struct mlx5_core_dev *mdev;
	struct mlx5_buf buf;
	struct mlx5_db db;
};

struct mlx5_wq_cyc {
	void *buf;
	__be32 *db;
	u16 sz_m1;
	u8 log_stride;
};

struct mlx5_cqwq {
	void *buf;
	__be32 *db;
	u32 sz_m1;
	u32 cc; /* consumer counter */
	u8 log_sz;
	u8 log_stride;
};

struct mlx5_wq_ll {
	void *buf;
	__be32 *db;
	__be16 *tail_next;
	u16 sz_m1;
	u16 head;
	u16 wqe_ctr;
	u16 cur_sz;
	u8 log_stride;
};

int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		       void *wqc, struct mlx5_wq_cyc *wq,
		       struct mlx5_wq_ctrl *wq_ctrl);
u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);

int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		     void *cqc, struct mlx5_cqwq *wq,
		     struct mlx5_wq_ctrl *wq_ctrl);
u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);

int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		      void *wqc, struct mlx5_wq_ll *wq,
		      struct mlx5_wq_ctrl *wq_ctrl);
u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq);

void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl);

static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
{
	return ctr & wq->sz_m1;
}

static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
{
	return wq->buf + (ix << wq->log_stride);
}

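/* Serial-number comparison of two 16-bit counters: cc1 is considered
 * bigger iff it is strictly ahead of cc2 by less than 2^15, which keeps
 * the result correct across counter wraparound.
 */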
static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
{
	int equal = (cc1 == cc2);
	int smaller = 0x8000 & (cc1 - cc2);

	return !equal && !smaller;
}

static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)
{
	return wq->cc & wq->sz_m1;
}

static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
{
	return wq->buf + (ix << wq->log_stride);
}

static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq)
{
	return wq->cc >> wq->log_sz;
}

static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq)
{
	wq->cc++;
}

static inline void mlx5_cqwq_update_db_record(struct mlx5_cqwq *wq)
{
	*wq->db = cpu_to_be32(wq->cc & 0xffffff);
}

static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq)
{
	return wq->cur_sz == wq->sz_m1;
}

static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq)
{
	return !wq->cur_sz;
}

static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix)
{
	return wq->buf + (ix << wq->log_stride);
}

static inline void mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next)
{
	wq->head = head_next;
	wq->wqe_ctr++;
	wq->cur_sz++;
}

static inline void mlx5_wq_ll_pop(struct mlx5_wq_ll *wq, __be16 ix,
				  __be16 *next_tail_next)
{
	*wq->tail_next = ix;
	wq->tail_next = next_tail_next;
	wq->cur_sz--;
}

static inline void mlx5_wq_ll_update_db_record(struct mlx5_wq_ll *wq)
{
	*wq->db = cpu_to_be32(wq->wqe_ctr);
}

#endif /* __MLX5_WQ_H__ */