author     Jan-Bernd Themann <ossthema@de.ibm.com>   2006-09-13 11:44:31 -0400
committer  Jeff Garzik <jeff@garzik.org>             2006-09-13 13:23:52 -0400
commit     7a291083225af6e22ffaa46b3d91cfc1a1ccaab4
tree       c87a93ee7d5c1c63ce98dc90a62cd0b4dfc4318f
parent     7de745e56244156233e5cdd62b462e52e638d408
[PATCH] ehea: IBM eHEA Ethernet Device Driver
Hi Jeff,
I fixed the __iomem issue and tested the driver with sparse. Looks good so far.
Thanks for your effort.
Jan-Bernd Themann
Signed-off-by: Jan-Bernd Themann <themann@de.ibm.com>
drivers/net/Kconfig | 9
drivers/net/Makefile | 1
drivers/net/ehea/Makefile | 6
drivers/net/ehea/ehea.h | 447 ++++++
drivers/net/ehea/ehea_ethtool.c | 294 ++++
drivers/net/ehea/ehea_hcall.h | 51
drivers/net/ehea/ehea_hw.h | 287 ++++
drivers/net/ehea/ehea_main.c | 2654 ++++++++++++++++++++++++++++++++++++++++
drivers/net/ehea/ehea_phyp.c | 705 ++++++++++
drivers/net/ehea/ehea_phyp.h | 455 ++++++
drivers/net/ehea/ehea_qmr.c | 582 ++++++++
drivers/net/ehea/ehea_qmr.h | 358 +++++
12 files changed, 5849 insertions(+)
Signed-off-by: Jeff Garzik <jeff@garzik.org>
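
The Kconfig entry added below builds the driver as a module named ehea.
A typical way to enable and load it (illustrative only; assumes a pSeries
kernel tree with IBMEBUS support):

    make menuconfig              # set CONFIG_EHEA=m among the network drivers
    make modules modules_install
    modprobe ehea
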
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index a36fc60bd889..d30ab6b492d1 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2360,6 +2360,15 @@ config CHELSIO_T1
 	  To compile this driver as a module, choose M here: the module
 	  will be called cxgb.
 
+config EHEA
+	tristate "eHEA Ethernet support"
+	depends on IBMEBUS
+	---help---
+	  This driver supports the IBM pSeries eHEA ethernet adapter.
+
+	  To compile the driver as a module, choose M here. The module
+	  will be called ehea.
+
 config IXGB
 	tristate "Intel(R) PRO/10GbE support"
 	depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 6ff17649c0fc..0f329e56345e 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_E1000) += e1000/
 obj-$(CONFIG_IBM_EMAC) += ibm_emac/
 obj-$(CONFIG_IXGB) += ixgb/
 obj-$(CONFIG_CHELSIO_T1) += chelsio/
+obj-$(CONFIG_EHEA) += ehea/
 obj-$(CONFIG_BONDING) += bonding/
 obj-$(CONFIG_GIANFAR) += gianfar_driver.o
 
diff --git a/drivers/net/ehea/Makefile b/drivers/net/ehea/Makefile
new file mode 100644
index 000000000000..775d9969b5c2
--- /dev/null
+++ b/drivers/net/ehea/Makefile
@@ -0,0 +1,6 @@
#
# Makefile for the eHEA ethernet device driver for IBM eServer System p
#
ehea-y = ehea_main.o ehea_phyp.o ehea_qmr.o ehea_ethtool.o
obj-$(CONFIG_EHEA) += ehea.o

diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
new file mode 100644
index 000000000000..87c510f22aca
--- /dev/null
+++ b/drivers/net/ehea/ehea.h
@@ -0,0 +1,447 @@
/*
 * linux/drivers/net/ehea/ehea.h
 *
 * eHEA ethernet device driver for IBM eServer System p
 *
 * (C) Copyright IBM Corp. 2006
 *
 * Authors:
 *      Christoph Raisch <raisch@de.ibm.com>
 *      Jan-Bernd Themann <themann@de.ibm.com>
 *      Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef __EHEA_H__
#define __EHEA_H__

#include <linux/module.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/if_vlan.h>

#include <asm/ibmebus.h>
#include <asm/abs_addr.h>
#include <asm/io.h>

#define DRV_NAME        "ehea"
#define DRV_VERSION     "EHEA_0027"

#define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
        | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EHEA_MAX_ENTRIES_RQ1 32767
#define EHEA_MAX_ENTRIES_RQ2 16383
#define EHEA_MAX_ENTRIES_RQ3 16383
#define EHEA_MAX_ENTRIES_SQ  32767
#define EHEA_MIN_ENTRIES_QP  127

#define EHEA_NUM_TX_QP 1

#ifdef EHEA_SMALL_QUEUES
#define EHEA_MAX_CQE_COUNT   1023
#define EHEA_DEF_ENTRIES_SQ  1023
#define EHEA_DEF_ENTRIES_RQ1 4095
#define EHEA_DEF_ENTRIES_RQ2 1023
#define EHEA_DEF_ENTRIES_RQ3 1023
#else
#define EHEA_MAX_CQE_COUNT   32000
#define EHEA_DEF_ENTRIES_SQ  16000
#define EHEA_DEF_ENTRIES_RQ1 32080
#define EHEA_DEF_ENTRIES_RQ2 4020
#define EHEA_DEF_ENTRIES_RQ3 4020
#endif

#define EHEA_MAX_ENTRIES_EQ 20

#define EHEA_SG_SQ  2
#define EHEA_SG_RQ1 1
#define EHEA_SG_RQ2 0
#define EHEA_SG_RQ3 0

#define EHEA_MAX_PACKET_SIZE 9022       /* for jumbo frames */
#define EHEA_RQ2_PKT_SIZE    1522
#define EHEA_L_PKT_SIZE      256        /* low latency */

#define EHEA_POLL_MAX_RWQE 1000

/* Send completion signaling */
#define EHEA_SIG_IV_LONG 1

/* Protection Domain Identifier */
#define EHEA_PD_ID 0xaabcdeff

#define EHEA_RQ2_THRESHOLD 1
#define EHEA_RQ3_THRESHOLD 9    /* use RQ3 threshold of 1522 bytes */

#define EHEA_SPEED_10G     10000
#define EHEA_SPEED_1G      1000
#define EHEA_SPEED_100M    100
#define EHEA_SPEED_10M     10
#define EHEA_SPEED_AUTONEG 0

/* Broadcast/Multicast registration types */
#define EHEA_BCMC_SCOPE_ALL     0x08
#define EHEA_BCMC_SCOPE_SINGLE  0x00
#define EHEA_BCMC_MULTICAST     0x04
#define EHEA_BCMC_BROADCAST     0x00
#define EHEA_BCMC_UNTAGGED      0x02
#define EHEA_BCMC_TAGGED        0x00
#define EHEA_BCMC_VLANID_ALL    0x01
#define EHEA_BCMC_VLANID_SINGLE 0x00

/* Use this define to kmalloc pHYP control blocks */
#define H_CB_ALIGNMENT 4096

#define EHEA_CACHE_LINE 128

/* Memory Regions */
#define EHEA_MR_MAX_TX_PAGES  20
#define EHEA_MR_TX_DATA_PN    3
#define EHEA_MR_ACC_CTRL      0x00800000
#define EHEA_RWQES_PER_MR_RQ2 10
#define EHEA_RWQES_PER_MR_RQ3 10

#define EHEA_WATCH_DOG_TIMEOUT (10 * HZ)

/* utility functions */

#define ehea_info(fmt, args...) \
        printk(KERN_INFO DRV_NAME ": " fmt "\n", ## args)

#define ehea_error(fmt, args...) \
        printk(KERN_ERR DRV_NAME ": Error in %s: " fmt "\n", __func__, ## args)

#ifdef DEBUG
#define ehea_debug(fmt, args...) \
        printk(KERN_DEBUG DRV_NAME ": " fmt, ## args)
#else
#define ehea_debug(fmt, args...) do {} while (0)
#endif

void ehea_dump(void *adr, int len, char *msg);

#define EHEA_BMASK(pos, length) (((pos) << 16) + (length))

#define EHEA_BMASK_IBM(from, to) (((63 - (to)) << 16) + ((to) - (from) + 1))

#define EHEA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)

#define EHEA_BMASK_MASK(mask) \
        (0xffffffffffffffffULL >> ((64 - (mask)) & 0xffff))

#define EHEA_BMASK_SET(mask, value) \
        ((EHEA_BMASK_MASK(mask) & ((u64)(value))) << EHEA_BMASK_SHIFTPOS(mask))

#define EHEA_BMASK_GET(mask, value) \
        (EHEA_BMASK_MASK(mask) & (((u64)(value)) >> EHEA_BMASK_SHIFTPOS(mask)))

/*
 * Generic ehea page
 */
struct ehea_page {
        u8 entries[PAGE_SIZE];
};

/*
 * Generic queue in linux kernel virtual memory
 */
struct hw_queue {
        u64 current_q_offset;           /* current queue entry */
        struct ehea_page **queue_pages; /* array of pages belonging to queue */
        u32 qe_size;                    /* queue entry size */
        u32 queue_length;               /* queue length allocated in bytes */
        u32 pagesize;
        u32 toggle_state;               /* toggle flag - per page */
        u32 reserved;                   /* 64 bit alignment */
};

/*
 * For pSeries this is a 64bit memory address where
 * I/O memory is mapped into CPU address space
 */
struct h_epa {
        void __iomem *addr;
};

struct h_epa_user {
        u64 addr;
};

struct h_epas {
        struct h_epa kernel;    /* kernel space accessible resource,
                                   set to 0 if unused */
        struct h_epa_user user; /* user space accessible resource
                                   set to 0 if unused */
};

struct ehea_qp;
struct ehea_cq;
struct ehea_eq;
struct ehea_port;
struct ehea_av;

/*
 * Queue attributes passed to ehea_create_qp()
 */
struct ehea_qp_init_attr {
        /* input parameter */
        u32 qp_token;           /* queue token */
        u8 low_lat_rq1;
        u8 signalingtype;       /* cqe generation flag */
        u8 rq_count;            /* num of receive queues */
        u8 eqe_gen;             /* eqe generation flag */
        u16 max_nr_send_wqes;   /* max number of send wqes */
        u16 max_nr_rwqes_rq1;   /* max number of receive wqes */
        u16 max_nr_rwqes_rq2;
        u16 max_nr_rwqes_rq3;
        u8 wqe_size_enc_sq;
        u8 wqe_size_enc_rq1;
        u8 wqe_size_enc_rq2;
        u8 wqe_size_enc_rq3;
        u8 swqe_imm_data_len;   /* immediate data length for swqes */
        u16 port_nr;
        u16 rq2_threshold;
        u16 rq3_threshold;
        u64 send_cq_handle;
        u64 recv_cq_handle;
        u64 aff_eq_handle;

        /* output parameter */
        u32 qp_nr;
        u16 act_nr_send_wqes;
        u16 act_nr_rwqes_rq1;
        u16 act_nr_rwqes_rq2;
        u16 act_nr_rwqes_rq3;
        u8 act_wqe_size_enc_sq;
        u8 act_wqe_size_enc_rq1;
        u8 act_wqe_size_enc_rq2;
        u8 act_wqe_size_enc_rq3;
        u32 nr_sq_pages;
        u32 nr_rq1_pages;
        u32 nr_rq2_pages;
        u32 nr_rq3_pages;
        u32 liobn_sq;
        u32 liobn_rq1;
        u32 liobn_rq2;
        u32 liobn_rq3;
};

/*
 * Event Queue attributes, passed as parameter
 */
struct ehea_eq_attr {
        u32 type;
        u32 max_nr_of_eqes;
        u8 eqe_gen;             /* generate eqe flag */
        u64 eq_handle;
        u32 act_nr_of_eqes;
        u32 nr_pages;
        u32 ist1;               /* Interrupt service token */
        u32 ist2;
        u32 ist3;
        u32 ist4;
};


/*
 * Event Queue
 */
struct ehea_eq {
        struct ehea_adapter *adapter;
        struct hw_queue hw_queue;
        u64 fw_handle;
        struct h_epas epas;
        spinlock_t spinlock;
        struct ehea_eq_attr attr;
};

/*
 * HEA Queues
 */
struct ehea_qp {
        struct ehea_adapter *adapter;
        u64 fw_handle;          /* QP handle for firmware calls */
        struct hw_queue hw_squeue;
        struct hw_queue hw_rqueue1;
        struct hw_queue hw_rqueue2;
        struct hw_queue hw_rqueue3;
        struct h_epas epas;
        struct ehea_qp_init_attr init_attr;
};

/*
 * Completion Queue attributes
 */
struct ehea_cq_attr {
        /* input parameter */
        u32 max_nr_of_cqes;
        u32 cq_token;
        u64 eq_handle;

        /* output parameter */
        u32 act_nr_of_cqes;
        u32 nr_pages;
};

/*
 * Completion Queue
 */
struct ehea_cq {
        struct ehea_adapter *adapter;
        u64 fw_handle;
        struct hw_queue hw_queue;
        struct h_epas epas;
        struct ehea_cq_attr attr;
};

/*
 * Memory Region
 */
struct ehea_mr {
        u64 handle;
        u64 vaddr;
        u32 lkey;
};

/*
 * Port state information
 */
struct port_state {
        int poll_max_processed;
        int poll_receive_errors;
        int ehea_poll;
        int queue_stopped;
        int min_swqe_avail;
        u64 sqc_stop_sum;
        int pkt_send;
        int pkt_xmit;
        int send_tasklet;
        int nwqe;
};

#define EHEA_IRQ_NAME_SIZE 20

/*
 * Queue SKB Array
 */
struct ehea_q_skb_arr {
        struct sk_buff **arr;   /* skb array for queue */
        int len;                /* array length */
        int index;              /* array index */
        int os_skbs;            /* rq2/rq3 only: outstanding skbs */
};

/*
 * Port resources
 */
struct ehea_port_res {
        struct ehea_mr send_mr;         /* send memory region */
        struct ehea_mr recv_mr;         /* receive memory region */
        spinlock_t xmit_lock;
        struct ehea_port *port;
        char int_recv_name[EHEA_IRQ_NAME_SIZE];
        char int_send_name[EHEA_IRQ_NAME_SIZE];
        struct ehea_qp *qp;
        struct ehea_cq *send_cq;
        struct ehea_cq *recv_cq;
        struct ehea_eq *send_eq;
        struct ehea_eq *recv_eq;
        spinlock_t send_lock;
        struct ehea_q_skb_arr rq1_skba;
        struct ehea_q_skb_arr rq2_skba;
        struct ehea_q_skb_arr rq3_skba;
        struct ehea_q_skb_arr sq_skba;
        spinlock_t netif_queue;
        int queue_stopped;
        int swqe_refill_th;
        atomic_t swqe_avail;
        int swqe_ll_count;
        int swqe_count;
        u32 swqe_id_counter;
        u64 tx_packets;
        struct tasklet_struct send_comp_task;
        spinlock_t recv_lock;
        struct port_state p_state;
        u64 rx_packets;
        u32 poll_counter;
};


struct ehea_adapter {
        u64 handle;
        u8 num_ports;
        struct ehea_port *port[16];
        struct ehea_eq *neq;    /* notification event queue */
        struct workqueue_struct *ehea_wq;
        struct tasklet_struct neq_tasklet;
        struct ehea_mr mr;
        u32 pd;                 /* protection domain */
        u64 max_mc_mac;         /* max number of multicast mac addresses */
};


struct ehea_mc_list {
        struct list_head list;
        u64 macaddr;
};

#define EHEA_PORT_UP 1
#define EHEA_PORT_DOWN 0
#define EHEA_MAX_PORT_RES 16
struct ehea_port {
        struct ehea_adapter *adapter;   /* adapter that owns this port */
        struct net_device *netdev;
        struct net_device_stats stats;
        struct ehea_port_res port_res[EHEA_MAX_PORT_RES];
        struct device_node *of_dev_node; /* Open Firmware Device Node */
        struct ehea_mc_list *mc_list;   /* Multicast MAC addresses */
        struct vlan_group *vgrp;
        struct ehea_eq *qp_eq;
        struct work_struct reset_task;
        struct semaphore port_lock;
        char int_aff_name[EHEA_IRQ_NAME_SIZE];
        int allmulti;                   /* Indicates IFF_ALLMULTI state */
        int promisc;                    /* Indicates IFF_PROMISC state */
        int num_add_tx_qps;
        int resets;
        u64 mac_addr;
        u32 logical_port_id;
        u32 port_speed;
        u32 msg_enable;
        u32 sig_comp_iv;
        u32 state;
        u8 full_duplex;
        u8 autoneg;
        u8 num_def_qps;
};

struct port_res_cfg {
        int max_entries_rcq;
        int max_entries_scq;
        int max_entries_sq;
        int max_entries_rq1;
        int max_entries_rq2;
        int max_entries_rq3;
};


void ehea_set_ethtool_ops(struct net_device *netdev);
int ehea_sense_port_attr(struct ehea_port *port);
int ehea_set_portspeed(struct ehea_port *port, u32 port_speed);

#endif  /* __EHEA_H__ */
diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c
new file mode 100644
index 000000000000..6906af6277c8
--- /dev/null
+++ b/drivers/net/ehea/ehea_ethtool.c
@@ -0,0 +1,294 @@
/*
 * linux/drivers/net/ehea/ehea_ethtool.c
 *
 * eHEA ethernet device driver for IBM eServer System p
 *
 * (C) Copyright IBM Corp. 2006
 *
 * Authors:
 *      Christoph Raisch <raisch@de.ibm.com>
 *      Jan-Bernd Themann <themann@de.ibm.com>
 *      Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "ehea.h"
#include "ehea_phyp.h"

static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct ehea_port *port = netdev_priv(dev);
        int ret;

        ret = ehea_sense_port_attr(port);

        if (ret)
                return ret;

        if (netif_carrier_ok(dev)) {
                switch (port->port_speed) {
                case EHEA_SPEED_10M: cmd->speed = SPEED_10; break;
                case EHEA_SPEED_100M: cmd->speed = SPEED_100; break;
                case EHEA_SPEED_1G: cmd->speed = SPEED_1000; break;
                case EHEA_SPEED_10G: cmd->speed = SPEED_10000; break;
                }
                cmd->duplex = port->full_duplex == 1 ?
                              DUPLEX_FULL : DUPLEX_HALF;
        } else {
                cmd->speed = -1;
                cmd->duplex = -1;
        }

        cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full
                          | SUPPORTED_100baseT_Full | SUPPORTED_100baseT_Half
                          | SUPPORTED_10baseT_Full | SUPPORTED_10baseT_Half
                          | SUPPORTED_Autoneg | SUPPORTED_FIBRE);

        cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Autoneg
                            | ADVERTISED_FIBRE);

        cmd->port = PORT_FIBRE;
        cmd->autoneg = port->autoneg == 1 ? AUTONEG_ENABLE : AUTONEG_DISABLE;

        return 0;
}

static int ehea_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct ehea_port *port = netdev_priv(dev);
        int ret = 0;
        u32 sp;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                sp = EHEA_SPEED_AUTONEG;
                goto doit;
        }

        switch (cmd->speed) {
        case SPEED_10:
                if (cmd->duplex == DUPLEX_FULL)
                        sp = H_SPEED_10M_F;
                else
                        sp = H_SPEED_10M_H;
                break;

        case SPEED_100:
                if (cmd->duplex == DUPLEX_FULL)
                        sp = H_SPEED_100M_F;
                else
                        sp = H_SPEED_100M_H;
                break;

        case SPEED_1000:
                if (cmd->duplex == DUPLEX_FULL)
                        sp = H_SPEED_1G_F;
                else
                        ret = -EINVAL;
                break;

        case SPEED_10000:
                if (cmd->duplex == DUPLEX_FULL)
                        sp = H_SPEED_10G_F;
                else
                        ret = -EINVAL;
                break;

        default:
                ret = -EINVAL;
                break;
        }

        if (ret)
                goto out;
doit:
        ret = ehea_set_portspeed(port, sp);

        if (!ret)
                ehea_info("%s: Port speed successfully set: %dMbps "
                          "%s Duplex",
                          port->netdev->name, port->port_speed,
                          port->full_duplex == 1 ? "Full" : "Half");
out:
        return ret;
}
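
/*
 * Illustration (editor's example, not part of the patch): the handler
 * above services the standard ethtool ioctl, so a forced-speed request
 * from user space such as
 *
 *      ethtool -s eth0 speed 100 duplex full autoneg off
 *
 * reaches ehea_set_settings() with SPEED_100/DUPLEX_FULL and therefore
 * calls ehea_set_portspeed(port, H_SPEED_100M_F).
 */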
128 | |||
129 | static int ehea_nway_reset(struct net_device *dev) | ||
130 | { | ||
131 | struct ehea_port *port = netdev_priv(dev); | ||
132 | int ret; | ||
133 | |||
134 | ret = ehea_set_portspeed(port, EHEA_SPEED_AUTONEG); | ||
135 | |||
136 | if (!ret) | ||
137 | ehea_info("%s: Port speed succesfully set: %dMbps " | ||
138 | "%s Duplex", | ||
139 | port->netdev->name, port->port_speed, | ||
140 | port->full_duplex == 1 ? "Full" : "Half"); | ||
141 | return ret; | ||
142 | } | ||
143 | |||
144 | static void ehea_get_drvinfo(struct net_device *dev, | ||
145 | struct ethtool_drvinfo *info) | ||
146 | { | ||
147 | strlcpy(info->driver, DRV_NAME, sizeof(info->driver) - 1); | ||
148 | strlcpy(info->version, DRV_VERSION, sizeof(info->version) - 1); | ||
149 | } | ||
150 | |||
151 | static u32 ehea_get_msglevel(struct net_device *dev) | ||
152 | { | ||
153 | struct ehea_port *port = netdev_priv(dev); | ||
154 | return port->msg_enable; | ||
155 | } | ||
156 | |||
157 | static void ehea_set_msglevel(struct net_device *dev, u32 value) | ||
158 | { | ||
159 | struct ehea_port *port = netdev_priv(dev); | ||
160 | port->msg_enable = value; | ||
161 | } | ||
162 | |||
163 | static u32 ehea_get_rx_csum(struct net_device *dev) | ||
164 | { | ||
165 | return 1; | ||
166 | } | ||
167 | |||
168 | static char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = { | ||
169 | {"poll_max_processed"}, | ||
170 | {"queue_stopped"}, | ||
171 | {"min_swqe_avail"}, | ||
172 | {"poll_receive_err"}, | ||
173 | {"pkt_send"}, | ||
174 | {"pkt_xmit"}, | ||
175 | {"send_tasklet"}, | ||
176 | {"ehea_poll"}, | ||
177 | {"nwqe"}, | ||
178 | {"swqe_available_0"}, | ||
179 | {"sig_comp_iv"}, | ||
180 | {"swqe_refill_th"}, | ||
181 | {"port resets"}, | ||
182 | {"rxo"}, | ||
183 | {"rx64"}, | ||
184 | {"rx65"}, | ||
185 | {"rx128"}, | ||
186 | {"rx256"}, | ||
187 | {"rx512"}, | ||
188 | {"rx1024"}, | ||
189 | {"txo"}, | ||
190 | {"tx64"}, | ||
191 | {"tx65"}, | ||
192 | {"tx128"}, | ||
193 | {"tx256"}, | ||
194 | {"tx512"}, | ||
195 | {"tx1024"}, | ||
196 | }; | ||
197 | |||
198 | static void ehea_get_strings(struct net_device *dev, u32 stringset, u8 *data) | ||
199 | { | ||
200 | if (stringset == ETH_SS_STATS) { | ||
201 | memcpy(data, &ehea_ethtool_stats_keys, | ||
202 | sizeof(ehea_ethtool_stats_keys)); | ||
203 | } | ||
204 | } | ||
205 | |||
206 | static int ehea_get_stats_count(struct net_device *dev) | ||
207 | { | ||
208 | return ARRAY_SIZE(ehea_ethtool_stats_keys); | ||
209 | } | ||
210 | |||
211 | static void ehea_get_ethtool_stats(struct net_device *dev, | ||
212 | struct ethtool_stats *stats, u64 *data) | ||
213 | { | ||
214 | u64 hret; | ||
215 | int i; | ||
216 | struct ehea_port *port = netdev_priv(dev); | ||
217 | struct ehea_adapter *adapter = port->adapter; | ||
218 | struct ehea_port_res *pr = &port->port_res[0]; | ||
219 | struct port_state *p_state = &pr->p_state; | ||
220 | struct hcp_ehea_port_cb6 *cb6; | ||
221 | |||
222 | for (i = 0; i < ehea_get_stats_count(dev); i++) | ||
223 | data[i] = 0; | ||
224 | |||
225 | i = 0; | ||
226 | |||
227 | data[i++] = p_state->poll_max_processed; | ||
228 | data[i++] = p_state->queue_stopped; | ||
229 | data[i++] = p_state->min_swqe_avail; | ||
230 | data[i++] = p_state->poll_receive_errors; | ||
231 | data[i++] = p_state->pkt_send; | ||
232 | data[i++] = p_state->pkt_xmit; | ||
233 | data[i++] = p_state->send_tasklet; | ||
234 | data[i++] = p_state->ehea_poll; | ||
235 | data[i++] = p_state->nwqe; | ||
236 | data[i++] = atomic_read(&port->port_res[0].swqe_avail); | ||
237 | data[i++] = port->sig_comp_iv; | ||
238 | data[i++] = port->port_res[0].swqe_refill_th; | ||
239 | data[i++] = port->resets; | ||
240 | |||
241 | cb6 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | ||
242 | if (!cb6) { | ||
243 | ehea_error("no mem for cb6"); | ||
244 | return; | ||
245 | } | ||
246 | |||
247 | hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id, | ||
248 | H_PORT_CB6, H_PORT_CB6_ALL, cb6); | ||
249 | if (netif_msg_hw(port)) | ||
250 | ehea_dump(cb6, sizeof(*cb6), "ehea_get_ethtool_stats"); | ||
251 | |||
252 | if (hret == H_SUCCESS) { | ||
253 | data[i++] = cb6->rxo; | ||
254 | data[i++] = cb6->rx64; | ||
255 | data[i++] = cb6->rx65; | ||
256 | data[i++] = cb6->rx128; | ||
257 | data[i++] = cb6->rx256; | ||
258 | data[i++] = cb6->rx512; | ||
259 | data[i++] = cb6->rx1024; | ||
260 | data[i++] = cb6->txo; | ||
261 | data[i++] = cb6->tx64; | ||
262 | data[i++] = cb6->tx65; | ||
263 | data[i++] = cb6->tx128; | ||
264 | data[i++] = cb6->tx256; | ||
265 | data[i++] = cb6->tx512; | ||
266 | data[i++] = cb6->tx1024; | ||
267 | } else | ||
268 | ehea_error("query_ehea_port failed"); | ||
269 | |||
270 | kfree(cb6); | ||
271 | } | ||
272 | |||
273 | struct ethtool_ops ehea_ethtool_ops = { | ||
274 | .get_settings = ehea_get_settings, | ||
275 | .get_drvinfo = ehea_get_drvinfo, | ||
276 | .get_msglevel = ehea_get_msglevel, | ||
277 | .set_msglevel = ehea_set_msglevel, | ||
278 | .get_link = ethtool_op_get_link, | ||
279 | .get_tx_csum = ethtool_op_get_tx_csum, | ||
280 | .get_sg = ethtool_op_get_sg, | ||
281 | .get_tso = ethtool_op_get_tso, | ||
282 | .set_tso = ethtool_op_set_tso, | ||
283 | .get_strings = ehea_get_strings, | ||
284 | .get_stats_count = ehea_get_stats_count, | ||
285 | .get_ethtool_stats = ehea_get_ethtool_stats, | ||
286 | .get_rx_csum = ehea_get_rx_csum, | ||
287 | .set_settings = ehea_set_settings, | ||
288 | .nway_reset = ehea_nway_reset, /* Restart autonegotiation */ | ||
289 | }; | ||
290 | |||
291 | void ehea_set_ethtool_ops(struct net_device *netdev) | ||
292 | { | ||
293 | SET_ETHTOOL_OPS(netdev, &ehea_ethtool_ops); | ||
294 | } | ||
diff --git a/drivers/net/ehea/ehea_hcall.h b/drivers/net/ehea/ehea_hcall.h
new file mode 100644
index 000000000000..8e7d1c3edc60
--- /dev/null
+++ b/drivers/net/ehea/ehea_hcall.h
@@ -0,0 +1,51 @@
/*
 * linux/drivers/net/ehea/ehea_hcall.h
 *
 * eHEA ethernet device driver for IBM eServer System p
 *
 * (C) Copyright IBM Corp. 2006
 *
 * Authors:
 *      Christoph Raisch <raisch@de.ibm.com>
 *      Jan-Bernd Themann <themann@de.ibm.com>
 *      Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef __EHEA_HCALL_H__
#define __EHEA_HCALL_H__

/*
 * This file contains HCALL defines that are to be included in the
 * appropriate kernel files later
 */

#define H_ALLOC_HEA_RESOURCE    0x278
#define H_MODIFY_HEA_QP         0x250
#define H_QUERY_HEA_QP          0x254
#define H_QUERY_HEA             0x258
#define H_QUERY_HEA_PORT        0x25C
#define H_MODIFY_HEA_PORT       0x260
#define H_REG_BCMC              0x264
#define H_DEREG_BCMC            0x268
#define H_REGISTER_HEA_RPAGES   0x26C
#define H_DISABLE_AND_GET_HEA   0x270
#define H_GET_HEA_INFO          0x274
#define H_ADD_CONN              0x284
#define H_DEL_CONN              0x288
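
/*
 * Illustration (editor's example, not part of the patch): these opcodes
 * are issued by the wrappers added in ehea_phyp.c (not shown in this
 * excerpt). For instance, the ethtool statistics path queries a port via
 *
 *      cb6 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
 *      hret = ehea_h_query_ehea_port(adapter->handle, logical_port_id,
 *                                    H_PORT_CB6, H_PORT_CB6_ALL, cb6);
 *
 * which ultimately invokes hcall 0x25C (H_QUERY_HEA_PORT) in firmware.
 */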
50 | |||
51 | #endif /* __EHEA_HCALL_H__ */ | ||
diff --git a/drivers/net/ehea/ehea_hw.h b/drivers/net/ehea/ehea_hw.h
new file mode 100644
index 000000000000..e3a7d07f88cc
--- /dev/null
+++ b/drivers/net/ehea/ehea_hw.h
@@ -0,0 +1,287 @@
/*
 * linux/drivers/net/ehea/ehea_hw.h
 *
 * eHEA ethernet device driver for IBM eServer System p
 *
 * (C) Copyright IBM Corp. 2006
 *
 * Authors:
 *      Christoph Raisch <raisch@de.ibm.com>
 *      Jan-Bernd Themann <themann@de.ibm.com>
 *      Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef __EHEA_HW_H__
#define __EHEA_HW_H__

#define QPX_SQA_VALUE   EHEA_BMASK_IBM(48, 63)
#define QPX_RQ1A_VALUE  EHEA_BMASK_IBM(48, 63)
#define QPX_RQ2A_VALUE  EHEA_BMASK_IBM(48, 63)
#define QPX_RQ3A_VALUE  EHEA_BMASK_IBM(48, 63)

#define QPTEMM_OFFSET(x) offsetof(struct ehea_qptemm, x)

struct ehea_qptemm {
        u64 qpx_hcr;
        u64 qpx_c;
        u64 qpx_herr;
        u64 qpx_aer;
        u64 qpx_sqa;
        u64 qpx_sqc;
        u64 qpx_rq1a;
        u64 qpx_rq1c;
        u64 qpx_st;
        u64 qpx_aerr;
        u64 qpx_tenure;
        u64 qpx_reserved1[(0x098 - 0x058) / 8];
        u64 qpx_portp;
        u64 qpx_reserved2[(0x100 - 0x0A0) / 8];
        u64 qpx_t;
        u64 qpx_sqhp;
        u64 qpx_sqptp;
        u64 qpx_reserved3[(0x140 - 0x118) / 8];
        u64 qpx_sqwsize;
        u64 qpx_reserved4[(0x170 - 0x148) / 8];
        u64 qpx_sqsize;
        u64 qpx_reserved5[(0x1B0 - 0x178) / 8];
        u64 qpx_sigt;
        u64 qpx_wqecnt;
        u64 qpx_rq1hp;
        u64 qpx_rq1ptp;
        u64 qpx_rq1size;
        u64 qpx_reserved6[(0x220 - 0x1D8) / 8];
        u64 qpx_rq1wsize;
        u64 qpx_reserved7[(0x240 - 0x228) / 8];
        u64 qpx_pd;
        u64 qpx_scqn;
        u64 qpx_rcqn;
        u64 qpx_aeqn;
        u64 reserved49;
        u64 qpx_ram;
        u64 qpx_reserved8[(0x300 - 0x270) / 8];
        u64 qpx_rq2a;
        u64 qpx_rq2c;
        u64 qpx_rq2hp;
        u64 qpx_rq2ptp;
        u64 qpx_rq2size;
        u64 qpx_rq2wsize;
        u64 qpx_rq2th;
        u64 qpx_rq3a;
        u64 qpx_rq3c;
        u64 qpx_rq3hp;
        u64 qpx_rq3ptp;
        u64 qpx_rq3size;
        u64 qpx_rq3wsize;
        u64 qpx_rq3th;
        u64 qpx_lpn;
        u64 qpx_reserved9[(0x400 - 0x378) / 8];
        u64 reserved_ext[(0x500 - 0x400) / 8];
        u64 reserved2[(0x1000 - 0x500) / 8];
};

#define MRx_HCR_LPARID_VALID EHEA_BMASK_IBM(0, 0)

#define MRMWMM_OFFSET(x) offsetof(struct ehea_mrmwmm, x)

struct ehea_mrmwmm {
        u64 mrx_hcr;
        u64 mrx_c;
        u64 mrx_herr;
        u64 mrx_aer;
        u64 mrx_pp;
        u64 reserved1;
        u64 reserved2;
        u64 reserved3;
        u64 reserved4[(0x200 - 0x40) / 8];
        u64 mrx_ctl[64];
};

#define QPEDMM_OFFSET(x) offsetof(struct ehea_qpedmm, x)

struct ehea_qpedmm {

        u64 reserved0[(0x400) / 8];
        u64 qpedx_phh;
        u64 qpedx_ppsgp;
        u64 qpedx_ppsgu;
        u64 qpedx_ppdgp;
        u64 qpedx_ppdgu;
        u64 qpedx_aph;
        u64 qpedx_apsgp;
        u64 qpedx_apsgu;
        u64 qpedx_apdgp;
        u64 qpedx_apdgu;
        u64 qpedx_apav;
        u64 qpedx_apsav;
        u64 qpedx_hcr;
        u64 reserved1[4];
        u64 qpedx_rrl0;
        u64 qpedx_rrrkey0;
        u64 qpedx_rrva0;
        u64 reserved2;
        u64 qpedx_rrl1;
        u64 qpedx_rrrkey1;
        u64 qpedx_rrva1;
        u64 reserved3;
        u64 qpedx_rrl2;
        u64 qpedx_rrrkey2;
        u64 qpedx_rrva2;
        u64 reserved4;
        u64 qpedx_rrl3;
        u64 qpedx_rrrkey3;
        u64 qpedx_rrva3;
};

#define CQX_FECADDER               EHEA_BMASK_IBM(32, 63)
#define CQX_FEC_CQE_CNT            EHEA_BMASK_IBM(32, 63)
#define CQX_N1_GENERATE_COMP_EVENT EHEA_BMASK_IBM(0, 0)
#define CQX_EP_EVENT_PENDING       EHEA_BMASK_IBM(0, 0)

#define CQTEMM_OFFSET(x) offsetof(struct ehea_cqtemm, x)

struct ehea_cqtemm {
        u64 cqx_hcr;
        u64 cqx_c;
        u64 cqx_herr;
        u64 cqx_aer;
        u64 cqx_ptp;
        u64 cqx_tp;
        u64 cqx_fec;
        u64 cqx_feca;
        u64 cqx_ep;
        u64 cqx_eq;
        u64 reserved1;
        u64 cqx_n0;
        u64 cqx_n1;
        u64 reserved2[(0x1000 - 0x60) / 8];
};

#define EQTEMM_OFFSET(x) offsetof(struct ehea_eqtemm, x)

struct ehea_eqtemm {
        u64 eqx_hcr;
        u64 eqx_c;
        u64 eqx_herr;
        u64 eqx_aer;
        u64 eqx_ptp;
        u64 eqx_tp;
        u64 eqx_ssba;
        u64 eqx_psba;
        u64 eqx_cec;
        u64 eqx_meql;
        u64 eqx_xisbi;
        u64 eqx_xisc;
        u64 eqx_it;
};

static inline u64 epa_load(struct h_epa epa, u32 offset)
{
        return readq((void __iomem *)(epa.addr + offset));
}

static inline void epa_store(struct h_epa epa, u32 offset, u64 value)
{
        writeq(value, (void __iomem *)(epa.addr + offset));
        epa_load(epa, offset);  /* synchronize explicitly to eHEA */
}

static inline void epa_store_acc(struct h_epa epa, u32 offset, u64 value)
{
        writeq(value, (void __iomem *)(epa.addr + offset));
}
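
/*
 * Illustration (editor's note, not part of the patch): epa_store() reads
 * the register back after the writeq so the store is known to have
 * reached the eHEA before the caller continues, while epa_store_acc()
 * leaves the write posted. The doorbell helpers below (e.g.
 * ehea_update_rq1a()) use the posted variant and rely on an explicit
 * iosync() at the call site, as in ehea_refill_rq_def() in ehea_main.c.
 */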
207 | |||
208 | #define epa_store_eq(epa, offset, value)\ | ||
209 | epa_store(epa, EQTEMM_OFFSET(offset), value) | ||
210 | #define epa_load_eq(epa, offset)\ | ||
211 | epa_load(epa, EQTEMM_OFFSET(offset)) | ||
212 | |||
213 | #define epa_store_cq(epa, offset, value)\ | ||
214 | epa_store(epa, CQTEMM_OFFSET(offset), value) | ||
215 | #define epa_load_cq(epa, offset)\ | ||
216 | epa_load(epa, CQTEMM_OFFSET(offset)) | ||
217 | |||
218 | #define epa_store_qp(epa, offset, value)\ | ||
219 | epa_store(epa, QPTEMM_OFFSET(offset), value) | ||
220 | #define epa_load_qp(epa, offset)\ | ||
221 | epa_load(epa, QPTEMM_OFFSET(offset)) | ||
222 | |||
223 | #define epa_store_qped(epa, offset, value)\ | ||
224 | epa_store(epa, QPEDMM_OFFSET(offset), value) | ||
225 | #define epa_load_qped(epa, offset)\ | ||
226 | epa_load(epa, QPEDMM_OFFSET(offset)) | ||
227 | |||
228 | #define epa_store_mrmw(epa, offset, value)\ | ||
229 | epa_store(epa, MRMWMM_OFFSET(offset), value) | ||
230 | #define epa_load_mrmw(epa, offset)\ | ||
231 | epa_load(epa, MRMWMM_OFFSET(offset)) | ||
232 | |||
233 | #define epa_store_base(epa, offset, value)\ | ||
234 | epa_store(epa, HCAGR_OFFSET(offset), value) | ||
235 | #define epa_load_base(epa, offset)\ | ||
236 | epa_load(epa, HCAGR_OFFSET(offset)) | ||
237 | |||
238 | static inline void ehea_update_sqa(struct ehea_qp *qp, u16 nr_wqes) | ||
239 | { | ||
240 | struct h_epa epa = qp->epas.kernel; | ||
241 | epa_store_acc(epa, QPTEMM_OFFSET(qpx_sqa), | ||
242 | EHEA_BMASK_SET(QPX_SQA_VALUE, nr_wqes)); | ||
243 | } | ||
244 | |||
static inline void ehea_update_rq3a(struct ehea_qp *qp, u16 nr_wqes)
{
        struct h_epa epa = qp->epas.kernel;
        epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq3a),
                      EHEA_BMASK_SET(QPX_RQ3A_VALUE, nr_wqes));
}

static inline void ehea_update_rq2a(struct ehea_qp *qp, u16 nr_wqes)
{
        struct h_epa epa = qp->epas.kernel;
        epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq2a),
                      EHEA_BMASK_SET(QPX_RQ2A_VALUE, nr_wqes));
}

static inline void ehea_update_rq1a(struct ehea_qp *qp, u16 nr_wqes)
{
        struct h_epa epa = qp->epas.kernel;
        epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq1a),
                      EHEA_BMASK_SET(QPX_RQ1A_VALUE, nr_wqes));
}
265 | |||
266 | static inline void ehea_update_feca(struct ehea_cq *cq, u32 nr_cqes) | ||
267 | { | ||
268 | struct h_epa epa = cq->epas.kernel; | ||
269 | epa_store_acc(epa, CQTEMM_OFFSET(cqx_feca), | ||
270 | EHEA_BMASK_SET(CQX_FECADDER, nr_cqes)); | ||
271 | } | ||
272 | |||
273 | static inline void ehea_reset_cq_n1(struct ehea_cq *cq) | ||
274 | { | ||
275 | struct h_epa epa = cq->epas.kernel; | ||
276 | epa_store_cq(epa, cqx_n1, | ||
277 | EHEA_BMASK_SET(CQX_N1_GENERATE_COMP_EVENT, 1)); | ||
278 | } | ||
279 | |||
280 | static inline void ehea_reset_cq_ep(struct ehea_cq *my_cq) | ||
281 | { | ||
282 | struct h_epa epa = my_cq->epas.kernel; | ||
283 | epa_store_acc(epa, CQTEMM_OFFSET(cqx_ep), | ||
284 | EHEA_BMASK_SET(CQX_EP_EVENT_PENDING, 0)); | ||
285 | } | ||
286 | |||
287 | #endif /* __EHEA_HW_H__ */ | ||
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
new file mode 100644
index 000000000000..82a58c1cfe55
--- /dev/null
+++ b/drivers/net/ehea/ehea_main.c
@@ -0,0 +1,2654 @@
/*
 * linux/drivers/net/ehea/ehea_main.c
 *
 * eHEA ethernet device driver for IBM eServer System p
 *
 * (C) Copyright IBM Corp. 2006
 *
 * Authors:
 *      Christoph Raisch <raisch@de.ibm.com>
 *      Jan-Bernd Themann <themann@de.ibm.com>
 *      Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
#include <linux/if_ether.h>
#include <net/ip.h>

#include "ehea.h"
#include "ehea_qmr.h"
#include "ehea_phyp.h"


MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);


static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);

MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3));
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2));
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1));
MODULE_PARM_DESC(sq_entries, "Number of entries for the Send Queue "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ));

void ehea_dump(void *adr, int len, char *msg)
{
        int x;
        unsigned char *deb = adr;
        for (x = 0; x < len; x += 16) {
                printk(DRV_NAME " %s adr=%p ofs=%04x %016lx %016lx\n", msg,
                       deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
                deb += 16;
        }
}
84 | |||
85 | static struct net_device_stats *ehea_get_stats(struct net_device *dev) | ||
86 | { | ||
87 | struct ehea_port *port = netdev_priv(dev); | ||
88 | struct net_device_stats *stats = &port->stats; | ||
89 | struct hcp_ehea_port_cb2 *cb2; | ||
90 | u64 hret, rx_packets; | ||
91 | int i; | ||
92 | |||
93 | memset(stats, 0, sizeof(*stats)); | ||
94 | |||
95 | cb2 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | ||
96 | if (!cb2) { | ||
97 | ehea_error("no mem for cb2"); | ||
98 | goto out; | ||
99 | } | ||
100 | |||
101 | hret = ehea_h_query_ehea_port(port->adapter->handle, | ||
102 | port->logical_port_id, | ||
103 | H_PORT_CB2, H_PORT_CB2_ALL, cb2); | ||
104 | if (hret != H_SUCCESS) { | ||
105 | ehea_error("query_ehea_port failed"); | ||
106 | goto out_herr; | ||
107 | } | ||
108 | |||
109 | if (netif_msg_hw(port)) | ||
110 | ehea_dump(cb2, sizeof(*cb2), "net_device_stats"); | ||
111 | |||
112 | rx_packets = 0; | ||
113 | for (i = 0; i < port->num_def_qps; i++) | ||
114 | rx_packets += port->port_res[i].rx_packets; | ||
115 | |||
116 | stats->tx_packets = cb2->txucp + cb2->txmcp + cb2->txbcp; | ||
117 | stats->multicast = cb2->rxmcp; | ||
118 | stats->rx_errors = cb2->rxuerr; | ||
119 | stats->rx_bytes = cb2->rxo; | ||
120 | stats->tx_bytes = cb2->txo; | ||
121 | stats->rx_packets = rx_packets; | ||
122 | |||
123 | out_herr: | ||
124 | kfree(cb2); | ||
125 | out: | ||
126 | return stats; | ||
127 | } | ||
128 | |||
129 | static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes) | ||
130 | { | ||
131 | struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; | ||
132 | struct net_device *dev = pr->port->netdev; | ||
133 | int max_index_mask = pr->rq1_skba.len - 1; | ||
134 | int i; | ||
135 | |||
136 | if (!nr_of_wqes) | ||
137 | return; | ||
138 | |||
139 | for (i = 0; i < nr_of_wqes; i++) { | ||
140 | if (!skb_arr_rq1[index]) { | ||
141 | skb_arr_rq1[index] = netdev_alloc_skb(dev, | ||
142 | EHEA_L_PKT_SIZE); | ||
143 | if (!skb_arr_rq1[index]) { | ||
144 | ehea_error("%s: no mem for skb/%d wqes filled", | ||
145 | dev->name, i); | ||
146 | break; | ||
147 | } | ||
148 | } | ||
149 | index--; | ||
150 | index &= max_index_mask; | ||
151 | } | ||
152 | /* Ring doorbell */ | ||
153 | ehea_update_rq1a(pr->qp, i); | ||
154 | } | ||
155 | |||
156 | static int ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a) | ||
157 | { | ||
158 | int ret = 0; | ||
159 | struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; | ||
160 | struct net_device *dev = pr->port->netdev; | ||
161 | int i; | ||
162 | |||
163 | for (i = 0; i < pr->rq1_skba.len; i++) { | ||
164 | skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); | ||
165 | if (!skb_arr_rq1[i]) { | ||
166 | ehea_error("%s: no mem for skb/%d wqes filled", | ||
167 | dev->name, i); | ||
168 | ret = -ENOMEM; | ||
169 | goto out; | ||
170 | } | ||
171 | } | ||
172 | /* Ring doorbell */ | ||
173 | ehea_update_rq1a(pr->qp, nr_rq1a); | ||
174 | out: | ||
175 | return ret; | ||
176 | } | ||
177 | |||
178 | static int ehea_refill_rq_def(struct ehea_port_res *pr, | ||
179 | struct ehea_q_skb_arr *q_skba, int rq_nr, | ||
180 | int num_wqes, int wqe_type, int packet_size) | ||
181 | { | ||
182 | struct net_device *dev = pr->port->netdev; | ||
183 | struct ehea_qp *qp = pr->qp; | ||
184 | struct sk_buff **skb_arr = q_skba->arr; | ||
185 | struct ehea_rwqe *rwqe; | ||
186 | int i, index, max_index_mask, fill_wqes; | ||
187 | int ret = 0; | ||
188 | |||
189 | fill_wqes = q_skba->os_skbs + num_wqes; | ||
190 | |||
191 | if (!fill_wqes) | ||
192 | return ret; | ||
193 | |||
194 | index = q_skba->index; | ||
195 | max_index_mask = q_skba->len - 1; | ||
196 | for (i = 0; i < fill_wqes; i++) { | ||
197 | struct sk_buff *skb = netdev_alloc_skb(dev, packet_size); | ||
198 | if (!skb) { | ||
199 | ehea_error("%s: no mem for skb/%d wqes filled", | ||
200 | dev->name, i); | ||
201 | q_skba->os_skbs = fill_wqes - i; | ||
202 | ret = -ENOMEM; | ||
203 | break; | ||
204 | } | ||
205 | skb_reserve(skb, NET_IP_ALIGN); | ||
206 | |||
207 | skb_arr[index] = skb; | ||
208 | |||
209 | rwqe = ehea_get_next_rwqe(qp, rq_nr); | ||
210 | rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type) | ||
211 | | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index); | ||
212 | rwqe->sg_list[0].l_key = pr->recv_mr.lkey; | ||
213 | rwqe->sg_list[0].vaddr = (u64)skb->data; | ||
214 | rwqe->sg_list[0].len = packet_size; | ||
215 | rwqe->data_segments = 1; | ||
216 | |||
217 | index++; | ||
218 | index &= max_index_mask; | ||
219 | } | ||
220 | q_skba->index = index; | ||
221 | |||
222 | /* Ring doorbell */ | ||
223 | iosync(); | ||
224 | if (rq_nr == 2) | ||
225 | ehea_update_rq2a(pr->qp, i); | ||
226 | else | ||
227 | ehea_update_rq3a(pr->qp, i); | ||
228 | |||
229 | return ret; | ||
230 | } | ||
231 | |||
232 | |||
233 | static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes) | ||
234 | { | ||
235 | return ehea_refill_rq_def(pr, &pr->rq2_skba, 2, | ||
236 | nr_of_wqes, EHEA_RWQE2_TYPE, | ||
237 | EHEA_RQ2_PKT_SIZE + NET_IP_ALIGN); | ||
238 | } | ||
239 | |||
240 | |||
241 | static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes) | ||
242 | { | ||
243 | return ehea_refill_rq_def(pr, &pr->rq3_skba, 3, | ||
244 | nr_of_wqes, EHEA_RWQE3_TYPE, | ||
245 | EHEA_MAX_PACKET_SIZE + NET_IP_ALIGN); | ||
246 | } | ||
247 | |||
248 | static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num) | ||
249 | { | ||
250 | *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5; | ||
251 | if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0) | ||
252 | return 0; | ||
253 | if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) && | ||
254 | (cqe->header_length == 0)) | ||
255 | return 0; | ||
256 | return -EINVAL; | ||
257 | } | ||
258 | |||
259 | static inline void ehea_fill_skb(struct net_device *dev, | ||
260 | struct sk_buff *skb, struct ehea_cqe *cqe) | ||
261 | { | ||
262 | int length = cqe->num_bytes_transfered - 4; /*remove CRC */ | ||
263 | |||
264 | skb_put(skb, length); | ||
265 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
266 | skb->protocol = eth_type_trans(skb, dev); | ||
267 | } | ||
268 | |||
269 | static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array, | ||
270 | int arr_len, | ||
271 | struct ehea_cqe *cqe) | ||
272 | { | ||
273 | int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id); | ||
274 | struct sk_buff *skb; | ||
275 | void *pref; | ||
276 | int x; | ||
277 | |||
278 | x = skb_index + 1; | ||
279 | x &= (arr_len - 1); | ||
280 | |||
281 | pref = skb_array[x]; | ||
282 | prefetchw(pref); | ||
283 | prefetchw(pref + EHEA_CACHE_LINE); | ||
284 | |||
285 | pref = (skb_array[x]->data); | ||
286 | prefetch(pref); | ||
287 | prefetch(pref + EHEA_CACHE_LINE); | ||
288 | prefetch(pref + EHEA_CACHE_LINE * 2); | ||
289 | prefetch(pref + EHEA_CACHE_LINE * 3); | ||
290 | skb = skb_array[skb_index]; | ||
291 | skb_array[skb_index] = NULL; | ||
292 | return skb; | ||
293 | } | ||
294 | |||
295 | static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array, | ||
296 | int arr_len, int wqe_index) | ||
297 | { | ||
298 | struct sk_buff *skb; | ||
299 | void *pref; | ||
300 | int x; | ||
301 | |||
302 | x = wqe_index + 1; | ||
303 | x &= (arr_len - 1); | ||
304 | |||
305 | pref = skb_array[x]; | ||
306 | prefetchw(pref); | ||
307 | prefetchw(pref + EHEA_CACHE_LINE); | ||
308 | |||
309 | pref = (skb_array[x]->data); | ||
310 | prefetchw(pref); | ||
311 | prefetchw(pref + EHEA_CACHE_LINE); | ||
312 | |||
313 | skb = skb_array[wqe_index]; | ||
314 | skb_array[wqe_index] = NULL; | ||
315 | return skb; | ||
316 | } | ||
317 | |||
318 | static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq, | ||
319 | struct ehea_cqe *cqe, int *processed_rq2, | ||
320 | int *processed_rq3) | ||
321 | { | ||
322 | struct sk_buff *skb; | ||
323 | |||
324 | if (netif_msg_rx_err(pr->port)) { | ||
325 | ehea_error("CQE Error for QP %d", pr->qp->init_attr.qp_nr); | ||
326 | ehea_dump(cqe, sizeof(*cqe), "CQE"); | ||
327 | } | ||
328 | |||
329 | if (rq == 2) { | ||
330 | *processed_rq2 += 1; | ||
331 | skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe); | ||
332 | dev_kfree_skb(skb); | ||
333 | } else if (rq == 3) { | ||
334 | *processed_rq3 += 1; | ||
335 | skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe); | ||
336 | dev_kfree_skb(skb); | ||
337 | } | ||
338 | |||
339 | if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) { | ||
340 | ehea_error("Critical receive error. Resetting port."); | ||
341 | queue_work(pr->port->adapter->ehea_wq, &pr->port->reset_task); | ||
342 | return 1; | ||
343 | } | ||
344 | |||
345 | return 0; | ||
346 | } | ||
347 | |||
348 | static int ehea_poll(struct net_device *dev, int *budget) | ||
349 | { | ||
350 | struct ehea_port *port = netdev_priv(dev); | ||
351 | struct ehea_port_res *pr = &port->port_res[0]; | ||
352 | struct ehea_qp *qp = pr->qp; | ||
353 | struct ehea_cqe *cqe; | ||
354 | struct sk_buff *skb; | ||
355 | struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; | ||
356 | struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr; | ||
357 | struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr; | ||
358 | int skb_arr_rq1_len = pr->rq1_skba.len; | ||
359 | int skb_arr_rq2_len = pr->rq2_skba.len; | ||
360 | int skb_arr_rq3_len = pr->rq3_skba.len; | ||
361 | int processed, processed_rq1, processed_rq2, processed_rq3; | ||
362 | int wqe_index, last_wqe_index, rq, intreq, my_quota, port_reset; | ||
363 | |||
364 | processed = processed_rq1 = processed_rq2 = processed_rq3 = 0; | ||
365 | last_wqe_index = 0; | ||
366 | my_quota = min(*budget, dev->quota); | ||
367 | my_quota = min(my_quota, EHEA_POLL_MAX_RWQE); | ||
368 | |||
369 | /* rq0 is low latency RQ */ | ||
370 | cqe = ehea_poll_rq1(qp, &wqe_index); | ||
371 | while ((my_quota > 0) && cqe) { | ||
372 | ehea_inc_rq1(qp); | ||
373 | processed_rq1++; | ||
374 | processed++; | ||
375 | my_quota--; | ||
376 | if (netif_msg_rx_status(port)) | ||
377 | ehea_dump(cqe, sizeof(*cqe), "CQE"); | ||
378 | |||
379 | last_wqe_index = wqe_index; | ||
380 | rmb(); | ||
381 | if (!ehea_check_cqe(cqe, &rq)) { | ||
382 | if (rq == 1) { /* LL RQ1 */ | ||
383 | skb = get_skb_by_index_ll(skb_arr_rq1, | ||
384 | skb_arr_rq1_len, | ||
385 | wqe_index); | ||
386 | if (unlikely(!skb)) { | ||
387 | if (netif_msg_rx_err(port)) | ||
388 | ehea_error("LL rq1: skb=NULL"); | ||
389 | skb = netdev_alloc_skb(dev, | ||
390 | EHEA_L_PKT_SIZE); | ||
391 | if (!skb) | ||
392 | break; | ||
393 | } | ||
394 | memcpy(skb->data, ((char*)cqe) + 64, | ||
395 | cqe->num_bytes_transfered - 4); | ||
396 | ehea_fill_skb(dev, skb, cqe); | ||
397 | } else if (rq == 2) { /* RQ2 */ | ||
398 | skb = get_skb_by_index(skb_arr_rq2, | ||
399 | skb_arr_rq2_len, cqe); | ||
400 | if (unlikely(!skb)) { | ||
401 | if (netif_msg_rx_err(port)) | ||
402 | ehea_error("rq2: skb=NULL"); | ||
403 | break; | ||
404 | } | ||
405 | ehea_fill_skb(dev, skb, cqe); | ||
406 | processed_rq2++; | ||
407 | } else { /* RQ3 */ | ||
408 | skb = get_skb_by_index(skb_arr_rq3, | ||
409 | skb_arr_rq3_len, cqe); | ||
410 | if (unlikely(!skb)) { | ||
411 | if (netif_msg_rx_err(port)) | ||
412 | ehea_error("rq3: skb=NULL"); | ||
413 | break; | ||
414 | } | ||
415 | ehea_fill_skb(dev, skb, cqe); | ||
416 | processed_rq3++; | ||
417 | } | ||
418 | |||
419 | if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) | ||
420 | vlan_hwaccel_receive_skb(skb, port->vgrp, | ||
421 | cqe->vlan_tag); | ||
422 | else | ||
423 | netif_receive_skb(skb); | ||
424 | |||
425 | } else { /* Error occured */ | ||
426 | pr->p_state.poll_receive_errors++; | ||
427 | port_reset = ehea_treat_poll_error(pr, rq, cqe, | ||
428 | &processed_rq2, | ||
429 | &processed_rq3); | ||
430 | if (port_reset) | ||
431 | break; | ||
432 | } | ||
433 | cqe = ehea_poll_rq1(qp, &wqe_index); | ||
434 | } | ||
435 | |||
436 | dev->quota -= processed; | ||
437 | *budget -= processed; | ||
438 | |||
439 | pr->p_state.ehea_poll += 1; | ||
440 | pr->rx_packets += processed; | ||
441 | |||
442 | ehea_refill_rq1(pr, last_wqe_index, processed_rq1); | ||
443 | ehea_refill_rq2(pr, processed_rq2); | ||
444 | ehea_refill_rq3(pr, processed_rq3); | ||
445 | |||
446 | intreq = ((pr->p_state.ehea_poll & 0xF) == 0xF); | ||
447 | |||
448 | if (!cqe || intreq) { | ||
449 | netif_rx_complete(dev); | ||
450 | ehea_reset_cq_ep(pr->recv_cq); | ||
451 | ehea_reset_cq_n1(pr->recv_cq); | ||
452 | cqe = hw_qeit_get_valid(&qp->hw_rqueue1); | ||
453 | if (!cqe || intreq) | ||
454 | return 0; | ||
455 | if (!netif_rx_reschedule(dev, my_quota)) | ||
456 | return 0; | ||
457 | } | ||
458 | return 1; | ||
459 | } | ||
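
[Editor's note] The quota bookkeeping above follows the old (pre-2.6.24) NAPI contract: a poll routine may consume at most min(*budget, dev->quota) packets, decrements both counters by the number processed, and returns 0 once it has called netif_rx_complete(), or 1 to be polled again. A minimal userspace model of that two-level accounting (toy code, not part of the patch; poll_once() is a made-up helper):

	#include <stdio.h>

	/* Toy model of NAPI quota accounting: at most
	 * min(*budget, *dev_quota) packets may be consumed per call. */
	static int poll_once(int *budget, int *dev_quota, int *backlog)
	{
		int quota = *budget < *dev_quota ? *budget : *dev_quota;
		int done = *backlog < quota ? *backlog : quota;

		*backlog -= done;
		*dev_quota -= done;
		*budget -= done;
		return done < quota ? 0 : 1;	/* 0: complete, 1: poll again */
	}

	int main(void)
	{
		int budget = 64, dev_quota = 16, backlog = 40;

		while (poll_once(&budget, &dev_quota, &backlog) == 1)
			dev_quota = 16;		/* refreshed by the net core */
		printf("budget left=%d backlog left=%d\n", budget, backlog);
		return 0;
	}
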
460 | |||
461 | void free_sent_skbs(struct ehea_cqe *cqe, struct ehea_port_res *pr) | ||
462 | { | ||
463 | struct sk_buff *skb; | ||
464 | int index, max_index_mask, i; | ||
465 | |||
466 | index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id); | ||
467 | max_index_mask = pr->sq_skba.len - 1; | ||
468 | for (i = 0; i < EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id); i++) { | ||
469 | skb = pr->sq_skba.arr[index]; | ||
470 | if (likely(skb)) { | ||
471 | dev_kfree_skb(skb); | ||
472 | pr->sq_skba.arr[index] = NULL; | ||
473 | } else { | ||
474 | ehea_error("skb=NULL, wr_id=%lX, loop=%d, index=%d", | ||
475 | cqe->wr_id, i, index); | ||
476 | } | ||
477 | index--; | ||
478 | index &= max_index_mask; | ||
479 | } | ||
480 | } | ||
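
[Editor's note] The `index &= max_index_mask` step above wraps the send-queue index without a branch, which only works because the skb array length is a power of two. A self-contained sketch of that masking idiom (RING_LEN is a made-up constant, not from the driver):

	#include <assert.h>
	#include <stdio.h>

	#define RING_LEN 8	/* must be a power of two for mask-based wrap */

	int main(void)
	{
		unsigned int mask = RING_LEN - 1;	/* 0x7 */
		unsigned int index = 2;
		int i;

		assert((RING_LEN & (RING_LEN - 1)) == 0);

		/* walking backwards, as free_sent_skbs() does, wraps cleanly */
		for (i = 0; i < 4; i++) {
			printf("index=%u\n", index);	/* 2, 1, 0, 7 */
			index--;
			index &= mask;
		}
		return 0;
	}
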
481 | |||
482 | #define MAX_SENDCOMP_QUOTA 400 | ||
483 | void ehea_send_irq_tasklet(unsigned long data) | ||
484 | { | ||
485 | struct ehea_port_res *pr = (struct ehea_port_res*)data; | ||
486 | struct ehea_cq *send_cq = pr->send_cq; | ||
487 | struct ehea_cqe *cqe; | ||
488 | int quota = MAX_SENDCOMP_QUOTA; | ||
489 | int cqe_counter = 0; | ||
490 | int swqe_av = 0; | ||
491 | unsigned long flags; | ||
492 | |||
493 | do { | ||
494 | cqe = ehea_poll_cq(send_cq); | ||
495 | if (!cqe) { | ||
496 | ehea_reset_cq_ep(send_cq); | ||
497 | ehea_reset_cq_n1(send_cq); | ||
498 | cqe = ehea_poll_cq(send_cq); | ||
499 | if (!cqe) | ||
500 | break; | ||
501 | } | ||
502 | cqe_counter++; | ||
503 | rmb(); | ||
504 | if (cqe->status & EHEA_CQE_STAT_ERR_MASK) { | ||
505 | ehea_error("Send Completion Error: Resetting port"); | ||
506 | if (netif_msg_tx_err(pr->port)) | ||
507 | ehea_dump(cqe, sizeof(*cqe), "Send CQE"); | ||
508 | queue_work(pr->port->adapter->ehea_wq, | ||
509 | &pr->port->reset_task); | ||
510 | break; | ||
511 | } | ||
512 | |||
513 | if (netif_msg_tx_done(pr->port)) | ||
514 | ehea_dump(cqe, sizeof(*cqe), "CQE"); | ||
515 | |||
516 | if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id) | ||
517 | == EHEA_SWQE2_TYPE)) | ||
518 | free_sent_skbs(cqe, pr); | ||
519 | |||
520 | swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id); | ||
521 | quota--; | ||
522 | } while (quota > 0); | ||
523 | |||
524 | ehea_update_feca(send_cq, cqe_counter); | ||
525 | atomic_add(swqe_av, &pr->swqe_avail); | ||
526 | |||
527 | spin_lock_irqsave(&pr->netif_queue, flags); | ||
528 | if (pr->queue_stopped && (atomic_read(&pr->swqe_avail) | ||
529 | >= pr->swqe_refill_th)) { | ||
530 | netif_wake_queue(pr->port->netdev); | ||
531 | pr->queue_stopped = 0; | ||
532 | } | ||
533 | spin_unlock_irqrestore(&pr->netif_queue, flags); | ||
534 | |||
535 | if (unlikely(cqe)) | ||
536 | tasklet_hi_schedule(&pr->send_comp_task); | ||
537 | } | ||
538 | |||
539 | static irqreturn_t ehea_send_irq_handler(int irq, void *param, | ||
540 | struct pt_regs *regs) | ||
541 | { | ||
542 | struct ehea_port_res *pr = param; | ||
543 | tasklet_hi_schedule(&pr->send_comp_task); | ||
544 | return IRQ_HANDLED; | ||
545 | } | ||
546 | |||
547 | static irqreturn_t ehea_recv_irq_handler(int irq, void *param, | ||
548 | struct pt_regs *regs) | ||
549 | { | ||
550 | struct ehea_port_res *pr = param; | ||
551 | struct ehea_port *port = pr->port; | ||
552 | netif_rx_schedule(port->netdev); | ||
553 | return IRQ_HANDLED; | ||
554 | } | ||
555 | |||
556 | static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param, | ||
557 | struct pt_regs *regs) | ||
558 | { | ||
559 | struct ehea_port *port = param; | ||
560 | struct ehea_eqe *eqe; | ||
561 | u32 qp_token; | ||
562 | |||
563 | eqe = ehea_poll_eq(port->qp_eq); | ||
564 | ehea_debug("eqe=%p", eqe); | ||
565 | while (eqe) { | ||
566 | ehea_debug("*eqe=%lx", *(u64*)eqe); | ||
567 | eqe = ehea_poll_eq(port->qp_eq); | ||
568 | qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry); | ||
569 | ehea_debug("next eqe=%p", eqe); | ||
570 | } | ||
571 | |||
572 | return IRQ_HANDLED; | ||
573 | } | ||
574 | |||
575 | static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter, | ||
576 | int logical_port) | ||
577 | { | ||
578 | int i; | ||
579 | |||
580 | for (i = 0; i < adapter->num_ports; i++) | ||
581 | if (adapter->port[i]->logical_port_id == logical_port) | ||
582 | return adapter->port[i]; | ||
583 | return NULL; | ||
584 | } | ||
585 | |||
586 | int ehea_sense_port_attr(struct ehea_port *port) | ||
587 | { | ||
588 | int ret; | ||
589 | u64 hret; | ||
590 | struct hcp_ehea_port_cb0 *cb0; | ||
591 | |||
592 | cb0 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | ||
593 | if (!cb0) { | ||
594 | ehea_error("no mem for cb0"); | ||
595 | ret = -ENOMEM; | ||
596 | goto out; | ||
597 | } | ||
598 | |||
599 | hret = ehea_h_query_ehea_port(port->adapter->handle, | ||
600 | port->logical_port_id, H_PORT_CB0, | ||
601 | EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF), | ||
602 | cb0); | ||
603 | if (hret != H_SUCCESS) { | ||
604 | ret = -EIO; | ||
605 | goto out_free; | ||
606 | } | ||
607 | |||
608 | /* MAC address */ | ||
609 | port->mac_addr = cb0->port_mac_addr << 16; | ||
610 | |||
611 | if (!is_valid_ether_addr((u8*)&port->mac_addr)) { | ||
612 | ret = -EADDRNOTAVAIL; | ||
613 | goto out_free; | ||
614 | } | ||
615 | |||
616 | /* Port speed */ | ||
617 | switch (cb0->port_speed) { | ||
618 | case H_SPEED_10M_H: | ||
619 | port->port_speed = EHEA_SPEED_10M; | ||
620 | port->full_duplex = 0; | ||
621 | break; | ||
622 | case H_SPEED_10M_F: | ||
623 | port->port_speed = EHEA_SPEED_10M; | ||
624 | port->full_duplex = 1; | ||
625 | break; | ||
626 | case H_SPEED_100M_H: | ||
627 | port->port_speed = EHEA_SPEED_100M; | ||
628 | port->full_duplex = 0; | ||
629 | break; | ||
630 | case H_SPEED_100M_F: | ||
631 | port->port_speed = EHEA_SPEED_100M; | ||
632 | port->full_duplex = 1; | ||
633 | break; | ||
634 | case H_SPEED_1G_F: | ||
635 | port->port_speed = EHEA_SPEED_1G; | ||
636 | port->full_duplex = 1; | ||
637 | break; | ||
638 | case H_SPEED_10G_F: | ||
639 | port->port_speed = EHEA_SPEED_10G; | ||
640 | port->full_duplex = 1; | ||
641 | break; | ||
642 | default: | ||
643 | port->port_speed = 0; | ||
644 | port->full_duplex = 0; | ||
645 | break; | ||
646 | } | ||
647 | |||
648 | /* Number of default QPs */ | ||
649 | port->num_def_qps = cb0->num_default_qps; | ||
650 | |||
651 | if (!port->num_def_qps) { | ||
652 | ret = -EINVAL; | ||
653 | goto out_free; | ||
654 | } | ||
655 | |||
656 | if (port->num_def_qps >= EHEA_NUM_TX_QP) | ||
657 | port->num_add_tx_qps = 0; | ||
658 | else | ||
659 | port->num_add_tx_qps = EHEA_NUM_TX_QP - port->num_def_qps; | ||
660 | |||
661 | ret = 0; | ||
662 | out_free: | ||
663 | if (ret || netif_msg_probe(port)) | ||
664 | ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr"); | ||
665 | kfree(cb0); | ||
666 | out: | ||
667 | return ret; | ||
668 | } | ||
669 | |||
670 | int ehea_set_portspeed(struct ehea_port *port, u32 port_speed) | ||
671 | { | ||
672 | struct hcp_ehea_port_cb4 *cb4; | ||
673 | u64 hret; | ||
674 | int ret = 0; | ||
675 | |||
676 | cb4 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | ||
677 | if (!cb4) { | ||
678 | ehea_error("no mem for cb4"); | ||
679 | ret = -ENOMEM; | ||
680 | goto out; | ||
681 | } | ||
682 | |||
683 | cb4->port_speed = port_speed; | ||
684 | |||
685 | netif_carrier_off(port->netdev); | ||
686 | |||
687 | hret = ehea_h_modify_ehea_port(port->adapter->handle, | ||
688 | port->logical_port_id, | ||
689 | H_PORT_CB4, H_PORT_CB4_SPEED, cb4); | ||
690 | if (hret == H_SUCCESS) { | ||
691 | port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0; | ||
692 | |||
693 | hret = ehea_h_query_ehea_port(port->adapter->handle, | ||
694 | port->logical_port_id, | ||
695 | H_PORT_CB4, H_PORT_CB4_SPEED, | ||
696 | cb4); | ||
697 | if (hret == H_SUCCESS) { | ||
698 | switch (cb4->port_speed) { | ||
699 | case H_SPEED_10M_H: | ||
700 | port->port_speed = EHEA_SPEED_10M; | ||
701 | port->full_duplex = 0; | ||
702 | break; | ||
703 | case H_SPEED_10M_F: | ||
704 | port->port_speed = EHEA_SPEED_10M; | ||
705 | port->full_duplex = 1; | ||
706 | break; | ||
707 | case H_SPEED_100M_H: | ||
708 | port->port_speed = EHEA_SPEED_100M; | ||
709 | port->full_duplex = 0; | ||
710 | break; | ||
711 | case H_SPEED_100M_F: | ||
712 | port->port_speed = EHEA_SPEED_100M; | ||
713 | port->full_duplex = 1; | ||
714 | break; | ||
715 | case H_SPEED_1G_F: | ||
716 | port->port_speed = EHEA_SPEED_1G; | ||
717 | port->full_duplex = 1; | ||
718 | break; | ||
719 | case H_SPEED_10G_F: | ||
720 | port->port_speed = EHEA_SPEED_10G; | ||
721 | port->full_duplex = 1; | ||
722 | break; | ||
723 | default: | ||
724 | port->port_speed = 0; | ||
725 | port->full_duplex = 0; | ||
726 | break; | ||
727 | } | ||
728 | } else { | ||
729 | ehea_error("Failed sensing port speed"); | ||
730 | ret = -EIO; | ||
731 | } | ||
732 | } else { | ||
733 | if (hret == H_AUTHORITY) { | ||
734 | ehea_info("Hypervisor denied setting port speed. Either" | ||
735 | " this partition is not authorized to set " | ||
736 | "port speed or another partition has modified" | ||
737 | " port speed first."); | ||
738 | ret = -EPERM; | ||
739 | } else { | ||
740 | ret = -EIO; | ||
741 | ehea_error("Failed setting port speed"); | ||
742 | } | ||
743 | } | ||
744 | netif_carrier_on(port->netdev); | ||
745 | kfree(cb4); | ||
746 | out: | ||
747 | return ret; | ||
748 | } | ||
749 | |||
750 | static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe) | ||
751 | { | ||
752 | int ret; | ||
753 | u8 ec; | ||
754 | u8 portnum; | ||
755 | struct ehea_port *port; | ||
756 | |||
757 | ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe); | ||
758 | portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe); | ||
759 | port = ehea_get_port(adapter, portnum); | ||
760 | |||
761 | switch (ec) { | ||
762 | case EHEA_EC_PORTSTATE_CHG: /* port state change */ | ||
763 | |||
764 | if (!port) { | ||
765 | ehea_error("unknown portnum %x", portnum); | ||
766 | break; | ||
767 | } | ||
768 | |||
769 | if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) { | ||
770 | if (!netif_carrier_ok(port->netdev)) { | ||
771 | ret = ehea_sense_port_attr(port); | ||
773 | if (ret) { | ||
774 | ehea_error("failed resensing port " | ||
775 | "attributes"); | ||
776 | break; | ||
777 | } | ||
778 | |||
779 | if (netif_msg_link(port)) | ||
780 | ehea_info("%s: Logical port up: %dMbps " | ||
781 | "%s Duplex", | ||
782 | port->netdev->name, | ||
783 | port->port_speed, | ||
784 | port->full_duplex == | ||
785 | 1 ? "Full" : "Half"); | ||
786 | |||
787 | netif_carrier_on(port->netdev); | ||
788 | netif_wake_queue(port->netdev); | ||
789 | } | ||
790 | } else | ||
791 | if (netif_carrier_ok(port->netdev)) { | ||
792 | if (netif_msg_link(port)) | ||
793 | ehea_info("%s: Logical port down", | ||
794 | port->netdev->name); | ||
795 | netif_carrier_off(port->netdev); | ||
796 | netif_stop_queue(port->netdev); | ||
797 | } | ||
798 | |||
799 | if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) { | ||
800 | if (netif_msg_link(port)) | ||
801 | ehea_info("%s: Physical port up", | ||
802 | port->netdev->name); | ||
803 | } else { | ||
804 | if (netif_msg_link(port)) | ||
805 | ehea_info("%s: Physical port down", | ||
806 | port->netdev->name); | ||
807 | } | ||
808 | |||
809 | if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe)) | ||
810 | ehea_info("External switch port is primary port"); | ||
811 | else | ||
812 | ehea_info("External switch port is backup port"); | ||
813 | |||
814 | break; | ||
815 | case EHEA_EC_ADAPTER_MALFUNC: | ||
816 | ehea_error("Adapter malfunction"); | ||
817 | break; | ||
818 | case EHEA_EC_PORT_MALFUNC: | ||
819 | ehea_info("Port malfunction: Device: %s", port->netdev->name); | ||
820 | netif_carrier_off(port->netdev); | ||
821 | netif_stop_queue(port->netdev); | ||
822 | break; | ||
823 | default: | ||
824 | ehea_error("unknown event code %x", ec); | ||
825 | break; | ||
826 | } | ||
827 | } | ||
828 | |||
829 | static void ehea_neq_tasklet(unsigned long data) | ||
830 | { | ||
831 | struct ehea_adapter *adapter = (struct ehea_adapter*)data; | ||
832 | struct ehea_eqe *eqe; | ||
833 | u64 event_mask; | ||
834 | |||
835 | eqe = ehea_poll_eq(adapter->neq); | ||
836 | ehea_debug("eqe=%p", eqe); | ||
837 | |||
838 | while (eqe) { | ||
839 | ehea_debug("*eqe=%lx", eqe->entry); | ||
840 | ehea_parse_eqe(adapter, eqe->entry); | ||
841 | eqe = ehea_poll_eq(adapter->neq); | ||
842 | ehea_debug("next eqe=%p", eqe); | ||
843 | } | ||
844 | |||
845 | event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1) | ||
846 | | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1) | ||
847 | | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1); | ||
848 | |||
849 | ehea_h_reset_events(adapter->handle, | ||
850 | adapter->neq->fw_handle, event_mask); | ||
851 | } | ||
852 | |||
853 | static irqreturn_t ehea_interrupt_neq(int irq, void *param, | ||
854 | struct pt_regs *regs) | ||
855 | { | ||
856 | struct ehea_adapter *adapter = param; | ||
857 | tasklet_hi_schedule(&adapter->neq_tasklet); | ||
858 | return IRQ_HANDLED; | ||
859 | } | ||
860 | |||
861 | |||
862 | static int ehea_fill_port_res(struct ehea_port_res *pr) | ||
863 | { | ||
864 | int ret; | ||
865 | struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr; | ||
866 | |||
867 | ret = ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1 | ||
868 | - init_attr->act_nr_rwqes_rq2 | ||
869 | - init_attr->act_nr_rwqes_rq3 - 1); | ||
870 | |||
871 | ret |= ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1); | ||
872 | |||
873 | ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1); | ||
874 | |||
875 | return ret; | ||
876 | } | ||
877 | |||
878 | static int ehea_reg_interrupts(struct net_device *dev) | ||
879 | { | ||
880 | struct ehea_port *port = netdev_priv(dev); | ||
881 | struct ehea_port_res *pr; | ||
882 | int i, ret; | ||
883 | |||
884 | for (i = 0; i < port->num_def_qps; i++) { | ||
885 | pr = &port->port_res[i]; | ||
886 | snprintf(pr->int_recv_name, EHEA_IRQ_NAME_SIZE - 1, | ||
887 | "%s-recv%d", dev->name, i); | ||
888 | ret = ibmebus_request_irq(NULL, pr->recv_eq->attr.ist1, | ||
889 | ehea_recv_irq_handler, | ||
890 | SA_INTERRUPT, pr->int_recv_name, pr); | ||
891 | if (ret) { | ||
892 | ehea_error("failed registering irq for ehea_recv_int:" | ||
893 | "port_res_nr:%d, ist=%X", i, | ||
894 | pr->recv_eq->attr.ist1); | ||
895 | goto out_free_seq; | ||
896 | } | ||
897 | if (netif_msg_ifup(port)) | ||
898 | ehea_info("irq_handle 0x%X for funct ehea_recv_int %d " | ||
899 | "registered", pr->recv_eq->attr.ist1, i); | ||
900 | } | ||
901 | |||
902 | snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff", | ||
903 | dev->name); | ||
904 | |||
905 | ret = ibmebus_request_irq(NULL, port->qp_eq->attr.ist1, | ||
906 | ehea_qp_aff_irq_handler, | ||
907 | SA_INTERRUPT, port->int_aff_name, port); | ||
908 | if (ret) { | ||
909 | ehea_error("failed registering irq for qp_aff_irq_handler:" | ||
910 | "ist=%X", port->qp_eq->attr.ist1); | ||
911 | goto out_free_qpeq; | ||
912 | } | ||
913 | |||
914 | if (netif_msg_ifup(port)) | ||
915 | ehea_info("irq_handle 0x%X for function qp_aff_irq_handler " | ||
916 | "registered", port->qp_eq->attr.ist1); | ||
917 | |||
918 | for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { | ||
919 | pr = &port->port_res[i]; | ||
920 | snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1, | ||
921 | "%s-send%d", dev->name, i); | ||
922 | ret = ibmebus_request_irq(NULL, pr->send_eq->attr.ist1, | ||
923 | ehea_send_irq_handler, | ||
924 | SA_INTERRUPT, pr->int_send_name, | ||
925 | pr); | ||
926 | if (ret) { | ||
927 | ehea_error("failed registering irq for ehea_send " | ||
928 | "port_res_nr:%d, ist=%X", i, | ||
929 | pr->send_eq->attr.ist1); | ||
930 | goto out_free_req; | ||
931 | } | ||
932 | if (netif_msg_ifup(port)) | ||
933 | ehea_info("irq_handle 0x%X for function ehea_send_int " | ||
934 | "%d registered", pr->send_eq->attr.ist1, i); | ||
935 | } | ||
936 | out: | ||
937 | return ret; | ||
938 | |||
939 | out_free_req: | ||
940 | while (--i >= 0) { | ||
941 | u32 ist = port->port_res[i].send_eq->attr.ist1; | ||
942 | ibmebus_free_irq(NULL, ist, &port->port_res[i]); | ||
943 | } | ||
944 | out_free_qpeq: | ||
945 | ibmebus_free_irq(NULL, port->qp_eq->attr.ist1, port); | ||
946 | i = port->num_def_qps; | ||
947 | out_free_seq: | ||
948 | while (--i >= 0) { | ||
949 | u32 ist = port->port_res[i].recv_eq->attr.ist1; | ||
950 | ibmebus_free_irq(NULL, ist, &port->port_res[i]); | ||
951 | } | ||
952 | goto out; | ||
953 | } | ||
954 | |||
955 | static void ehea_free_interrupts(struct net_device *dev) | ||
956 | { | ||
957 | struct ehea_port *port = netdev_priv(dev); | ||
958 | struct ehea_port_res *pr; | ||
959 | int i; | ||
960 | |||
961 | /* send */ | ||
962 | for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { | ||
963 | pr = &port->port_res[i]; | ||
964 | ibmebus_free_irq(NULL, pr->send_eq->attr.ist1, pr); | ||
965 | if (netif_msg_intr(port)) | ||
966 | ehea_info("free send irq for res %d with handle 0x%X", | ||
967 | i, pr->send_eq->attr.ist1); | ||
968 | } | ||
969 | |||
970 | /* receive */ | ||
971 | for (i = 0; i < port->num_def_qps; i++) { | ||
972 | pr = &port->port_res[i]; | ||
973 | ibmebus_free_irq(NULL, pr->recv_eq->attr.ist1, pr); | ||
974 | if (netif_msg_intr(port)) | ||
975 | ehea_info("free recv irq for res %d with handle 0x%X", | ||
976 | i, pr->recv_eq->attr.ist1); | ||
977 | } | ||
978 | |||
979 | /* associated events */ | ||
980 | ibmebus_free_irq(NULL, port->qp_eq->attr.ist1, port); | ||
981 | if (netif_msg_intr(port)) | ||
982 | ehea_info("associated event interrupt for handle 0x%X freed", | ||
983 | port->qp_eq->attr.ist1); | ||
984 | } | ||
985 | |||
986 | static int ehea_configure_port(struct ehea_port *port) | ||
987 | { | ||
988 | int ret, i; | ||
989 | u64 hret, mask; | ||
990 | struct hcp_ehea_port_cb0 *cb0; | ||
991 | |||
992 | ret = -ENOMEM; | ||
993 | cb0 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | ||
994 | if (!cb0) | ||
995 | goto out; | ||
996 | |||
997 | cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1) | ||
998 | | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1) | ||
999 | | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1) | ||
1000 | | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1) | ||
1001 | | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER, | ||
1002 | PXLY_RC_VLAN_FILTER) | ||
1003 | | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1); | ||
1004 | |||
1005 | for (i = 0; i < port->num_def_qps; i++) | ||
1006 | cb0->default_qpn_arr[i] = port->port_res[i].qp->init_attr.qp_nr; | ||
1007 | |||
1008 | if (netif_msg_ifup(port)) | ||
1009 | ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port"); | ||
1010 | |||
1011 | mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1) | ||
1012 | | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1); | ||
1013 | |||
1014 | hret = ehea_h_modify_ehea_port(port->adapter->handle, | ||
1015 | port->logical_port_id, | ||
1016 | H_PORT_CB0, mask, cb0); | ||
1017 | ret = -EIO; | ||
1018 | if (hret != H_SUCCESS) | ||
1019 | goto out_free; | ||
1020 | |||
1021 | ret = 0; | ||
1022 | |||
1023 | out_free: | ||
1024 | kfree(cb0); | ||
1025 | out: | ||
1026 | return ret; | ||
1027 | } | ||
1028 | |||
1029 | static int ehea_gen_smrs(struct ehea_port_res *pr) | ||
1030 | { | ||
1031 | u64 hret; | ||
1032 | struct ehea_adapter *adapter = pr->port->adapter; | ||
1033 | |||
1034 | hret = ehea_h_register_smr(adapter->handle, adapter->mr.handle, | ||
1035 | adapter->mr.vaddr, EHEA_MR_ACC_CTRL, | ||
1036 | adapter->pd, &pr->send_mr); | ||
1037 | if (hret != H_SUCCESS) | ||
1038 | goto out; | ||
1039 | |||
1040 | hret = ehea_h_register_smr(adapter->handle, adapter->mr.handle, | ||
1041 | adapter->mr.vaddr, EHEA_MR_ACC_CTRL, | ||
1042 | adapter->pd, &pr->recv_mr); | ||
1043 | if (hret != H_SUCCESS) | ||
1044 | goto out_freeres; | ||
1045 | |||
1046 | return 0; | ||
1047 | |||
1048 | out_freeres: | ||
1049 | hret = ehea_h_free_resource(adapter->handle, pr->send_mr.handle); | ||
1050 | if (hret != H_SUCCESS) | ||
1051 | ehea_error("failed freeing SMR"); | ||
1052 | out: | ||
1053 | return -EIO; | ||
1054 | } | ||
1055 | |||
1056 | static int ehea_rem_smrs(struct ehea_port_res *pr) | ||
1057 | { | ||
1058 | struct ehea_adapter *adapter = pr->port->adapter; | ||
1059 | int ret = 0; | ||
1060 | u64 hret; | ||
1061 | |||
1062 | hret = ehea_h_free_resource(adapter->handle, pr->send_mr.handle); | ||
1063 | if (hret != H_SUCCESS) { | ||
1064 | ret = -EIO; | ||
1065 | ehea_error("failed freeing send SMR for pr=%p", pr); | ||
1066 | } | ||
1067 | |||
1068 | hret = ehea_h_free_resource(adapter->handle, pr->recv_mr.handle); | ||
1069 | if (hret != H_SUCCESS) { | ||
1070 | ret = -EIO; | ||
1071 | ehea_error("failed freeing recv SMR for pr=%p", pr); | ||
1072 | } | ||
1073 | |||
1074 | return ret; | ||
1075 | } | ||
1076 | |||
1077 | static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries) | ||
1078 | { | ||
1079 | int arr_size = sizeof(void*) * max_q_entries; | ||
1080 | |||
1081 | q_skba->arr = vmalloc(arr_size); | ||
1082 | if (!q_skba->arr) | ||
1083 | return -ENOMEM; | ||
1084 | |||
1085 | memset(q_skba->arr, 0, arr_size); | ||
1086 | |||
1087 | q_skba->len = max_q_entries; | ||
1088 | q_skba->index = 0; | ||
1089 | q_skba->os_skbs = 0; | ||
1090 | |||
1091 | return 0; | ||
1092 | } | ||
1093 | |||
1094 | static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr, | ||
1095 | struct port_res_cfg *pr_cfg, int queue_token) | ||
1096 | { | ||
1097 | struct ehea_adapter *adapter = port->adapter; | ||
1098 | enum ehea_eq_type eq_type = EHEA_EQ; | ||
1099 | struct ehea_qp_init_attr *init_attr = NULL; | ||
1100 | int ret = -EIO; | ||
1101 | |||
1102 | memset(pr, 0, sizeof(struct ehea_port_res)); | ||
1103 | |||
1104 | pr->port = port; | ||
1105 | spin_lock_init(&pr->send_lock); | ||
1106 | spin_lock_init(&pr->recv_lock); | ||
1107 | spin_lock_init(&pr->xmit_lock); | ||
1108 | spin_lock_init(&pr->netif_queue); | ||
1109 | |||
1110 | pr->recv_eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0); | ||
1111 | if (!pr->recv_eq) { | ||
1112 | ehea_error("create_eq failed (recv_eq)"); | ||
1113 | goto out_free; | ||
1114 | } | ||
1115 | |||
1116 | pr->send_eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0); | ||
1117 | if (!pr->send_eq) { | ||
1118 | ehea_error("create_eq failed (send_eq)"); | ||
1119 | goto out_free; | ||
1120 | } | ||
1121 | |||
1122 | pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq, | ||
1123 | pr->recv_eq->fw_handle, | ||
1124 | port->logical_port_id); | ||
1125 | if (!pr->recv_cq) { | ||
1126 | ehea_error("create_cq failed (cq_recv)"); | ||
1127 | goto out_free; | ||
1128 | } | ||
1129 | |||
1130 | pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq, | ||
1131 | pr->send_eq->fw_handle, | ||
1132 | port->logical_port_id); | ||
1133 | if (!pr->send_cq) { | ||
1134 | ehea_error("create_cq failed (cq_send)"); | ||
1135 | goto out_free; | ||
1136 | } | ||
1137 | |||
1138 | if (netif_msg_ifup(port)) | ||
1139 | ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d", | ||
1140 | pr->send_cq->attr.act_nr_of_cqes, | ||
1141 | pr->recv_cq->attr.act_nr_of_cqes); | ||
1142 | |||
1143 | init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL); | ||
1144 | if (!init_attr) { | ||
1145 | ret = -ENOMEM; | ||
1146 | ehea_error("no mem for ehea_qp_init_attr"); | ||
1147 | goto out_free; | ||
1148 | } | ||
1149 | |||
1150 | init_attr->low_lat_rq1 = 1; | ||
1151 | init_attr->signalingtype = 1; /* generate CQE if specified in WQE */ | ||
1152 | init_attr->rq_count = 3; | ||
1153 | init_attr->qp_token = queue_token; | ||
1154 | init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq; | ||
1155 | init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1; | ||
1156 | init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2; | ||
1157 | init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3; | ||
1158 | init_attr->wqe_size_enc_sq = EHEA_SG_SQ; | ||
1159 | init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1; | ||
1160 | init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2; | ||
1161 | init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3; | ||
1162 | init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD; | ||
1163 | init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD; | ||
1164 | init_attr->port_nr = port->logical_port_id; | ||
1165 | init_attr->send_cq_handle = pr->send_cq->fw_handle; | ||
1166 | init_attr->recv_cq_handle = pr->recv_cq->fw_handle; | ||
1167 | init_attr->aff_eq_handle = port->qp_eq->fw_handle; | ||
1168 | |||
1169 | pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr); | ||
1170 | if (!pr->qp) { | ||
1171 | ehea_error("create_qp failed"); | ||
1172 | ret = -EIO; | ||
1173 | goto out_free; | ||
1174 | } | ||
1175 | |||
1176 | if (netif_msg_ifup(port)) | ||
1177 | ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n " | ||
1178 | "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr, | ||
1179 | init_attr->act_nr_send_wqes, | ||
1180 | init_attr->act_nr_rwqes_rq1, | ||
1181 | init_attr->act_nr_rwqes_rq2, | ||
1182 | init_attr->act_nr_rwqes_rq3); | ||
1183 | |||
1184 | ret = ehea_init_q_skba(&pr->sq_skba, init_attr->act_nr_send_wqes + 1); | ||
1185 | ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1); | ||
1186 | ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1); | ||
1187 | ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1); | ||
1188 | if (ret) | ||
1189 | goto out_free; | ||
1190 | |||
1191 | pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10; | ||
1192 | if (ehea_gen_smrs(pr) != 0) { | ||
1193 | ret = -EIO; | ||
1194 | goto out_free; | ||
1195 | } | ||
1196 | tasklet_init(&pr->send_comp_task, ehea_send_irq_tasklet, | ||
1197 | (unsigned long)pr); | ||
1198 | atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1); | ||
1199 | |||
1200 | kfree(init_attr); | ||
1201 | ret = 0; | ||
1202 | goto out; | ||
1203 | |||
1204 | out_free: | ||
1205 | kfree(init_attr); | ||
1206 | vfree(pr->sq_skba.arr); | ||
1207 | vfree(pr->rq1_skba.arr); | ||
1208 | vfree(pr->rq2_skba.arr); | ||
1209 | vfree(pr->rq3_skba.arr); | ||
1210 | ehea_destroy_qp(pr->qp); | ||
1211 | ehea_destroy_cq(pr->send_cq); | ||
1212 | ehea_destroy_cq(pr->recv_cq); | ||
1213 | ehea_destroy_eq(pr->send_eq); | ||
1214 | ehea_destroy_eq(pr->recv_eq); | ||
1215 | out: | ||
1216 | return ret; | ||
1217 | } | ||
1218 | |||
1219 | static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr) | ||
1220 | { | ||
1221 | int ret, i; | ||
1222 | |||
1223 | ret = ehea_destroy_qp(pr->qp); | ||
1224 | |||
1225 | if (!ret) { | ||
1226 | ehea_destroy_cq(pr->send_cq); | ||
1227 | ehea_destroy_cq(pr->recv_cq); | ||
1228 | ehea_destroy_eq(pr->send_eq); | ||
1229 | ehea_destroy_eq(pr->recv_eq); | ||
1230 | |||
1231 | for (i = 0; i < pr->rq1_skba.len; i++) | ||
1232 | if (pr->rq1_skba.arr[i]) | ||
1233 | dev_kfree_skb(pr->rq1_skba.arr[i]); | ||
1234 | |||
1235 | for (i = 0; i < pr->rq2_skba.len; i++) | ||
1236 | if (pr->rq2_skba.arr[i]) | ||
1237 | dev_kfree_skb(pr->rq2_skba.arr[i]); | ||
1238 | |||
1239 | for (i = 0; i < pr->rq3_skba.len; i++) | ||
1240 | if (pr->rq3_skba.arr[i]) | ||
1241 | dev_kfree_skb(pr->rq3_skba.arr[i]); | ||
1242 | |||
1243 | for (i = 0; i < pr->sq_skba.len; i++) | ||
1244 | if (pr->sq_skba.arr[i]) | ||
1245 | dev_kfree_skb(pr->sq_skba.arr[i]); | ||
1246 | |||
1247 | vfree(pr->rq1_skba.arr); | ||
1248 | vfree(pr->rq2_skba.arr); | ||
1249 | vfree(pr->rq3_skba.arr); | ||
1250 | vfree(pr->sq_skba.arr); | ||
1251 | ret = ehea_rem_smrs(pr); | ||
1252 | } | ||
1253 | return ret; | ||
1254 | } | ||
1255 | |||
1256 | /* | ||
1257 | * The write_* functions store information in swqe which is used by | ||
1258 | * the hardware to calculate the ip/tcp/udp checksum | ||
1259 | */ | ||
1260 | |||
1261 | static inline void write_ip_start_end(struct ehea_swqe *swqe, | ||
1262 | const struct sk_buff *skb) | ||
1263 | { | ||
1264 | swqe->ip_start = (u8)(((u64)skb->nh.iph) - ((u64)skb->data)); | ||
1265 | swqe->ip_end = (u8)(swqe->ip_start + skb->nh.iph->ihl * 4 - 1); | ||
1266 | } | ||
1267 | |||
1268 | static inline void write_tcp_offset_end(struct ehea_swqe *swqe, | ||
1269 | const struct sk_buff *skb) | ||
1270 | { | ||
1271 | swqe->tcp_offset = | ||
1272 | (u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check)); | ||
1273 | |||
1274 | swqe->tcp_end = (u16)skb->len - 1; | ||
1275 | } | ||
1276 | |||
1277 | static inline void write_udp_offset_end(struct ehea_swqe *swqe, | ||
1278 | const struct sk_buff *skb) | ||
1279 | { | ||
1280 | swqe->tcp_offset = | ||
1281 | (u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check)); | ||
1282 | |||
1283 | swqe->tcp_end = (u16)skb->len - 1; | ||
1284 | } | ||
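
[Editor's note] For a concrete feel of what the write_* helpers store: on an untagged Ethernet frame with a 20-byte IPv4 header, ip_start is 14, ip_end is 33, and tcp_offset lands on the TCP checksum field at byte 50. A standalone arithmetic check of those offsets (plain C; the constants are assumed from the standard header layouts, not taken from this patch):

	#include <stdio.h>

	int main(void)
	{
		int eth_hlen = 14;		/* ETH_HLEN */
		int ihl = 5;			/* IPv4 header length in 32-bit words */
		int tcp_check_off = 16;		/* offsetof(struct tcphdr, check) */

		int ip_start = eth_hlen;			/* 14 */
		int ip_end = ip_start + ihl * 4 - 1;		/* 33 */
		int tcp_offset = ip_end + 1 + tcp_check_off;	/* 50 */

		printf("ip_start=%d ip_end=%d tcp_offset=%d\n",
		       ip_start, ip_end, tcp_offset);
		return 0;
	}
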
1285 | |||
1286 | |||
1287 | static void write_swqe2_TSO(struct sk_buff *skb, | ||
1288 | struct ehea_swqe *swqe, u32 lkey) | ||
1289 | { | ||
1290 | struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry; | ||
1291 | u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0]; | ||
1292 | int skb_data_size; | ||
1293 | int headersize; | ||
1294 | u64 tmp_addr; | ||
1295 | |||
1296 | /* Packet is TCP with TSO enabled */ | ||
1297 | swqe->tx_control |= EHEA_SWQE_TSO; | ||
1298 | swqe->mss = skb_shinfo(skb)->gso_size; | ||
1299 | /* copy only eth/ip/tcp headers to immediate data and | ||
1300 | * the rest of skb->data to sg1entry | ||
1301 | */ | ||
1302 | headersize = ETH_HLEN + (skb->nh.iph->ihl * 4) + (skb->h.th->doff * 4); | ||
1303 | |||
1304 | skb_data_size = skb->len - skb->data_len; | ||
1305 | |||
1306 | if (skb_data_size >= headersize) { | ||
1307 | /* copy immediate data */ | ||
1308 | memcpy(imm_data, skb->data, headersize); | ||
1309 | swqe->immediate_data_length = headersize; | ||
1310 | |||
1311 | if (skb_data_size > headersize) { | ||
1312 | /* set sg1entry data */ | ||
1313 | sg1entry->l_key = lkey; | ||
1314 | sg1entry->len = skb_data_size - headersize; | ||
1315 | |||
1316 | tmp_addr = (u64)(skb->data + headersize); | ||
1317 | sg1entry->vaddr = tmp_addr; | ||
1318 | swqe->descriptors++; | ||
1319 | } | ||
1320 | } else | ||
1321 | ehea_error("cannot handle fragmented headers"); | ||
1322 | } | ||
1323 | |||
1324 | static void write_swqe2_nonTSO(struct sk_buff *skb, | ||
1325 | struct ehea_swqe *swqe, u32 lkey) | ||
1326 | { | ||
1327 | int skb_data_size = skb->len - skb->data_len; | ||
1328 | u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0]; | ||
1329 | struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry; | ||
1330 | u64 tmp_addr; | ||
1331 | |||
1332 | /* Packet is any nonTSO type | ||
1333 | * | ||
1334 | * Copy as much of skb->data as possible to immediate data and | ||
1335 | * the rest to sg1entry | ||
1336 | */ | ||
1337 | if (skb_data_size >= SWQE2_MAX_IMM) { | ||
1338 | /* copy immediate data */ | ||
1339 | memcpy(imm_data, skb->data, SWQE2_MAX_IMM); | ||
1340 | |||
1341 | swqe->immediate_data_length = SWQE2_MAX_IMM; | ||
1342 | |||
1343 | if (skb_data_size > SWQE2_MAX_IMM) { | ||
1344 | /* copy sg1entry data */ | ||
1345 | sg1entry->l_key = lkey; | ||
1346 | sg1entry->len = skb_data_size - SWQE2_MAX_IMM; | ||
1347 | tmp_addr = (u64)(skb->data + SWQE2_MAX_IMM); | ||
1348 | sg1entry->vaddr = tmp_addr; | ||
1349 | swqe->descriptors++; | ||
1350 | } | ||
1351 | } else { | ||
1352 | memcpy(imm_data, skb->data, skb_data_size); | ||
1353 | swqe->immediate_data_length = skb_data_size; | ||
1354 | } | ||
1355 | } | ||
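
[Editor's note] A toy model of the split write_swqe2_nonTSO() performs: up to SWQE2_MAX_IMM bytes of the linear buffer travel as immediate data inside the WQE itself, and any remainder is referenced by a single scatter-gather entry. MAX_IMM below is a stand-in constant; the real SWQE2_MAX_IMM is defined in ehea.h:

	#include <stdio.h>
	#include <string.h>

	#define MAX_IMM 128	/* stand-in for SWQE2_MAX_IMM */

	int main(void)
	{
		char data[300];
		char imm[MAX_IMM];
		int len = sizeof(data);
		int imm_len, sg_len;

		memset(data, 'x', sizeof(data));
		if (len >= MAX_IMM) {
			memcpy(imm, data, MAX_IMM);	/* immediate part */
			imm_len = MAX_IMM;
			sg_len = len - MAX_IMM;	/* sg entry covers data + MAX_IMM */
		} else {
			memcpy(imm, data, len);		/* everything fits immediate */
			imm_len = len;
			sg_len = 0;
		}
		printf("immediate=%d sg=%d\n", imm_len, sg_len);
		return 0;
	}
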
1356 | |||
1357 | static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev, | ||
1358 | struct ehea_swqe *swqe, u32 lkey) | ||
1359 | { | ||
1360 | struct ehea_vsgentry *sg_list, *sg1entry, *sgentry; | ||
1361 | skb_frag_t *frag; | ||
1362 | int nfrags, sg1entry_contains_frag_data, i; | ||
1363 | u64 tmp_addr; | ||
1364 | |||
1365 | nfrags = skb_shinfo(skb)->nr_frags; | ||
1366 | sg1entry = &swqe->u.immdata_desc.sg_entry; | ||
1367 | sg_list = (struct ehea_vsgentry*)&swqe->u.immdata_desc.sg_list; | ||
1368 | swqe->descriptors = 0; | ||
1369 | sg1entry_contains_frag_data = 0; | ||
1370 | |||
1371 | if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size) | ||
1372 | write_swqe2_TSO(skb, swqe, lkey); | ||
1373 | else | ||
1374 | write_swqe2_nonTSO(skb, swqe, lkey); | ||
1375 | |||
1376 | /* write descriptors */ | ||
1377 | if (nfrags > 0) { | ||
1378 | if (swqe->descriptors == 0) { | ||
1379 | /* sg1entry not yet used */ | ||
1380 | frag = &skb_shinfo(skb)->frags[0]; | ||
1381 | |||
1382 | /* copy sg1entry data */ | ||
1383 | sg1entry->l_key = lkey; | ||
1384 | sg1entry->len = frag->size; | ||
1385 | tmp_addr = (u64)(page_address(frag->page) | ||
1386 | + frag->page_offset); | ||
1387 | sg1entry->vaddr = tmp_addr; | ||
1388 | swqe->descriptors++; | ||
1389 | sg1entry_contains_frag_data = 1; | ||
1390 | } | ||
1391 | |||
1392 | for (i = sg1entry_contains_frag_data; i < nfrags; i++) { | ||
1393 | |||
1394 | frag = &skb_shinfo(skb)->frags[i]; | ||
1395 | sgentry = &sg_list[i - sg1entry_contains_frag_data]; | ||
1396 | |||
1397 | sgentry->l_key = lkey; | ||
1398 | sgentry->len = frag->size; | ||
1399 | |||
1400 | tmp_addr = (u64)(page_address(frag->page) | ||
1401 | + frag->page_offset); | ||
1402 | sgentry->vaddr = tmp_addr; | ||
1403 | swqe->descriptors++; | ||
1404 | } | ||
1405 | } | ||
1406 | } | ||
1407 | |||
1408 | static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid) | ||
1409 | { | ||
1410 | int ret = 0; | ||
1411 | u64 hret; | ||
1412 | u8 reg_type; | ||
1413 | |||
1414 | /* De/Register untagged packets */ | ||
1415 | reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED; | ||
1416 | hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, | ||
1417 | port->logical_port_id, | ||
1418 | reg_type, port->mac_addr, 0, hcallid); | ||
1419 | if (hret != H_SUCCESS) { | ||
1420 | ehea_error("reg_dereg_bcmc failed (tagged)"); | ||
1421 | ret = -EIO; | ||
1422 | goto out_herr; | ||
1423 | } | ||
1424 | |||
1425 | /* De/Register VLAN packets */ | ||
1426 | reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL; | ||
1427 | hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, | ||
1428 | port->logical_port_id, | ||
1429 | reg_type, port->mac_addr, 0, hcallid); | ||
1430 | if (hret != H_SUCCESS) { | ||
1431 | ehea_error("reg_dereg_bcmc failed (vlan)"); | ||
1432 | ret = -EIO; | ||
1433 | } | ||
1434 | out_herr: | ||
1435 | return ret; | ||
1436 | } | ||
1437 | |||
1438 | static int ehea_set_mac_addr(struct net_device *dev, void *sa) | ||
1439 | { | ||
1440 | struct ehea_port *port = netdev_priv(dev); | ||
1441 | struct sockaddr *mac_addr = sa; | ||
1442 | struct hcp_ehea_port_cb0 *cb0; | ||
1443 | int ret; | ||
1444 | u64 hret; | ||
1445 | |||
1446 | if (!is_valid_ether_addr(mac_addr->sa_data)) { | ||
1447 | ret = -EADDRNOTAVAIL; | ||
1448 | goto out; | ||
1449 | } | ||
1450 | |||
1451 | cb0 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | ||
1452 | if (!cb0) { | ||
1453 | ehea_error("no mem for cb0"); | ||
1454 | ret = -ENOMEM; | ||
1455 | goto out; | ||
1456 | } | ||
1457 | |||
1458 | memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN); | ||
1459 | |||
1460 | cb0->port_mac_addr = cb0->port_mac_addr >> 16; | ||
1461 | |||
1462 | hret = ehea_h_modify_ehea_port(port->adapter->handle, | ||
1463 | port->logical_port_id, H_PORT_CB0, | ||
1464 | EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0); | ||
1465 | if (hret != H_SUCCESS) { | ||
1466 | ret = -EIO; | ||
1467 | goto out_free; | ||
1468 | } | ||
1469 | |||
1470 | memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len); | ||
1471 | |||
1472 | /* Deregister old MAC in pHYP */ | ||
1473 | ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC); | ||
1474 | if (ret) | ||
1475 | goto out_free; | ||
1476 | |||
1477 | port->mac_addr = cb0->port_mac_addr << 16; | ||
1478 | |||
1479 | /* Register new MAC in pHYP */ | ||
1480 | ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); | ||
1481 | if (ret) | ||
1482 | goto out_free; | ||
1483 | |||
1484 | ret = 0; | ||
1485 | out_free: | ||
1486 | kfree(cb0); | ||
1487 | out: | ||
1488 | return ret; | ||
1489 | } | ||
1490 | |||
1491 | static void ehea_promiscuous_error(u64 hret, int enable) | ||
1492 | { | ||
1493 | ehea_info("Hypervisor denied %sabling promiscuous mode.%s", | ||
1494 | enable == 1 ? "en" : "dis", | ||
1495 | hret != H_AUTHORITY ? "" : " Another partition owning a " | ||
1496 | "logical port on the same physical port might have altered " | ||
1497 | "promiscuous mode first."); | ||
1498 | } | ||
1499 | |||
1500 | static void ehea_promiscuous(struct net_device *dev, int enable) | ||
1501 | { | ||
1502 | struct ehea_port *port = netdev_priv(dev); | ||
1503 | struct hcp_ehea_port_cb7 *cb7; | ||
1504 | u64 hret; | ||
1505 | |||
1506 | if ((enable && port->promisc) || (!enable && !port->promisc)) | ||
1507 | return; | ||
1508 | |||
1509 | cb7 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | ||
1510 | if (!cb7) { | ||
1511 | ehea_error("no mem for cb7"); | ||
1512 | goto out; | ||
1513 | } | ||
1514 | |||
1515 | /* Modify Pxs_DUCQPN in CB7 */ | ||
1516 | cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0; | ||
1517 | |||
1518 | hret = ehea_h_modify_ehea_port(port->adapter->handle, | ||
1519 | port->logical_port_id, | ||
1520 | H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7); | ||
1521 | if (hret) { | ||
1522 | ehea_promiscuous_error(hret, enable); | ||
1523 | goto out; | ||
1524 | } | ||
1525 | |||
1526 | port->promisc = enable; | ||
1527 | out: | ||
1528 | kfree(cb7); | ||
1529 | return; | ||
1530 | } | ||
1531 | |||
1532 | static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr, | ||
1533 | u32 hcallid) | ||
1534 | { | ||
1535 | u64 hret; | ||
1536 | u8 reg_type; | ||
1537 | |||
1538 | reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST | ||
1539 | | EHEA_BCMC_UNTAGGED; | ||
1540 | |||
1541 | hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, | ||
1542 | port->logical_port_id, | ||
1543 | reg_type, mc_mac_addr, 0, hcallid); | ||
1544 | if (hret) | ||
1545 | goto out; | ||
1546 | |||
1547 | reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST | ||
1548 | | EHEA_BCMC_VLANID_ALL; | ||
1549 | |||
1550 | hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, | ||
1551 | port->logical_port_id, | ||
1552 | reg_type, mc_mac_addr, 0, hcallid); | ||
1553 | out: | ||
1554 | return hret; | ||
1555 | } | ||
1556 | |||
1557 | static int ehea_drop_multicast_list(struct net_device *dev) | ||
1558 | { | ||
1559 | struct ehea_port *port = netdev_priv(dev); | ||
1560 | struct ehea_mc_list *mc_entry = port->mc_list; | ||
1561 | struct list_head *pos; | ||
1562 | struct list_head *temp; | ||
1563 | int ret = 0; | ||
1564 | u64 hret; | ||
1565 | |||
1566 | list_for_each_safe(pos, temp, &(port->mc_list->list)) { | ||
1567 | mc_entry = list_entry(pos, struct ehea_mc_list, list); | ||
1568 | |||
1569 | hret = ehea_multicast_reg_helper(port, mc_entry->macaddr, | ||
1570 | H_DEREG_BCMC); | ||
1571 | if (hret) { | ||
1572 | ehea_error("failed deregistering mcast MAC"); | ||
1573 | ret = -EIO; | ||
1574 | } | ||
1575 | |||
1576 | list_del(pos); | ||
1577 | kfree(mc_entry); | ||
1578 | } | ||
1579 | return ret; | ||
1580 | } | ||
1581 | |||
1582 | static void ehea_allmulti(struct net_device *dev, int enable) | ||
1583 | { | ||
1584 | struct ehea_port *port = netdev_priv(dev); | ||
1585 | u64 hret; | ||
1586 | |||
1587 | if (!port->allmulti) { | ||
1588 | if (enable) { | ||
1589 | /* Enable ALLMULTI */ | ||
1590 | ehea_drop_multicast_list(dev); | ||
1591 | hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC); | ||
1592 | if (!hret) | ||
1593 | port->allmulti = 1; | ||
1594 | else | ||
1595 | ehea_error("failed enabling IFF_ALLMULTI"); | ||
1596 | } | ||
1597 | } else | ||
1598 | if (!enable) { | ||
1599 | /* Disable ALLMULTI */ | ||
1600 | hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC); | ||
1601 | if (!hret) | ||
1602 | port->allmulti = 0; | ||
1603 | else | ||
1604 | ehea_error("failed disabling IFF_ALLMULTI"); | ||
1605 | } | ||
1606 | } | ||
1607 | |||
1608 | static void ehea_add_multicast_entry(struct ehea_port* port, u8* mc_mac_addr) | ||
1609 | { | ||
1610 | struct ehea_mc_list *ehea_mcl_entry; | ||
1611 | u64 hret; | ||
1612 | |||
1613 | ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_KERNEL); | ||
1614 | if (!ehea_mcl_entry) { | ||
1615 | ehea_error("no mem for mcl_entry"); | ||
1616 | return; | ||
1617 | } | ||
1618 | |||
1619 | INIT_LIST_HEAD(&ehea_mcl_entry->list); | ||
1620 | |||
1621 | memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN); | ||
1622 | |||
1623 | hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr, | ||
1624 | H_REG_BCMC); | ||
1625 | if (!hret) | ||
1626 | list_add(&ehea_mcl_entry->list, &port->mc_list->list); | ||
1627 | else { | ||
1628 | ehea_error("failed registering mcast MAC"); | ||
1629 | kfree(ehea_mcl_entry); | ||
1630 | } | ||
1631 | } | ||
1632 | |||
1633 | static void ehea_set_multicast_list(struct net_device *dev) | ||
1634 | { | ||
1635 | struct ehea_port *port = netdev_priv(dev); | ||
1636 | struct dev_mc_list *k_mcl_entry; | ||
1637 | int ret, i; | ||
1638 | |||
1639 | if (dev->flags & IFF_PROMISC) { | ||
1640 | ehea_promiscuous(dev, 1); | ||
1641 | return; | ||
1642 | } | ||
1643 | ehea_promiscuous(dev, 0); | ||
1644 | |||
1645 | if (dev->flags & IFF_ALLMULTI) { | ||
1646 | ehea_allmulti(dev, 1); | ||
1647 | return; | ||
1648 | } | ||
1649 | ehea_allmulti(dev, 0); | ||
1650 | |||
1651 | if (dev->mc_count) { | ||
1652 | ret = ehea_drop_multicast_list(dev); | ||
1653 | if (ret) { | ||
1654 | /* Dropping the current multicast list failed. | ||
1655 | * Enabling ALL_MULTI is the best we can do. | ||
1656 | */ | ||
1657 | ehea_allmulti(dev, 1); | ||
1658 | } | ||
1659 | |||
1660 | if (dev->mc_count > port->adapter->max_mc_mac) { | ||
1661 | ehea_info("Mcast registration limit reached (0x%lx). " | ||
1662 | "Use ALLMULTI!", | ||
1663 | port->adapter->max_mc_mac); | ||
1664 | goto out; | ||
1665 | } | ||
1666 | |||
1667 | for (i = 0, k_mcl_entry = dev->mc_list; | ||
1668 | i < dev->mc_count; | ||
1669 | i++, k_mcl_entry = k_mcl_entry->next) { | ||
1670 | ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr); | ||
1671 | } | ||
1672 | } | ||
1673 | out: | ||
1674 | return; | ||
1675 | } | ||
1676 | |||
1677 | static int ehea_change_mtu(struct net_device *dev, int new_mtu) | ||
1678 | { | ||
1679 | if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE)) | ||
1680 | return -EINVAL; | ||
1681 | dev->mtu = new_mtu; | ||
1682 | return 0; | ||
1683 | } | ||
1684 | |||
1685 | static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev, | ||
1686 | struct ehea_swqe *swqe, u32 lkey) | ||
1687 | { | ||
1688 | if (skb->protocol == htons(ETH_P_IP)) { | ||
1689 | /* IPv4 */ | ||
1690 | swqe->tx_control |= EHEA_SWQE_CRC | ||
1691 | | EHEA_SWQE_IP_CHECKSUM | ||
1692 | | EHEA_SWQE_TCP_CHECKSUM | ||
1693 | | EHEA_SWQE_IMM_DATA_PRESENT | ||
1694 | | EHEA_SWQE_DESCRIPTORS_PRESENT; | ||
1695 | |||
1696 | write_ip_start_end(swqe, skb); | ||
1697 | |||
1698 | if (skb->nh.iph->protocol == IPPROTO_UDP) { | ||
1699 | if ((skb->nh.iph->frag_off & IP_MF) || | ||
1700 | (skb->nh.iph->frag_off & IP_OFFSET)) | ||
1701 | /* IP fragment, so don't change cs */ | ||
1702 | swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM; | ||
1703 | else | ||
1704 | write_udp_offset_end(swqe, skb); | ||
1705 | |||
1706 | } else if (skb->nh.iph->protocol == IPPROTO_TCP) { | ||
1707 | write_tcp_offset_end(swqe, skb); | ||
1708 | } | ||
1709 | |||
1710 | /* icmp (big data) and ip segmentation packets (all other ip | ||
1711 | packets) do not require any special handling */ | ||
1712 | |||
1713 | } else { | ||
1714 | /* Other Ethernet Protocol */ | ||
1715 | swqe->tx_control |= EHEA_SWQE_CRC | ||
1716 | | EHEA_SWQE_IMM_DATA_PRESENT | ||
1717 | | EHEA_SWQE_DESCRIPTORS_PRESENT; | ||
1718 | } | ||
1719 | |||
1720 | write_swqe2_data(skb, dev, swqe, lkey); | ||
1721 | } | ||
1722 | |||
1723 | static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev, | ||
1724 | struct ehea_swqe *swqe) | ||
1725 | { | ||
1726 | int nfrags = skb_shinfo(skb)->nr_frags; | ||
1727 | u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0]; | ||
1728 | skb_frag_t *frag; | ||
1729 | int i; | ||
1730 | |||
1731 | if (skb->protocol == htons(ETH_P_IP)) { | ||
1732 | /* IPv4 */ | ||
1733 | write_ip_start_end(swqe, skb); | ||
1734 | |||
1735 | if (skb->nh.iph->protocol == IPPROTO_TCP) { | ||
1736 | swqe->tx_control |= EHEA_SWQE_CRC | ||
1737 | | EHEA_SWQE_IP_CHECKSUM | ||
1738 | | EHEA_SWQE_TCP_CHECKSUM | ||
1739 | | EHEA_SWQE_IMM_DATA_PRESENT; | ||
1740 | |||
1741 | write_tcp_offset_end(swqe, skb); | ||
1742 | |||
1743 | } else if (skb->nh.iph->protocol == IPPROTO_UDP) { | ||
1744 | if ((skb->nh.iph->frag_off & IP_MF) || | ||
1745 | (skb->nh.iph->frag_off & IP_OFFSET)) | ||
1746 | /* IP fragment, so don't change cs */ | ||
1747 | swqe->tx_control |= EHEA_SWQE_CRC | ||
1748 | | EHEA_SWQE_IMM_DATA_PRESENT; | ||
1749 | else { | ||
1750 | swqe->tx_control |= EHEA_SWQE_CRC | ||
1751 | | EHEA_SWQE_IP_CHECKSUM | ||
1752 | | EHEA_SWQE_TCP_CHECKSUM | ||
1753 | | EHEA_SWQE_IMM_DATA_PRESENT; | ||
1754 | |||
1755 | write_udp_offset_end(swqe, skb); | ||
1756 | } | ||
1757 | } else { | ||
1758 | /* icmp (big data) and | ||
1759 | ip segmentation packets (all other ip packets) */ | ||
1760 | swqe->tx_control |= EHEA_SWQE_CRC | ||
1761 | | EHEA_SWQE_IP_CHECKSUM | ||
1762 | | EHEA_SWQE_IMM_DATA_PRESENT; | ||
1763 | } | ||
1764 | } else { | ||
1765 | /* Other Ethernet Protocol */ | ||
1766 | swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT; | ||
1767 | } | ||
1768 | /* copy (immediate) data */ | ||
1769 | if (nfrags == 0) { | ||
1770 | /* data is in a single piece */ | ||
1771 | memcpy(imm_data, skb->data, skb->len); | ||
1772 | } else { | ||
1773 | /* first copy data from the skb->data buffer ... */ | ||
1774 | memcpy(imm_data, skb->data, skb->len - skb->data_len); | ||
1775 | imm_data += skb->len - skb->data_len; | ||
1776 | |||
1777 | /* ... then copy data from the fragments */ | ||
1778 | for (i = 0; i < nfrags; i++) { | ||
1779 | frag = &skb_shinfo(skb)->frags[i]; | ||
1780 | memcpy(imm_data, | ||
1781 | page_address(frag->page) + frag->page_offset, | ||
1782 | frag->size); | ||
1783 | imm_data += frag->size; | ||
1784 | } | ||
1785 | } | ||
1786 | swqe->immediate_data_length = skb->len; | ||
1787 | dev_kfree_skb(skb); | ||
1788 | } | ||
1789 | |||
1790 | static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
1791 | { | ||
1792 | struct ehea_port *port = netdev_priv(dev); | ||
1793 | struct ehea_swqe *swqe; | ||
1794 | unsigned long flags; | ||
1795 | u32 lkey; | ||
1796 | int swqe_index; | ||
1797 | struct ehea_port_res *pr = &port->port_res[0]; | ||
1798 | |||
1799 | spin_lock(&pr->xmit_lock); | ||
1800 | |||
1801 | swqe = ehea_get_swqe(pr->qp, &swqe_index); | ||
1802 | memset(swqe, 0, SWQE_HEADER_SIZE); | ||
1803 | atomic_dec(&pr->swqe_avail); | ||
1804 | |||
1805 | if (skb->len <= SWQE3_MAX_IMM) { | ||
1806 | u32 sig_iv = port->sig_comp_iv; | ||
1807 | u32 swqe_num = pr->swqe_id_counter; | ||
1808 | ehea_xmit3(skb, dev, swqe); | ||
1809 | swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE) | ||
1810 | | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num); | ||
1811 | if (pr->swqe_ll_count >= (sig_iv - 1)) { | ||
1812 | swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL, | ||
1813 | sig_iv); | ||
1814 | swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION; | ||
1815 | pr->swqe_ll_count = 0; | ||
1816 | } else | ||
1817 | pr->swqe_ll_count += 1; | ||
1818 | } else { | ||
1819 | swqe->wr_id = | ||
1820 | EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE) | ||
1821 | | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter) | ||
1822 | | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index); | ||
1823 | pr->sq_skba.arr[pr->sq_skba.index] = skb; | ||
1824 | |||
1825 | pr->sq_skba.index++; | ||
1826 | pr->sq_skba.index &= (pr->sq_skba.len - 1); | ||
1827 | |||
1828 | lkey = pr->send_mr.lkey; | ||
1829 | ehea_xmit2(skb, dev, swqe, lkey); | ||
1830 | |||
1831 | if (pr->swqe_count >= (EHEA_SIG_IV_LONG - 1)) { | ||
1832 | swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL, | ||
1833 | EHEA_SIG_IV_LONG); | ||
1834 | swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION; | ||
1835 | pr->swqe_count = 0; | ||
1836 | } else | ||
1837 | pr->swqe_count += 1; | ||
1838 | } | ||
1839 | pr->swqe_id_counter += 1; | ||
1840 | |||
1841 | if (port->vgrp && vlan_tx_tag_present(skb)) { | ||
1842 | swqe->tx_control |= EHEA_SWQE_VLAN_INSERT; | ||
1843 | swqe->vlan_tag = vlan_tx_tag_get(skb); | ||
1844 | } | ||
1845 | |||
1846 | if (netif_msg_tx_queued(port)) { | ||
1847 | ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr); | ||
1848 | ehea_dump(swqe, sizeof(*swqe), "swqe"); | ||
1849 | } | ||
1850 | |||
1851 | ehea_post_swqe(pr->qp, swqe); | ||
1852 | pr->tx_packets++; | ||
1853 | |||
1854 | if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) { | ||
1855 | spin_lock_irqsave(&pr->netif_queue, flags); | ||
1856 | if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) { | ||
1857 | netif_stop_queue(dev); | ||
1858 | pr->queue_stopped = 1; | ||
1859 | } | ||
1860 | spin_unlock_irqrestore(&pr->netif_queue, flags); | ||
1861 | } | ||
1862 | dev->trans_start = jiffies; | ||
1863 | spin_unlock(&pr->xmit_lock); | ||
1864 | |||
1865 | return NETDEV_TX_OK; | ||
1866 | } | ||
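
[Editor's note] The wr_id/tx_control handling above implements interval-signalled completions: only every sig_iv-th (or EHEA_SIG_IV_LONG-th) send WQE requests a CQE, so completion processing is amortized across many transmits. A toy trace of the counter logic, with SIG_IV as a stand-in interval (the driver derives its own):

	#include <stdio.h>

	#define SIG_IV 4	/* stand-in for the driver's interval */

	int main(void)
	{
		int ll_count = 0, i;

		for (i = 0; i < 10; i++) {
			int signalled = 0;	/* EHEA_SWQE_SIGNALLED_COMPLETION */

			if (ll_count >= SIG_IV - 1) {
				signalled = 1;	/* every SIG_IV-th WQE */
				ll_count = 0;
			} else
				ll_count++;
			printf("wqe %d signalled=%d\n", i, signalled);
		}
		return 0;
	}
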
1867 | |||
1868 | static void ehea_vlan_rx_register(struct net_device *dev, | ||
1869 | struct vlan_group *grp) | ||
1870 | { | ||
1871 | struct ehea_port *port = netdev_priv(dev); | ||
1872 | struct ehea_adapter *adapter = port->adapter; | ||
1873 | struct hcp_ehea_port_cb1 *cb1; | ||
1874 | u64 hret; | ||
1875 | |||
1876 | port->vgrp = grp; | ||
1877 | |||
1878 | cb1 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | ||
1879 | if (!cb1) { | ||
1880 | ehea_error("no mem for cb1"); | ||
1881 | goto out; | ||
1882 | } | ||
1883 | |||
1884 | if (grp) | ||
1885 | memset(cb1->vlan_filter, 0, sizeof(cb1->vlan_filter)); | ||
1886 | else | ||
1887 | memset(cb1->vlan_filter, 0xFF, sizeof(cb1->vlan_filter)); | ||
1888 | |||
1889 | hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, | ||
1890 | H_PORT_CB1, H_PORT_CB1_ALL, cb1); | ||
1891 | if (hret != H_SUCCESS) | ||
1892 | ehea_error("modify_ehea_port failed"); | ||
1893 | |||
1894 | kfree(cb1); | ||
1895 | out: | ||
1896 | return; | ||
1897 | } | ||
1898 | |||
1899 | static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) | ||
1900 | { | ||
1901 | struct ehea_port *port = netdev_priv(dev); | ||
1902 | struct ehea_adapter *adapter = port->adapter; | ||
1903 | struct hcp_ehea_port_cb1 *cb1; | ||
1904 | int index; | ||
1905 | u64 hret; | ||
1906 | |||
1907 | cb1 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | ||
1908 | if (!cb1) { | ||
1909 | ehea_error("no mem for cb1"); | ||
1910 | goto out; | ||
1911 | } | ||
1912 | |||
1913 | hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id, | ||
1914 | H_PORT_CB1, H_PORT_CB1_ALL, cb1); | ||
1915 | if (hret != H_SUCCESS) { | ||
1916 | ehea_error("query_ehea_port failed"); | ||
1917 | goto out; | ||
1918 | } | ||
1919 | |||
1920 | index = (vid / 64); | ||
1921 | cb1->vlan_filter[index] |= ((u64)1 << (vid & 0x3F)); | ||
1922 | |||
1923 | hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, | ||
1924 | H_PORT_CB1, H_PORT_CB1_ALL, cb1); | ||
1925 | if (hret != H_SUCCESS) | ||
1926 | ehea_error("modify_ehea_port failed"); | ||
1927 | out: | ||
1928 | kfree(cb1); | ||
1929 | return; | ||
1930 | } | ||
1931 | |||
1932 | static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | ||
1933 | { | ||
1934 | struct ehea_port *port = netdev_priv(dev); | ||
1935 | struct ehea_adapter *adapter = port->adapter; | ||
1936 | struct hcp_ehea_port_cb1 *cb1; | ||
1937 | int index; | ||
1938 | u64 hret; | ||
1939 | |||
1940 | if (port->vgrp) | ||
1941 | port->vgrp->vlan_devices[vid] = NULL; | ||
1942 | |||
1943 | cb1 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | ||
1944 | if (!cb1) { | ||
1945 | ehea_error("no mem for cb1"); | ||
1946 | goto out; | ||
1947 | } | ||
1948 | |||
1949 | hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id, | ||
1950 | H_PORT_CB1, H_PORT_CB1_ALL, cb1); | ||
1951 | if (hret != H_SUCCESS) { | ||
1952 | ehea_error("query_ehea_port failed"); | ||
1953 | goto out; | ||
1954 | } | ||
1955 | |||
1956 | index = (vid / 64); | ||
1957 | cb1->vlan_filter[index] &= ~((u64)1 << (vid & 0x3F)); | ||
1958 | |||
1959 | hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, | ||
1960 | H_PORT_CB1, H_PORT_CB1_ALL, cb1); | ||
1961 | if (hret != H_SUCCESS) | ||
1962 | ehea_error("modify_ehea_port failed"); | ||
1963 | out: | ||
1964 | kfree(cb1); | ||
1965 | return; | ||
1966 | } | ||
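
[Editor's note] Both VLAN paths above maintain a 4096-bit filter as an array of 64-bit words: word vid/64, bit vid%64. The cast must be applied to the 1 before shifting (as in the corrected lines above), since shifting a plain int by up to 63 bits loses the high VIDs. A standalone demonstration:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t vlan_filter[64] = { 0 };	/* 4096 VIDs, 64 per word */
		unsigned short vid = 100;
		int index = vid / 64;			/* word 1 */

		/* add_vid: cast before shift, so bits 32..63 survive */
		vlan_filter[index] |= (uint64_t)1 << (vid & 0x3F);	/* bit 36 */
		printf("word %d = 0x%016llx\n", index,
		       (unsigned long long)vlan_filter[index]);

		/* kill_vid: clear the same bit */
		vlan_filter[index] &= ~((uint64_t)1 << (vid & 0x3F));
		return 0;
	}
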
1967 | |||
1968 | int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp) | ||
1969 | { | ||
1970 | int ret = -EIO; | ||
1971 | u64 hret; | ||
1972 | u16 dummy16 = 0; | ||
1973 | u64 dummy64 = 0; | ||
1974 | struct hcp_modify_qp_cb0* cb0; | ||
1975 | |||
1976 | cb0 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | ||
1977 | if (!cb0) { | ||
1978 | ret = -ENOMEM; | ||
1979 | goto out; | ||
1980 | } | ||
1981 | |||
1982 | hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, | ||
1983 | EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); | ||
1984 | if (hret != H_SUCCESS) { | ||
1985 | ehea_error("query_ehea_qp failed (1)"); | ||
1986 | goto out; | ||
1987 | } | ||
1988 | |||
1989 | cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED; | ||
1990 | hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, | ||
1991 | EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, | ||
1992 | &dummy64, &dummy64, &dummy16, &dummy16); | ||
1993 | if (hret != H_SUCCESS) { | ||
1994 | ehea_error("modify_ehea_qp failed (1)"); | ||
1995 | goto out; | ||
1996 | } | ||
1997 | |||
1998 | hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, | ||
1999 | EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); | ||
2000 | if (hret != H_SUCCESS) { | ||
2001 | ehea_error("query_ehea_qp failed (2)"); | ||
2002 | goto out; | ||
2003 | } | ||
2004 | |||
2005 | cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED; | ||
2006 | hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, | ||
2007 | EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, | ||
2008 | &dummy64, &dummy64, &dummy16, &dummy16); | ||
2009 | if (hret != H_SUCCESS) { | ||
2010 | ehea_error("modify_ehea_qp failed (2)"); | ||
2011 | goto out; | ||
2012 | } | ||
2013 | |||
2014 | hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, | ||
2015 | EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); | ||
2016 | if (hret != H_SUCCESS) { | ||
2017 | ehea_error("query_ehea_qp failed (3)"); | ||
2018 | goto out; | ||
2019 | } | ||
2020 | |||
2021 | cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND; | ||
2022 | hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, | ||
2023 | EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, | ||
2024 | &dummy64, &dummy64, &dummy16, &dummy16); | ||
2025 | if (hret != H_SUCCESS) { | ||
2026 | ehea_error("modify_ehea_qp failed (3)"); | ||
2027 | goto out; | ||
2028 | } | ||
2029 | |||
2030 | hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, | ||
2031 | EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); | ||
2032 | if (hret != H_SUCCESS) { | ||
2033 | ehea_error("query_ehea_qp failed (4)"); | ||
2034 | goto out; | ||
2035 | } | ||
2036 | |||
2037 | ret = 0; | ||
2038 | out: | ||
2039 | kfree(cb0); | ||
2040 | return ret; | ||
2041 | } | ||
2042 | |||
2043 | static int ehea_port_res_setup(struct ehea_port *port, int def_qps, | ||
2044 | int add_tx_qps) | ||
2045 | { | ||
2046 | int ret, i; | ||
2047 | struct port_res_cfg pr_cfg, pr_cfg_small_rx; | ||
2048 | enum ehea_eq_type eq_type = EHEA_EQ; | ||
2049 | |||
2050 | port->qp_eq = ehea_create_eq(port->adapter, eq_type, | ||
2051 | EHEA_MAX_ENTRIES_EQ, 1); | ||
2052 | if (!port->qp_eq) { | ||
2053 | ret = -EINVAL; | ||
2054 | ehea_error("ehea_create_eq failed (qp_eq)"); | ||
2055 | goto out_kill_eq; | ||
2056 | } | ||
2057 | |||
2058 | pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries; | ||
2059 | pr_cfg.max_entries_scq = sq_entries; | ||
2060 | pr_cfg.max_entries_sq = sq_entries; | ||
2061 | pr_cfg.max_entries_rq1 = rq1_entries; | ||
2062 | pr_cfg.max_entries_rq2 = rq2_entries; | ||
2063 | pr_cfg.max_entries_rq3 = rq3_entries; | ||
2064 | |||
2065 | pr_cfg_small_rx.max_entries_rcq = 1; | ||
2066 | pr_cfg_small_rx.max_entries_scq = sq_entries; | ||
2067 | pr_cfg_small_rx.max_entries_sq = sq_entries; | ||
2068 | pr_cfg_small_rx.max_entries_rq1 = 1; | ||
2069 | pr_cfg_small_rx.max_entries_rq2 = 1; | ||
2070 | pr_cfg_small_rx.max_entries_rq3 = 1; | ||
2071 | |||
2072 | for (i = 0; i < def_qps; i++) { | ||
2073 | ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i); | ||
2074 | if (ret) | ||
2075 | goto out_clean_pr; | ||
2076 | } | ||
2077 | for (i = def_qps; i < def_qps + add_tx_qps; i++) { | ||
2078 | ret = ehea_init_port_res(port, &port->port_res[i], | ||
2079 | &pr_cfg_small_rx, i); | ||
2080 | if (ret) | ||
2081 | goto out_clean_pr; | ||
2082 | } | ||
2083 | |||
2084 | return 0; | ||
2085 | |||
2086 | out_clean_pr: | ||
2087 | while (--i >= 0) | ||
2088 | ehea_clean_portres(port, &port->port_res[i]); | ||
2089 | |||
2090 | out_kill_eq: | ||
2091 | ehea_destroy_eq(port->qp_eq); | ||
2092 | return ret; | ||
2093 | } | ||
2094 | |||
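ehea_port_res_setup() (above) uses the kernel's partial-rollback idiom: on failure, i still holds the index of the resource that failed, so while (--i >= 0) cleans up exactly the resources that were set up before it. The shape in isolation, as a sketch with assumed init_item()/clean_item() helpers:

extern int init_item(int i);
extern void clean_item(int i);

static int init_all(int n)
{
	int i, ret = 0;

	for (i = 0; i < n; i++) {
		ret = init_item(i);
		if (ret)
			goto rollback;
	}
	return 0;

rollback:
	while (--i >= 0)	/* undo only what actually succeeded */
		clean_item(i);
	return ret;
}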
2095 | static int ehea_clean_all_portres(struct ehea_port *port) | ||
2096 | { | ||
2097 | int ret = 0; | ||
2098 | int i; | ||
2099 | |||
2100 | for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) | ||
2101 | ret |= ehea_clean_portres(port, &port->port_res[i]); | ||
2102 | |||
2103 | ret |= ehea_destroy_eq(port->qp_eq); | ||
2104 | |||
2105 | return ret; | ||
2106 | } | ||
2107 | |||
2108 | static int ehea_up(struct net_device *dev) | ||
2109 | { | ||
2110 | int ret, i; | ||
2111 | struct ehea_port *port = netdev_priv(dev); | ||
2112 | u64 mac_addr = 0; | ||
2113 | |||
2114 | if (port->state == EHEA_PORT_UP) | ||
2115 | return 0; | ||
2116 | |||
2117 | ret = ehea_port_res_setup(port, port->num_def_qps, | ||
2118 | port->num_add_tx_qps); | ||
2119 | if (ret) { | ||
2120 | ehea_error("port_res_failed"); | ||
2121 | goto out; | ||
2122 | } | ||
2123 | |||
2124 | /* Set default QP for this port */ | ||
2125 | ret = ehea_configure_port(port); | ||
2126 | if (ret) { | ||
2127 | ehea_error("ehea_configure_port failed. ret:%d", ret); | ||
2128 | goto out_clean_pr; | ||
2129 | } | ||
2130 | |||
2131 | ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); | ||
2132 | if (ret) { | ||
2133 | ret = -EIO; | ||
2134 | ehea_error("out_clean_pr"); | ||
2135 | goto out_clean_pr; | ||
2136 | } | ||
2137 | mac_addr = (*(u64*)dev->dev_addr) >> 16; | ||
2138 | |||
2139 | ret = ehea_reg_interrupts(dev); | ||
2140 | if (ret) { | ||
2141 | ehea_error("out_dereg_bc"); | ||
2142 | goto out_dereg_bc; | ||
2143 | } | ||
2144 | |||
2145 | for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { | ||
2146 | ret = ehea_activate_qp(port->adapter, port->port_res[i].qp); | ||
2147 | if (ret) { | ||
2148 | ehea_error("activate_qp failed"); | ||
2149 | goto out_free_irqs; | ||
2150 | } | ||
2151 | } | ||
2152 | |||
2153 | for (i = 0; i < port->num_def_qps; i++) { | ||
2154 | ret = ehea_fill_port_res(&port->port_res[i]); | ||
2155 | if (ret) { | ||
2156 | ehea_error("out_free_irqs"); | ||
2157 | goto out_free_irqs; | ||
2158 | } | ||
2159 | } | ||
2160 | |||
2161 | ret = 0; | ||
2162 | port->state = EHEA_PORT_UP; | ||
2163 | goto out; | ||
2164 | |||
2165 | out_free_irqs: | ||
2166 | ehea_free_interrupts(dev); | ||
2167 | |||
2168 | out_dereg_bc: | ||
2169 | ehea_broadcast_reg_helper(port, H_DEREG_BCMC); | ||
2170 | |||
2171 | out_clean_pr: | ||
2172 | ehea_clean_all_portres(port); | ||
2173 | out: | ||
2174 | return ret; | ||
2175 | } | ||
2176 | |||
2177 | static int ehea_open(struct net_device *dev) | ||
2178 | { | ||
2179 | int ret; | ||
2180 | struct ehea_port *port = netdev_priv(dev); | ||
2181 | |||
2182 | down(&port->port_lock); | ||
2183 | |||
2184 | if (netif_msg_ifup(port)) | ||
2185 | ehea_info("enabling port %s", dev->name); | ||
2186 | |||
2187 | ret = ehea_up(dev); | ||
2188 | if (!ret) | ||
2189 | netif_start_queue(dev); | ||
2190 | |||
2191 | up(&port->port_lock); | ||
2192 | |||
2193 | return ret; | ||
2194 | } | ||
2195 | |||
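ehea_open() (above) and ehea_stop() (below) serialize ifup/ifdown against the reset worker with port_lock, a binary semaphore (initialized with sema_init(&port->port_lock, 1) in ehea_setup_single_port() further down) that is only ever used for mutual exclusion. On kernels that provide struct mutex (2.6.16 and later) the same pattern maps one-to-one onto the dedicated mutex API; a sketch for illustration only, not part of the patch:

#include <linux/mutex.h>

struct example_port {
	struct mutex port_lock;		/* instead of a struct semaphore */
};

static void example_open(struct example_port *port)
{
	mutex_lock(&port->port_lock);	/* was: down(&port->port_lock) */
	/* ... bring the interface up ... */
	mutex_unlock(&port->port_lock);	/* was: up(&port->port_lock) */
}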
2196 | static int ehea_down(struct net_device *dev) | ||
2197 | { | ||
2198 | int ret, i; | ||
2199 | struct ehea_port *port = netdev_priv(dev); | ||
2200 | |||
2201 | if (port->state == EHEA_PORT_DOWN) | ||
2202 | return 0; | ||
2203 | |||
2204 | ehea_drop_multicast_list(dev); | ||
2205 | ehea_free_interrupts(dev); | ||
2206 | |||
2207 | for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) | ||
2208 | tasklet_kill(&port->port_res[i].send_comp_task); | ||
2209 | |||
2210 | ehea_broadcast_reg_helper(port, H_DEREG_BCMC); | ||
2211 | ret = ehea_clean_all_portres(port); | ||
2212 | port->state = EHEA_PORT_DOWN; | ||
2213 | return ret; | ||
2214 | } | ||
2215 | |||
2216 | static int ehea_stop(struct net_device *dev) | ||
2217 | { | ||
2218 | int ret; | ||
2219 | struct ehea_port *port = netdev_priv(dev); | ||
2220 | |||
2221 | if (netif_msg_ifdown(port)) | ||
2222 | ehea_info("disabling port %s", dev->name); | ||
2223 | |||
2224 | flush_workqueue(port->adapter->ehea_wq); | ||
2225 | down(&port->port_lock); | ||
2226 | netif_stop_queue(dev); | ||
2227 | ret = ehea_down(dev); | ||
2228 | up(&port->port_lock); | ||
2229 | return ret; | ||
2230 | } | ||
2231 | |||
2232 | static void ehea_reset_port(void *data) | ||
2233 | { | ||
2234 | int ret; | ||
2235 | struct net_device *dev = data; | ||
2236 | struct ehea_port *port = netdev_priv(dev); | ||
2237 | |||
2238 | port->resets++; | ||
2239 | down(&port->port_lock); | ||
2240 | netif_stop_queue(dev); | ||
2241 | netif_poll_disable(dev); | ||
2242 | |||
2243 | ret = ehea_down(dev); | ||
2244 | if (ret) | ||
2245 | ehea_error("ehea_down failed. not all resources are freed"); | ||
2246 | |||
2247 | ret = ehea_up(dev); | ||
2248 | if (ret) { | ||
2249 | ehea_error("Reset device %s failed: ret=%d", dev->name, ret); | ||
2250 | goto out; | ||
2251 | } | ||
2252 | |||
2253 | if (netif_msg_timer(port)) | ||
2254 | ehea_info("Device %s resetted successfully", dev->name); | ||
2255 | |||
2256 | netif_poll_enable(dev); | ||
2257 | netif_wake_queue(dev); | ||
2258 | out: | ||
2259 | up(&port->port_lock); | ||
2260 | return; | ||
2261 | } | ||
2262 | |||
2263 | static void ehea_tx_watchdog(struct net_device *dev) | ||
2264 | { | ||
2265 | struct ehea_port *port = netdev_priv(dev); | ||
2266 | |||
2267 | if (netif_carrier_ok(dev)) | ||
2268 | queue_work(port->adapter->ehea_wq, &port->reset_task); | ||
2269 | } | ||
2270 | |||
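ehea_tx_watchdog() runs in atomic context, while the actual recovery in ehea_reset_port() sleeps (it takes port_lock, frees IRQs and issues hcalls), so the watchdog merely queues reset_task on the adapter's workqueue; INIT_WORK() binds the handler once in ehea_setup_single_port() below, and queue_work() fires it per timeout. The deferral pattern, using the three-argument INIT_WORK of this kernel generation and reduced to a sketch with illustrative names:

#include <linux/workqueue.h>

static void heavy_recovery(void *data)		/* runs in process context */
{
	/* free to sleep here: take semaphores, issue hcalls, ... */
}

static void kick_recovery(struct workqueue_struct *wq,
			  struct work_struct *task, void *dev)
{
	INIT_WORK(task, heavy_recovery, dev);	/* normally done once at setup */
	queue_work(wq, task);			/* safe from atomic context */
}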
2271 | int ehea_sense_adapter_attr(struct ehea_adapter *adapter) | ||
2272 | { | ||
2273 | struct hcp_query_ehea *cb; | ||
2274 | u64 hret; | ||
2275 | int ret; | ||
2276 | |||
2277 | cb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | ||
2278 | if (!cb) { | ||
2279 | ret = -ENOMEM; | ||
2280 | goto out; | ||
2281 | } | ||
2282 | |||
2283 | hret = ehea_h_query_ehea(adapter->handle, cb); | ||
2284 | |||
2285 | if (hret != H_SUCCESS) { | ||
2286 | ret = -EIO; | ||
2287 | goto out_herr; | ||
2288 | } | ||
2289 | |||
2290 | adapter->num_ports = cb->num_ports; | ||
2291 | adapter->max_mc_mac = cb->max_mc_mac - 1; | ||
2292 | ret = 0; | ||
2293 | |||
2294 | out_herr: | ||
2295 | kfree(cb); | ||
2296 | out: | ||
2297 | return ret; | ||
2298 | } | ||
2299 | |||
2300 | static int ehea_setup_single_port(struct ehea_port *port, | ||
2301 | struct device_node *dn) | ||
2302 | { | ||
2303 | int ret; | ||
2304 | u64 hret; | ||
2305 | struct net_device *dev = port->netdev; | ||
2306 | struct ehea_adapter *adapter = port->adapter; | ||
2307 | struct hcp_ehea_port_cb4 *cb4; | ||
2308 | u32 *dn_log_port_id; | ||
2309 | |||
2310 | sema_init(&port->port_lock, 1); | ||
2311 | port->state = EHEA_PORT_DOWN; | ||
2312 | port->sig_comp_iv = sq_entries / 10; | ||
2313 | |||
2314 | if (!dn) { | ||
2315 | ehea_error("bad device node: dn=%p", dn); | ||
2316 | ret = -EINVAL; | ||
2317 | goto out; | ||
2318 | } | ||
2319 | |||
2320 | port->of_dev_node = dn; | ||
2321 | |||
2322 | /* Determine logical port id */ | ||
2323 | dn_log_port_id = (u32*)get_property(dn, "ibm,hea-port-no", NULL); | ||
2324 | |||
2325 | if (!dn_log_port_id) { | ||
2326 | ehea_error("bad device node: dn_log_port_id=%p", | ||
2327 | dn_log_port_id); | ||
2328 | ret = -EINVAL; | ||
2329 | goto out; | ||
2330 | } | ||
2331 | port->logical_port_id = *dn_log_port_id; | ||
2332 | |||
2333 | port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL); | ||
2334 | if (!port->mc_list) { | ||
2335 | ret = -ENOMEM; | ||
2336 | goto out; | ||
2337 | } | ||
2338 | |||
2339 | INIT_LIST_HEAD(&port->mc_list->list); | ||
2340 | |||
2341 | ehea_set_portspeed(port, EHEA_SPEED_AUTONEG); | ||
2342 | |||
2343 | ret = ehea_sense_port_attr(port); | ||
2344 | if (ret) | ||
2345 | goto out; | ||
2346 | |||
2347 | /* Enable Jumbo frames */ | ||
2348 | cb4 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); | ||
2349 | if (!cb4) { | ||
2350 | ehea_error("no mem for cb4"); | ||
2351 | } else { | ||
2352 | cb4->jumbo_frame = 1; | ||
2353 | hret = ehea_h_modify_ehea_port(adapter->handle, | ||
2354 | port->logical_port_id, | ||
2355 | H_PORT_CB4, H_PORT_CB4_JUMBO, | ||
2356 | cb4); | ||
2357 | if (hret != H_SUCCESS) { | ||
2358 | ehea_info("Jumbo frames not activated"); | ||
2359 | } | ||
2360 | kfree(cb4); | ||
2361 | } | ||
2362 | |||
2363 | /* initialize net_device structure */ | ||
2364 | SET_MODULE_OWNER(dev); | ||
2365 | |||
2366 | memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN); | ||
2367 | |||
2368 | dev->open = ehea_open; | ||
2369 | dev->poll = ehea_poll; | ||
2370 | dev->weight = 64; | ||
2371 | dev->stop = ehea_stop; | ||
2372 | dev->hard_start_xmit = ehea_start_xmit; | ||
2373 | dev->get_stats = ehea_get_stats; | ||
2374 | dev->set_multicast_list = ehea_set_multicast_list; | ||
2375 | dev->set_mac_address = ehea_set_mac_addr; | ||
2376 | dev->change_mtu = ehea_change_mtu; | ||
2377 | dev->vlan_rx_register = ehea_vlan_rx_register; | ||
2378 | dev->vlan_rx_add_vid = ehea_vlan_rx_add_vid; | ||
2379 | dev->vlan_rx_kill_vid = ehea_vlan_rx_kill_vid; | ||
2380 | dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO | ||
2381 | | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX | ||
2382 | | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER | ||
2383 | | NETIF_F_LLTX; | ||
2384 | dev->tx_timeout = &ehea_tx_watchdog; | ||
2385 | dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; | ||
2386 | |||
2387 | INIT_WORK(&port->reset_task, ehea_reset_port, dev); | ||
2388 | |||
2389 | ehea_set_ethtool_ops(dev); | ||
2390 | |||
2391 | ret = register_netdev(dev); | ||
2392 | if (ret) { | ||
2393 | ehea_error("register_netdev failed. ret=%d", ret); | ||
2394 | goto out_free; | ||
2395 | } | ||
2396 | |||
2397 | port->netdev = dev; | ||
2398 | ret = 0; | ||
2399 | goto out; | ||
2400 | |||
2401 | out_free: | ||
2402 | kfree(port->mc_list); | ||
2403 | out: | ||
2404 | return ret; | ||
2405 | } | ||
2406 | |||
2407 | static int ehea_setup_ports(struct ehea_adapter *adapter) | ||
2408 | { | ||
2409 | int ret; | ||
2410 | int port_setup_ok = 0; | ||
2411 | struct ehea_port *port; | ||
2412 | struct device_node *dn = NULL; | ||
2413 | struct net_device *dev; | ||
2414 | int i; | ||
2415 | |||
2416 | /* get port properties for all ports */ | ||
2417 | for (i = 0; i < adapter->num_ports; i++) { | ||
2418 | |||
2419 | if (adapter->port[i]) | ||
2420 | continue; /* port already up and running */ | ||
2421 | |||
2422 | /* allocate memory for the port structures */ | ||
2423 | dev = alloc_etherdev(sizeof(struct ehea_port)); | ||
2424 | |||
2425 | if (!dev) { | ||
2426 | ehea_error("no mem for net_device"); | ||
2427 | break; | ||
2428 | } | ||
2429 | |||
2430 | port = netdev_priv(dev); | ||
2431 | port->adapter = adapter; | ||
2432 | port->netdev = dev; | ||
2433 | adapter->port[i] = port; | ||
2434 | port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT); | ||
2435 | |||
2436 | dn = of_find_node_by_name(dn, "ethernet"); | ||
2437 | ret = ehea_setup_single_port(port, dn); | ||
2438 | if (ret) { | ||
2439 | /* Free mem for this port struct. The others will be | ||
2440 | processed on rollback */ | ||
2441 | free_netdev(dev); | ||
2442 | adapter->port[i] = NULL; | ||
2443 | ehea_error("eHEA port %d setup failed, ret=%d", i, ret); | ||
2444 | } | ||
2445 | } | ||
2446 | |||
2447 | of_node_put(dn); | ||
2448 | |||
2449 | /* Check for successfully set up ports */ | ||
2450 | for (i = 0; i < adapter->num_ports; i++) | ||
2451 | if (adapter->port[i]) | ||
2452 | port_setup_ok++; | ||
2453 | |||
2454 | if (port_setup_ok) | ||
2455 | ret = 0; /* At least some ports are set up correctly */ | ||
2456 | else | ||
2457 | ret = -EINVAL; | ||
2458 | |||
2459 | return ret; | ||
2460 | } | ||
2461 | |||
2462 | static int __devinit ehea_probe(struct ibmebus_dev *dev, | ||
2463 | const struct of_device_id *id) | ||
2464 | { | ||
2465 | struct ehea_adapter *adapter; | ||
2466 | u64 *adapter_handle; | ||
2467 | int ret; | ||
2468 | |||
2469 | adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); | ||
2470 | if (!adapter) { | ||
2471 | ret = -ENOMEM; | ||
2472 | dev_err(&dev->ofdev.dev, "no mem for ehea_adapter\n"); | ||
2473 | goto out; | ||
2474 | } | ||
2475 | |||
2476 | adapter_handle = (u64*)get_property(dev->ofdev.node, "ibm,hea-handle", | ||
2477 | NULL); | ||
2478 | if (!adapter_handle) { | ||
2479 | dev_err(&dev->ofdev.dev, "failed getting handle for adapter" | ||
2480 | " '%s'\n", dev->ofdev.node->full_name); | ||
2481 | ret = -ENODEV; | ||
2482 | goto out_free_ad; | ||
2483 | } | ||
2484 | |||
2485 | adapter->handle = *adapter_handle; | ||
2486 | adapter->pd = EHEA_PD_ID; | ||
2487 | |||
2488 | dev->ofdev.dev.driver_data = adapter; | ||
2489 | |||
2490 | ret = ehea_reg_mr_adapter(adapter); | ||
2491 | if (ret) { | ||
2492 | dev_err(&dev->ofdev.dev, "reg_mr_adapter failed\n"); | ||
2493 | goto out_free_ad; | ||
2494 | } | ||
2495 | |||
2496 | /* initialize adapter and ports */ | ||
2497 | /* get adapter properties */ | ||
2498 | ret = ehea_sense_adapter_attr(adapter); | ||
2499 | if (ret) { | ||
2500 | dev_err(&dev->ofdev.dev, "sense_adapter_attr failed: %d", ret); | ||
2501 | goto out_free_res; | ||
2502 | } | ||
2503 | dev_info(&dev->ofdev.dev, "%d eHEA ports found\n", adapter->num_ports); | ||
2504 | |||
2505 | adapter->neq = ehea_create_eq(adapter, | ||
2506 | EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1); | ||
2507 | if (!adapter->neq) { | ||
2508 | dev_err(&dev->ofdev.dev, "NEQ creation failed"); | ||
2509 | goto out_free_res; | ||
2510 | } | ||
2511 | |||
2512 | tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet, | ||
2513 | (unsigned long)adapter); | ||
2514 | |||
2515 | ret = ibmebus_request_irq(NULL, adapter->neq->attr.ist1, | ||
2516 | ehea_interrupt_neq, SA_INTERRUPT, | ||
2517 | "ehea_neq", adapter); | ||
2518 | if (ret) { | ||
2519 | dev_err(&dev->ofdev.dev, "requesting NEQ IRQ failed"); | ||
2520 | goto out_kill_eq; | ||
2521 | } | ||
2522 | |||
2523 | adapter->ehea_wq = create_workqueue("ehea_wq"); | ||
2524 | if (!adapter->ehea_wq) { | ||
2524 | ret = -ENOMEM; /* was: fell through with ret == 0 */ | ||
2525 | goto out_free_irq; | ||
2525 | } | ||
2526 | |||
2527 | ret = ehea_setup_ports(adapter); | ||
2528 | if (ret) { | ||
2529 | dev_err(&dev->ofdev.dev, "setup_ports failed"); | ||
2530 | goto out_kill_wq; | ||
2531 | } | ||
2532 | |||
2533 | ret = 0; | ||
2534 | goto out; | ||
2535 | |||
2536 | out_kill_wq: | ||
2537 | destroy_workqueue(adapter->ehea_wq); | ||
2538 | |||
2539 | out_free_irq: | ||
2540 | ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter); | ||
2541 | |||
2542 | out_kill_eq: | ||
2543 | ehea_destroy_eq(adapter->neq); | ||
2544 | |||
2545 | out_free_res: | ||
2546 | ehea_h_free_resource(adapter->handle, adapter->mr.handle); | ||
2547 | |||
2548 | out_free_ad: | ||
2549 | kfree(adapter); | ||
2550 | out: | ||
2551 | return ret; | ||
2552 | } | ||
2553 | |||
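ehea_probe() (above) acquires its resources top-down and unwinds them bottom-up through the goto ladder, so a failure at any step falls through exactly the labels that undo the steps already completed. The bare shape of the idiom, as a sketch with assumed step/undo helpers:

extern int step_a(void), step_b(void), step_c(void);
extern void undo_a(void), undo_b(void);

static int probe_example(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto out;
	ret = step_b();
	if (ret)
		goto out_undo_a;
	ret = step_c();
	if (ret)
		goto out_undo_b;
	return 0;

out_undo_b:
	undo_b();
out_undo_a:
	undo_a();
out:
	return ret;
}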
2554 | static void ehea_shutdown_single_port(struct ehea_port *port) | ||
2555 | { | ||
2556 | unregister_netdev(port->netdev); | ||
2557 | kfree(port->mc_list); | ||
2558 | free_netdev(port->netdev); | ||
2559 | } | ||
2560 | |||
2561 | static int __devexit ehea_remove(struct ibmebus_dev *dev) | ||
2562 | { | ||
2563 | struct ehea_adapter *adapter = dev->ofdev.dev.driver_data; | ||
2564 | u64 hret; | ||
2565 | int i; | ||
2566 | |||
2567 | for (i = 0; i < adapter->num_ports; i++) | ||
2568 | if (adapter->port[i]) { | ||
2569 | ehea_shutdown_single_port(adapter->port[i]); | ||
2570 | adapter->port[i] = NULL; | ||
2571 | } | ||
2572 | destroy_workqueue(adapter->ehea_wq); | ||
2573 | |||
2574 | ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter); | ||
2575 | |||
2576 | ehea_destroy_eq(adapter->neq); | ||
2577 | |||
2578 | hret = ehea_h_free_resource(adapter->handle, adapter->mr.handle); | ||
2579 | if (hret) { | ||
2580 | dev_err(&dev->ofdev.dev, "free_resource_mr failed"); | ||
2581 | return -EIO; | ||
2582 | } | ||
2583 | kfree(adapter); | ||
2584 | return 0; | ||
2585 | } | ||
2586 | |||
2587 | static int check_module_parm(void) | ||
2588 | { | ||
2589 | int ret = 0; | ||
2590 | |||
2591 | if ((rq1_entries < EHEA_MIN_ENTRIES_QP) || | ||
2592 | (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) { | ||
2593 | ehea_info("Bad parameter: rq1_entries"); | ||
2594 | ret = -EINVAL; | ||
2595 | } | ||
2596 | if ((rq2_entries < EHEA_MIN_ENTRIES_QP) || | ||
2597 | (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) { | ||
2598 | ehea_info("Bad parameter: rq2_entries"); | ||
2599 | ret = -EINVAL; | ||
2600 | } | ||
2601 | if ((rq3_entries < EHEA_MIN_ENTRIES_QP) || | ||
2602 | (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) { | ||
2603 | ehea_info("Bad parameter: rq3_entries"); | ||
2604 | ret = -EINVAL; | ||
2605 | } | ||
2606 | if ((sq_entries < EHEA_MIN_ENTRIES_QP) || | ||
2607 | (sq_entries > EHEA_MAX_ENTRIES_SQ)) { | ||
2608 | ehea_info("Bad parameter: sq_entries"); | ||
2609 | ret = -EINVAL; | ||
2610 | } | ||
2611 | |||
2612 | return ret; | ||
2613 | } | ||
2614 | |||
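check_module_parm() validates the four queue-size module parameters against the EHEA_MIN_ENTRIES_QP/EHEA_MAX_ENTRIES_* limits from ehea.h. The parameters themselves (sq_entries, rq1_entries, rq2_entries, rq3_entries) are declared near the top of ehea_main.c, outside this hunk; for reference, a typical declaration of one of them looks like the sketch below, where the default macro and the description text are assumptions rather than quotes from the patch:

static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;	/* assumed default macro */
module_param(rq1_entries, int, 0);
MODULE_PARM_DESC(rq1_entries, "Number of entries for receive queue 1");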
2615 | static struct of_device_id ehea_device_table[] = { | ||
2616 | { | ||
2617 | .name = "lhea", | ||
2618 | .compatible = "IBM,lhea", | ||
2619 | }, | ||
2620 | {}, | ||
2621 | }; | ||
2622 | |||
2623 | static struct ibmebus_driver ehea_driver = { | ||
2624 | .name = "ehea", | ||
2625 | .id_table = ehea_device_table, | ||
2626 | .probe = ehea_probe, | ||
2627 | .remove = ehea_remove, | ||
2628 | }; | ||
2629 | |||
2630 | int __init ehea_module_init(void) | ||
2631 | { | ||
2632 | int ret; | ||
2633 | |||
2634 | printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n", | ||
2635 | DRV_VERSION); | ||
2636 | |||
2637 | ret = check_module_parm(); | ||
2638 | if (ret) | ||
2639 | goto out; | ||
2640 | ret = ibmebus_register_driver(&ehea_driver); | ||
2641 | if (ret) | ||
2642 | ehea_error("failed registering eHEA device driver on ebus"); | ||
2643 | |||
2644 | out: | ||
2645 | return ret; | ||
2646 | } | ||
2647 | |||
2648 | static void __exit ehea_module_exit(void) | ||
2649 | { | ||
2650 | ibmebus_unregister_driver(&ehea_driver); | ||
2651 | } | ||
2652 | |||
2653 | module_init(ehea_module_init); | ||
2654 | module_exit(ehea_module_exit); | ||
diff --git a/drivers/net/ehea/ehea_phyp.c b/drivers/net/ehea/ehea_phyp.c new file mode 100644 index 000000000000..4a85aca4c7e9 --- /dev/null +++ b/drivers/net/ehea/ehea_phyp.c | |||
@@ -0,0 +1,705 @@ | |||
1 | /* | ||
2 | * linux/drivers/net/ehea/ehea_phyp.c | ||
3 | * | ||
4 | * eHEA ethernet device driver for IBM eServer System p | ||
5 | * | ||
6 | * (C) Copyright IBM Corp. 2006 | ||
7 | * | ||
8 | * Authors: | ||
9 | * Christoph Raisch <raisch@de.ibm.com> | ||
10 | * Jan-Bernd Themann <themann@de.ibm.com> | ||
11 | * Thomas Klein <tklein@de.ibm.com> | ||
12 | * | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify | ||
15 | * it under the terms of the GNU General Public License as published by | ||
16 | * the Free Software Foundation; either version 2, or (at your option) | ||
17 | * any later version. | ||
18 | * | ||
19 | * This program is distributed in the hope that it will be useful, | ||
20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
22 | * GNU General Public License for more details. | ||
23 | * | ||
24 | * You should have received a copy of the GNU General Public License | ||
25 | * along with this program; if not, write to the Free Software | ||
26 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
27 | */ | ||
28 | |||
29 | #include "ehea_phyp.h" | ||
30 | |||
31 | |||
32 | static inline u16 get_order_of_qentries(u16 queue_entries) | ||
33 | { | ||
34 | u8 ld = 1; /* "logarithmus dualis", i.e. the binary logarithm */ | ||
35 | while (((1U << ld) - 1) < queue_entries) | ||
36 | ld++; | ||
37 | return ld - 1; | ||
38 | } | ||
39 | |||
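get_order_of_qentries() translates a requested entry count into the encoded queue order the hypervisor expects: it returns the smallest value e for which 2^(e+1) - 1 >= queue_entries. A standalone copy with a worked value, as a sketch:

#include <stdio.h>

static unsigned get_order_of_qentries(unsigned queue_entries)
{
	unsigned ld = 1;	/* binary logarithm */

	while (((1U << ld) - 1) < queue_entries)
		ld++;
	return ld - 1;
}

int main(void)
{
	/* 100 entries -> 6, since 2^7 - 1 = 127 is the first 2^ld - 1 >= 100 */
	printf("%u\n", get_order_of_qentries(100));
	return 0;
}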
40 | /* Defines for H_CALL H_ALLOC_RESOURCE */ | ||
41 | #define H_ALL_RES_TYPE_QP 1 | ||
42 | #define H_ALL_RES_TYPE_CQ 2 | ||
43 | #define H_ALL_RES_TYPE_EQ 3 | ||
44 | #define H_ALL_RES_TYPE_MR 5 | ||
45 | #define H_ALL_RES_TYPE_MW 6 | ||
46 | |||
47 | static long ehea_hcall_9arg_9ret(unsigned long opcode, | ||
48 | unsigned long arg1, unsigned long arg2, | ||
49 | unsigned long arg3, unsigned long arg4, | ||
50 | unsigned long arg5, unsigned long arg6, | ||
51 | unsigned long arg7, unsigned long arg8, | ||
52 | unsigned long arg9, unsigned long *out1, | ||
53 | unsigned long *out2, unsigned long *out3, | ||
54 | unsigned long *out4, unsigned long *out5, | ||
55 | unsigned long *out6, unsigned long *out7, | ||
56 | unsigned long *out8, unsigned long *out9) | ||
57 | { | ||
58 | long hret; | ||
59 | int i, sleep_msecs; | ||
60 | |||
61 | for (i = 0; i < 5; i++) { | ||
62 | hret = plpar_hcall_9arg_9ret(opcode, arg1, arg2, arg3, arg4, | ||
63 | arg5, arg6, arg7, arg8, arg9, out1, | ||
64 | out2, out3, out4, out5, out6, out7, | ||
65 | out8, out9); | ||
66 | if (H_IS_LONG_BUSY(hret)) { | ||
67 | sleep_msecs = get_longbusy_msecs(hret); | ||
68 | msleep_interruptible(sleep_msecs); | ||
69 | continue; | ||
70 | } | ||
71 | |||
72 | if (hret < H_SUCCESS) | ||
73 | ehea_error("op=%lx hret=%lx " | ||
74 | "i1=%lx i2=%lx i3=%lx i4=%lx i5=%lx i6=%lx " | ||
75 | "i7=%lx i8=%lx i9=%lx " | ||
76 | "o1=%lx o2=%lx o3=%lx o4=%lx o5=%lx o6=%lx " | ||
77 | "o7=%lx o8=%lx o9=%lx", | ||
78 | opcode, hret, arg1, arg2, arg3, arg4, arg5, | ||
79 | arg6, arg7, arg8, arg9, *out1, *out2, *out3, | ||
80 | *out4, *out5, *out6, *out7, *out8, *out9); | ||
81 | return hret; | ||
82 | } | ||
83 | return H_BUSY; | ||
84 | } | ||
85 | |||
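ehea_hcall_9arg_9ret() wraps the raw hypervisor call in the standard long-busy protocol: a return code in the H_IS_LONG_BUSY() range is a hint rather than a failure, get_longbusy_msecs() (defined in ehea_phyp.h below) converts it into the suggested delay before retrying, and after five attempts the caller sees H_BUSY. The retry discipline in isolation, with the hypervisor pieces stubbed out as externs:

#define MAX_TRIES	5
#define RC_BUSY		(-2L)

extern long stub_hcall(void);		/* stands in for plpar_hcall_9arg_9ret */
extern int stub_is_long_busy(long rc);	/* stands in for H_IS_LONG_BUSY */
extern int stub_busy_msecs(long rc);	/* stands in for get_longbusy_msecs */
extern void stub_msleep(int msecs);	/* stands in for msleep_interruptible */

static long retry_hcall(void)
{
	long rc;
	int i;

	for (i = 0; i < MAX_TRIES; i++) {
		rc = stub_hcall();
		if (stub_is_long_busy(rc)) {
			stub_msleep(stub_busy_msecs(rc)); /* honor the hint */
			continue;
		}
		return rc;	/* definitive success or failure */
	}
	return RC_BUSY;		/* still busy after all retries */
}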
86 | u64 ehea_h_query_ehea_qp(const u64 adapter_handle, const u8 qp_category, | ||
87 | const u64 qp_handle, const u64 sel_mask, void *cb_addr) | ||
88 | { | ||
89 | u64 dummy; | ||
90 | |||
91 | if ((((u64)cb_addr) & (PAGE_SIZE - 1)) != 0) { | ||
92 | ehea_error("not on pageboundary"); | ||
93 | return H_PARAMETER; | ||
94 | } | ||
95 | |||
96 | return ehea_hcall_9arg_9ret(H_QUERY_HEA_QP, | ||
97 | adapter_handle, /* R4 */ | ||
98 | qp_category, /* R5 */ | ||
99 | qp_handle, /* R6 */ | ||
100 | sel_mask, /* R7 */ | ||
101 | virt_to_abs(cb_addr), /* R8 */ | ||
102 | 0, 0, 0, 0, /* R9-R12 */ | ||
103 | &dummy, /* R4 */ | ||
104 | &dummy, /* R5 */ | ||
105 | &dummy, /* R6 */ | ||
106 | &dummy, /* R7 */ | ||
107 | &dummy, /* R8 */ | ||
108 | &dummy, /* R9 */ | ||
109 | &dummy, /* R10 */ | ||
110 | &dummy, /* R11 */ | ||
111 | &dummy); /* R12 */ | ||
112 | } | ||
113 | |||
114 | /* input param R5 */ | ||
115 | #define H_ALL_RES_QP_EQPO EHEA_BMASK_IBM(9, 11) | ||
116 | #define H_ALL_RES_QP_QPP EHEA_BMASK_IBM(12, 12) | ||
117 | #define H_ALL_RES_QP_RQR EHEA_BMASK_IBM(13, 15) | ||
118 | #define H_ALL_RES_QP_EQEG EHEA_BMASK_IBM(16, 16) | ||
119 | #define H_ALL_RES_QP_LL_QP EHEA_BMASK_IBM(17, 17) | ||
120 | #define H_ALL_RES_QP_DMA128 EHEA_BMASK_IBM(19, 19) | ||
121 | #define H_ALL_RES_QP_HSM EHEA_BMASK_IBM(20, 21) | ||
122 | #define H_ALL_RES_QP_SIGT EHEA_BMASK_IBM(22, 23) | ||
123 | #define H_ALL_RES_QP_TENURE EHEA_BMASK_IBM(48, 55) | ||
124 | #define H_ALL_RES_QP_RES_TYP EHEA_BMASK_IBM(56, 63) | ||
125 | |||
126 | /* input param R9 */ | ||
127 | #define H_ALL_RES_QP_TOKEN EHEA_BMASK_IBM(0, 31) | ||
128 | #define H_ALL_RES_QP_PD EHEA_BMASK_IBM(32, 63) | ||
129 | |||
130 | /* input param R10 */ | ||
131 | #define H_ALL_RES_QP_MAX_SWQE EHEA_BMASK_IBM(4, 7) | ||
132 | #define H_ALL_RES_QP_MAX_R1WQE EHEA_BMASK_IBM(12, 15) | ||
133 | #define H_ALL_RES_QP_MAX_R2WQE EHEA_BMASK_IBM(20, 23) | ||
134 | #define H_ALL_RES_QP_MAX_R3WQE EHEA_BMASK_IBM(28, 31) | ||
135 | /* Max Send Scatter Gather Elements */ | ||
136 | #define H_ALL_RES_QP_MAX_SSGE EHEA_BMASK_IBM(37, 39) | ||
137 | #define H_ALL_RES_QP_MAX_R1SGE EHEA_BMASK_IBM(45, 47) | ||
138 | /* Max Receive SG Elements RQ1 */ | ||
139 | #define H_ALL_RES_QP_MAX_R2SGE EHEA_BMASK_IBM(53, 55) | ||
140 | #define H_ALL_RES_QP_MAX_R3SGE EHEA_BMASK_IBM(61, 63) | ||
141 | |||
142 | /* input param R11 */ | ||
143 | #define H_ALL_RES_QP_SWQE_IDL EHEA_BMASK_IBM(0, 7) | ||
144 | /* max swqe immediate data length */ | ||
145 | #define H_ALL_RES_QP_PORT_NUM EHEA_BMASK_IBM(48, 63) | ||
146 | |||
147 | /* input param R12 */ | ||
148 | #define H_ALL_RES_QP_TH_RQ2 EHEA_BMASK_IBM(0, 15) | ||
149 | /* Threshold RQ2 */ | ||
150 | #define H_ALL_RES_QP_TH_RQ3 EHEA_BMASK_IBM(16, 31) | ||
151 | /* Threshold RQ3 */ | ||
152 | |||
153 | /* output param R6 */ | ||
154 | #define H_ALL_RES_QP_ACT_SWQE EHEA_BMASK_IBM(0, 15) | ||
155 | #define H_ALL_RES_QP_ACT_R1WQE EHEA_BMASK_IBM(16, 31) | ||
156 | #define H_ALL_RES_QP_ACT_R2WQE EHEA_BMASK_IBM(32, 47) | ||
157 | #define H_ALL_RES_QP_ACT_R3WQE EHEA_BMASK_IBM(48, 63) | ||
158 | |||
159 | /* output param, R7 */ | ||
160 | #define H_ALL_RES_QP_ACT_SSGE EHEA_BMASK_IBM(0, 7) | ||
161 | #define H_ALL_RES_QP_ACT_R1SGE EHEA_BMASK_IBM(8, 15) | ||
162 | #define H_ALL_RES_QP_ACT_R2SGE EHEA_BMASK_IBM(16, 23) | ||
163 | #define H_ALL_RES_QP_ACT_R3SGE EHEA_BMASK_IBM(24, 31) | ||
164 | #define H_ALL_RES_QP_ACT_SWQE_IDL EHEA_BMASK_IBM(32, 39) | ||
165 | |||
166 | /* output param R8,R9 */ | ||
167 | #define H_ALL_RES_QP_SIZE_SQ EHEA_BMASK_IBM(0, 31) | ||
168 | #define H_ALL_RES_QP_SIZE_RQ1 EHEA_BMASK_IBM(32, 63) | ||
169 | #define H_ALL_RES_QP_SIZE_RQ2 EHEA_BMASK_IBM(0, 31) | ||
170 | #define H_ALL_RES_QP_SIZE_RQ3 EHEA_BMASK_IBM(32, 63) | ||
171 | |||
172 | /* output param R11,R12 */ | ||
173 | #define H_ALL_RES_QP_LIOBN_SQ EHEA_BMASK_IBM(0, 31) | ||
174 | #define H_ALL_RES_QP_LIOBN_RQ1 EHEA_BMASK_IBM(32, 63) | ||
175 | #define H_ALL_RES_QP_LIOBN_RQ2 EHEA_BMASK_IBM(0, 31) | ||
176 | #define H_ALL_RES_QP_LIOBN_RQ3 EHEA_BMASK_IBM(32, 63) | ||
177 | |||
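All register layouts in this file use IBM bit numbering: bit 0 is the most significant bit of the 64-bit word and bit 63 the least significant. EHEA_BMASK_IBM(from, to) names a field in that convention, and EHEA_BMASK_SET()/EHEA_BMASK_GET() place or extract values; the macros themselves live in ehea.h, earlier in this patch. A standalone sketch that mirrors their observable behavior:

#include <stdint.h>

/* a field spanning IBM bits [from..to]; IBM bit 63 is the machine LSB */
static uint64_t ibm_bmask_set(int from, int to, uint64_t value)
{
	int width = to - from + 1;
	int shift = 63 - to;
	uint64_t mask = (width >= 64) ? ~0ULL : ((1ULL << width) - 1);

	return (value & mask) << shift;
}

static uint64_t ibm_bmask_get(int from, int to, uint64_t reg)
{
	int width = to - from + 1;
	int shift = 63 - to;
	uint64_t mask = (width >= 64) ? ~0ULL : ((1ULL << width) - 1);

	return (reg >> shift) & mask;
}

/* e.g. H_ALL_RES_QP_RES_TYP spans IBM bits 56..63, so
 * ibm_bmask_set(56, 63, H_ALL_RES_TYPE_QP) places the resource type in
 * the low byte of allocate_controls, matching the function below. */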
178 | u64 ehea_h_alloc_resource_qp(const u64 adapter_handle, | ||
179 | struct ehea_qp_init_attr *init_attr, const u32 pd, | ||
180 | u64 *qp_handle, struct h_epas *h_epas) | ||
181 | { | ||
182 | u64 hret; | ||
183 | |||
184 | u64 allocate_controls = | ||
185 | EHEA_BMASK_SET(H_ALL_RES_QP_EQPO, init_attr->low_lat_rq1 ? 1 : 0) | ||
186 | | EHEA_BMASK_SET(H_ALL_RES_QP_QPP, 0) | ||
187 | | EHEA_BMASK_SET(H_ALL_RES_QP_RQR, 6) /* rq1 & rq2 & rq3 */ | ||
188 | | EHEA_BMASK_SET(H_ALL_RES_QP_EQEG, 0) /* EQE gen. disabled */ | ||
189 | | EHEA_BMASK_SET(H_ALL_RES_QP_LL_QP, init_attr->low_lat_rq1) | ||
190 | | EHEA_BMASK_SET(H_ALL_RES_QP_DMA128, 0) | ||
191 | | EHEA_BMASK_SET(H_ALL_RES_QP_HSM, 0) | ||
192 | | EHEA_BMASK_SET(H_ALL_RES_QP_SIGT, init_attr->signalingtype) | ||
193 | | EHEA_BMASK_SET(H_ALL_RES_QP_RES_TYP, H_ALL_RES_TYPE_QP); | ||
194 | |||
195 | u64 r9_reg = EHEA_BMASK_SET(H_ALL_RES_QP_PD, pd) | ||
196 | | EHEA_BMASK_SET(H_ALL_RES_QP_TOKEN, init_attr->qp_token); | ||
197 | |||
198 | u64 max_r10_reg = | ||
199 | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_SWQE, | ||
200 | get_order_of_qentries(init_attr->max_nr_send_wqes)) | ||
201 | | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R1WQE, | ||
202 | get_order_of_qentries(init_attr->max_nr_rwqes_rq1)) | ||
203 | | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R2WQE, | ||
204 | get_order_of_qentries(init_attr->max_nr_rwqes_rq2)) | ||
205 | | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R3WQE, | ||
206 | get_order_of_qentries(init_attr->max_nr_rwqes_rq3)) | ||
207 | | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_SSGE, init_attr->wqe_size_enc_sq) | ||
208 | | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R1SGE, | ||
209 | init_attr->wqe_size_enc_rq1) | ||
210 | | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R2SGE, | ||
211 | init_attr->wqe_size_enc_rq2) | ||
212 | | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R3SGE, | ||
213 | init_attr->wqe_size_enc_rq3); | ||
214 | |||
215 | u64 r11_in = | ||
216 | EHEA_BMASK_SET(H_ALL_RES_QP_SWQE_IDL, init_attr->swqe_imm_data_len) | ||
217 | | EHEA_BMASK_SET(H_ALL_RES_QP_PORT_NUM, init_attr->port_nr); | ||
218 | u64 threshold = | ||
219 | EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ2, init_attr->rq2_threshold) | ||
220 | | EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ3, init_attr->rq3_threshold); | ||
221 | |||
222 | u64 r5_out = 0; | ||
223 | u64 r6_out = 0; | ||
224 | u64 r7_out = 0; | ||
225 | u64 r8_out = 0; | ||
226 | u64 r9_out = 0; | ||
227 | u64 g_la_user_out = 0; | ||
228 | u64 r11_out = 0; | ||
229 | u64 r12_out = 0; | ||
230 | |||
231 | hret = ehea_hcall_9arg_9ret(H_ALLOC_HEA_RESOURCE, | ||
232 | adapter_handle, /* R4 */ | ||
233 | allocate_controls, /* R5 */ | ||
234 | init_attr->send_cq_handle, /* R6 */ | ||
235 | init_attr->recv_cq_handle, /* R7 */ | ||
236 | init_attr->aff_eq_handle, /* R8 */ | ||
237 | r9_reg, /* R9 */ | ||
238 | max_r10_reg, /* R10 */ | ||
239 | r11_in, /* R11 */ | ||
240 | threshold, /* R12 */ | ||
241 | qp_handle, /* R4 */ | ||
242 | &r5_out, /* R5 */ | ||
243 | &r6_out, /* R6 */ | ||
244 | &r7_out, /* R7 */ | ||
245 | &r8_out, /* R8 */ | ||
246 | &r9_out, /* R9 */ | ||
247 | &g_la_user_out, /* R10 */ | ||
248 | &r11_out, /* R11 */ | ||
249 | &r12_out); /* R12 */ | ||
250 | |||
251 | init_attr->qp_nr = (u32)r5_out; | ||
252 | |||
253 | init_attr->act_nr_send_wqes = | ||
254 | (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_SWQE, r6_out); | ||
255 | init_attr->act_nr_rwqes_rq1 = | ||
256 | (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R1WQE, r6_out); | ||
257 | init_attr->act_nr_rwqes_rq2 = | ||
258 | (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R2WQE, r6_out); | ||
259 | init_attr->act_nr_rwqes_rq3 = | ||
260 | (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R3WQE, r6_out); | ||
261 | |||
262 | init_attr->act_wqe_size_enc_sq = init_attr->wqe_size_enc_sq; | ||
263 | init_attr->act_wqe_size_enc_rq1 = init_attr->wqe_size_enc_rq1; | ||
264 | init_attr->act_wqe_size_enc_rq2 = init_attr->wqe_size_enc_rq2; | ||
265 | init_attr->act_wqe_size_enc_rq3 = init_attr->wqe_size_enc_rq3; | ||
266 | |||
267 | init_attr->nr_sq_pages = | ||
268 | (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_SQ, r8_out); | ||
269 | init_attr->nr_rq1_pages = | ||
270 | (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ1, r8_out); | ||
271 | init_attr->nr_rq2_pages = | ||
272 | (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ2, r9_out); | ||
273 | init_attr->nr_rq3_pages = | ||
274 | (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ3, r9_out); | ||
275 | |||
276 | init_attr->liobn_sq = | ||
277 | (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_SQ, r11_out); | ||
278 | init_attr->liobn_rq1 = | ||
279 | (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ1, r11_out); | ||
280 | init_attr->liobn_rq2 = | ||
281 | (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ2, r12_out); | ||
282 | init_attr->liobn_rq3 = | ||
283 | (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ3, r12_out); | ||
284 | |||
285 | if (!hret) | ||
286 | hcp_epas_ctor(h_epas, g_la_user_out, g_la_user_out); | ||
287 | |||
288 | return hret; | ||
289 | } | ||
290 | |||
291 | u64 ehea_h_alloc_resource_cq(const u64 adapter_handle, | ||
292 | struct ehea_cq_attr *cq_attr, | ||
293 | u64 *cq_handle, struct h_epas *epas) | ||
294 | { | ||
295 | u64 hret, dummy, act_nr_of_cqes_out, act_pages_out; | ||
296 | u64 g_la_privileged_out, g_la_user_out; | ||
297 | |||
298 | hret = ehea_hcall_9arg_9ret(H_ALLOC_HEA_RESOURCE, | ||
299 | adapter_handle, /* R4 */ | ||
300 | H_ALL_RES_TYPE_CQ, /* R5 */ | ||
301 | cq_attr->eq_handle, /* R6 */ | ||
302 | cq_attr->cq_token, /* R7 */ | ||
303 | cq_attr->max_nr_of_cqes, /* R8 */ | ||
304 | 0, 0, 0, 0, /* R9-R12 */ | ||
305 | cq_handle, /* R4 */ | ||
306 | &dummy, /* R5 */ | ||
307 | &dummy, /* R6 */ | ||
308 | &act_nr_of_cqes_out, /* R7 */ | ||
309 | &act_pages_out, /* R8 */ | ||
310 | &g_la_privileged_out, /* R9 */ | ||
311 | &g_la_user_out, /* R10 */ | ||
312 | &dummy, /* R11 */ | ||
313 | &dummy); /* R12 */ | ||
314 | |||
315 | cq_attr->act_nr_of_cqes = act_nr_of_cqes_out; | ||
316 | cq_attr->nr_pages = act_pages_out; | ||
317 | |||
318 | if (!hret) | ||
319 | hcp_epas_ctor(epas, g_la_privileged_out, g_la_user_out); | ||
320 | |||
321 | return hret; | ||
322 | } | ||
323 | |||
331 | /* input param R5 */ | ||
332 | #define H_ALL_RES_EQ_NEQ EHEA_BMASK_IBM(0, 0) | ||
333 | #define H_ALL_RES_EQ_NON_NEQ_ISN EHEA_BMASK_IBM(6, 7) | ||
334 | #define H_ALL_RES_EQ_INH_EQE_GEN EHEA_BMASK_IBM(16, 16) | ||
335 | #define H_ALL_RES_EQ_RES_TYPE EHEA_BMASK_IBM(56, 63) | ||
336 | /* input param R6 */ | ||
337 | #define H_ALL_RES_EQ_MAX_EQE EHEA_BMASK_IBM(32, 63) | ||
338 | |||
339 | /* output param R6 */ | ||
340 | #define H_ALL_RES_EQ_LIOBN EHEA_BMASK_IBM(32, 63) | ||
341 | |||
342 | /* output param R7 */ | ||
343 | #define H_ALL_RES_EQ_ACT_EQE EHEA_BMASK_IBM(32, 63) | ||
344 | |||
345 | /* output param R8 */ | ||
346 | #define H_ALL_RES_EQ_ACT_PS EHEA_BMASK_IBM(32, 63) | ||
347 | |||
348 | /* output param R9 */ | ||
349 | #define H_ALL_RES_EQ_ACT_EQ_IST_C EHEA_BMASK_IBM(30, 31) | ||
350 | #define H_ALL_RES_EQ_ACT_EQ_IST_1 EHEA_BMASK_IBM(40, 63) | ||
351 | |||
352 | /* output param R10 */ | ||
353 | #define H_ALL_RES_EQ_ACT_EQ_IST_2 EHEA_BMASK_IBM(40, 63) | ||
354 | |||
355 | /* output param R11 */ | ||
356 | #define H_ALL_RES_EQ_ACT_EQ_IST_3 EHEA_BMASK_IBM(40, 63) | ||
357 | |||
358 | /* output param R12 */ | ||
359 | #define H_ALL_RES_EQ_ACT_EQ_IST_4 EHEA_BMASK_IBM(40, 63) | ||
360 | |||
361 | u64 ehea_h_alloc_resource_eq(const u64 adapter_handle, | ||
362 | struct ehea_eq_attr *eq_attr, u64 *eq_handle) | ||
363 | { | ||
364 | u64 hret, dummy, eq_liobn, allocate_controls; | ||
365 | u64 ist1_out, ist2_out, ist3_out, ist4_out; | ||
366 | u64 act_nr_of_eqes_out, act_pages_out; | ||
367 | |||
368 | /* resource type */ | ||
369 | allocate_controls = | ||
370 | EHEA_BMASK_SET(H_ALL_RES_EQ_RES_TYPE, H_ALL_RES_TYPE_EQ) | ||
371 | | EHEA_BMASK_SET(H_ALL_RES_EQ_NEQ, eq_attr->type ? 1 : 0) | ||
372 | | EHEA_BMASK_SET(H_ALL_RES_EQ_INH_EQE_GEN, !eq_attr->eqe_gen) | ||
373 | | EHEA_BMASK_SET(H_ALL_RES_EQ_NON_NEQ_ISN, 1); | ||
374 | |||
375 | hret = ehea_hcall_9arg_9ret(H_ALLOC_HEA_RESOURCE, | ||
376 | adapter_handle, /* R4 */ | ||
377 | allocate_controls, /* R5 */ | ||
378 | eq_attr->max_nr_of_eqes, /* R6 */ | ||
379 | 0, 0, 0, 0, 0, 0, /* R7-R12 */ | ||
380 | eq_handle, /* R4 */ | ||
381 | &dummy, /* R5 */ | ||
382 | &eq_liobn, /* R6 */ | ||
383 | &act_nr_of_eqes_out, /* R7 */ | ||
384 | &act_pages_out, /* R8 */ | ||
385 | &ist1_out, /* R9 */ | ||
386 | &ist2_out, /* R10 */ | ||
387 | &ist3_out, /* R11 */ | ||
388 | &ist4_out); /* R12 */ | ||
389 | |||
390 | eq_attr->act_nr_of_eqes = act_nr_of_eqes_out; | ||
391 | eq_attr->nr_pages = act_pages_out; | ||
392 | eq_attr->ist1 = ist1_out; | ||
393 | eq_attr->ist2 = ist2_out; | ||
394 | eq_attr->ist3 = ist3_out; | ||
395 | eq_attr->ist4 = ist4_out; | ||
396 | |||
397 | return hret; | ||
398 | } | ||
399 | |||
400 | u64 ehea_h_modify_ehea_qp(const u64 adapter_handle, const u8 cat, | ||
401 | const u64 qp_handle, const u64 sel_mask, | ||
402 | void *cb_addr, u64 *inv_attr_id, u64 *proc_mask, | ||
403 | u16 *out_swr, u16 *out_rwr) | ||
404 | { | ||
405 | u64 hret, dummy, act_out_swr, act_out_rwr; | ||
406 | |||
407 | if ((((u64)cb_addr) & (PAGE_SIZE - 1)) != 0) { | ||
408 | ehea_error("not on page boundary"); | ||
409 | return H_PARAMETER; | ||
410 | } | ||
411 | |||
412 | hret = ehea_hcall_9arg_9ret(H_MODIFY_HEA_QP, | ||
413 | adapter_handle, /* R4 */ | ||
414 | (u64) cat, /* R5 */ | ||
415 | qp_handle, /* R6 */ | ||
416 | sel_mask, /* R7 */ | ||
417 | virt_to_abs(cb_addr), /* R8 */ | ||
418 | 0, 0, 0, 0, /* R9-R12 */ | ||
419 | inv_attr_id, /* R4 */ | ||
420 | &dummy, /* R5 */ | ||
421 | &dummy, /* R6 */ | ||
422 | &act_out_swr, /* R7 */ | ||
423 | &act_out_rwr, /* R8 */ | ||
424 | proc_mask, /* R9 */ | ||
425 | &dummy, /* R10 */ | ||
426 | &dummy, /* R11 */ | ||
427 | &dummy); /* R12 */ | ||
428 | *out_swr = act_out_swr; | ||
429 | *out_rwr = act_out_rwr; | ||
430 | |||
431 | return hret; | ||
432 | } | ||
433 | |||
434 | u64 ehea_h_register_rpage(const u64 adapter_handle, const u8 pagesize, | ||
435 | const u8 queue_type, const u64 resource_handle, | ||
436 | const u64 log_pageaddr, u64 count) | ||
437 | { | ||
438 | u64 dummy, reg_control; | ||
439 | |||
440 | reg_control = EHEA_BMASK_SET(H_REG_RPAGE_PAGE_SIZE, pagesize) | ||
441 | | EHEA_BMASK_SET(H_REG_RPAGE_QT, queue_type); | ||
442 | |||
443 | return ehea_hcall_9arg_9ret(H_REGISTER_HEA_RPAGES, | ||
444 | adapter_handle, /* R4 */ | ||
445 | reg_control, /* R5 */ | ||
446 | resource_handle, /* R6 */ | ||
447 | log_pageaddr, /* R7 */ | ||
448 | count, /* R8 */ | ||
449 | 0, 0, 0, 0, /* R9-R12 */ | ||
450 | &dummy, /* R4 */ | ||
451 | &dummy, /* R5 */ | ||
452 | &dummy, /* R6 */ | ||
453 | &dummy, /* R7 */ | ||
454 | &dummy, /* R8 */ | ||
455 | &dummy, /* R9 */ | ||
456 | &dummy, /* R10 */ | ||
457 | &dummy, /* R11 */ | ||
458 | &dummy); /* R12 */ | ||
459 | } | ||
460 | |||
461 | u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle, | ||
462 | const u64 vaddr_in, const u32 access_ctrl, const u32 pd, | ||
463 | struct ehea_mr *mr) | ||
464 | { | ||
465 | u64 hret, dummy, lkey_out; | ||
466 | |||
467 | hret = ehea_hcall_9arg_9ret(H_REGISTER_SMR, | ||
468 | adapter_handle, /* R4 */ | ||
469 | orig_mr_handle, /* R5 */ | ||
470 | vaddr_in, /* R6 */ | ||
471 | (((u64)access_ctrl) << 32ULL), /* R7 */ | ||
472 | pd, /* R8 */ | ||
473 | 0, 0, 0, 0, /* R9-R12 */ | ||
474 | &mr->handle, /* R4 */ | ||
475 | &dummy, /* R5 */ | ||
476 | &lkey_out, /* R6 */ | ||
477 | &dummy, /* R7 */ | ||
478 | &dummy, /* R8 */ | ||
479 | &dummy, /* R9 */ | ||
480 | &dummy, /* R10 */ | ||
481 | &dummy, /* R11 */ | ||
482 | &dummy); /* R12 */ | ||
483 | mr->lkey = (u32)lkey_out; | ||
484 | |||
485 | return hret; | ||
486 | } | ||
487 | |||
488 | u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle) | ||
489 | { | ||
490 | u64 hret, dummy, ladr_next_sq_wqe_out; | ||
491 | u64 ladr_next_rq1_wqe_out, ladr_next_rq2_wqe_out, ladr_next_rq3_wqe_out; | ||
492 | |||
493 | hret = ehea_hcall_9arg_9ret(H_DISABLE_AND_GET_HEA, | ||
494 | adapter_handle, /* R4 */ | ||
495 | H_DISABLE_GET_EHEA_WQE_P, /* R5 */ | ||
496 | qp_handle, /* R6 */ | ||
497 | 0, 0, 0, 0, 0, 0, /* R7-R12 */ | ||
498 | &ladr_next_sq_wqe_out, /* R4 */ | ||
499 | &ladr_next_rq1_wqe_out, /* R5 */ | ||
500 | &ladr_next_rq2_wqe_out, /* R6 */ | ||
501 | &ladr_next_rq3_wqe_out, /* R7 */ | ||
502 | &dummy, /* R8 */ | ||
503 | &dummy, /* R9 */ | ||
504 | &dummy, /* R10 */ | ||
505 | &dummy, /* R11 */ | ||
506 | &dummy); /* R12 */ | ||
507 | return hret; | ||
508 | } | ||
509 | |||
510 | u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle) | ||
511 | { | ||
512 | u64 dummy; | ||
513 | |||
514 | return ehea_hcall_9arg_9ret(H_FREE_RESOURCE, | ||
515 | adapter_handle, /* R4 */ | ||
516 | res_handle, /* R5 */ | ||
517 | 0, 0, 0, 0, 0, 0, 0, /* R6-R12 */ | ||
518 | &dummy, /* R4 */ | ||
519 | &dummy, /* R5 */ | ||
520 | &dummy, /* R6 */ | ||
521 | &dummy, /* R7 */ | ||
522 | &dummy, /* R8 */ | ||
523 | &dummy, /* R9 */ | ||
524 | &dummy, /* R10 */ | ||
525 | &dummy, /* R11 */ | ||
526 | &dummy); /* R12 */ | ||
527 | } | ||
528 | |||
529 | u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr, | ||
530 | const u64 length, const u32 access_ctrl, | ||
531 | const u32 pd, u64 *mr_handle, u32 *lkey) | ||
532 | { | ||
533 | u64 hret, dummy, lkey_out; | ||
534 | |||
535 | hret = ehea_hcall_9arg_9ret(H_ALLOC_HEA_RESOURCE, | ||
536 | adapter_handle, /* R4 */ | ||
537 | H_ALL_RES_TYPE_MR, /* R5 */ | ||
538 | vaddr, /* R6 */ | ||
539 | length, /* R7 */ | ||
540 | (((u64) access_ctrl) << 32ULL),/* R8 */ | ||
541 | pd, /* R9 */ | ||
542 | 0, 0, 0, /* R10-R12 */ | ||
543 | mr_handle, /* R4 */ | ||
544 | &dummy, /* R5 */ | ||
545 | &lkey_out, /* R6 */ | ||
546 | &dummy, /* R7 */ | ||
547 | &dummy, /* R8 */ | ||
548 | &dummy, /* R9 */ | ||
549 | &dummy, /* R10 */ | ||
550 | &dummy, /* R11 */ | ||
551 | &dummy); /* R12 */ | ||
552 | *lkey = (u32) lkey_out; | ||
553 | |||
554 | return hret; | ||
555 | } | ||
556 | |||
557 | u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle, | ||
558 | const u8 pagesize, const u8 queue_type, | ||
559 | const u64 log_pageaddr, const u64 count) | ||
560 | { | ||
561 | if ((count > 1) && (log_pageaddr & 0xfff)) { | ||
562 | ehea_error("not on pageboundary"); | ||
563 | return H_PARAMETER; | ||
564 | } | ||
565 | |||
566 | return ehea_h_register_rpage(adapter_handle, pagesize, | ||
567 | queue_type, mr_handle, | ||
568 | log_pageaddr, count); | ||
569 | } | ||
570 | |||
571 | u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr) | ||
572 | { | ||
573 | u64 hret, dummy, cb_logaddr; | ||
574 | |||
575 | cb_logaddr = virt_to_abs(cb_addr); | ||
576 | |||
577 | hret = ehea_hcall_9arg_9ret(H_QUERY_HEA, | ||
578 | adapter_handle, /* R4 */ | ||
579 | cb_logaddr, /* R5 */ | ||
580 | 0, 0, 0, 0, 0, 0, 0, /* R6-R12 */ | ||
581 | &dummy, /* R4 */ | ||
582 | &dummy, /* R5 */ | ||
583 | &dummy, /* R6 */ | ||
584 | &dummy, /* R7 */ | ||
585 | &dummy, /* R8 */ | ||
586 | &dummy, /* R9 */ | ||
587 | &dummy, /* R10 */ | ||
588 | &dummy, /* R11 */ | ||
589 | &dummy); /* R12 */ | ||
590 | #ifdef DEBUG | ||
591 | ehea_dmp(cb_addr, sizeof(struct hcp_query_ehea), "hcp_query_ehea"); | ||
592 | #endif | ||
593 | return hret; | ||
594 | } | ||
595 | |||
596 | u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num, | ||
597 | const u8 cb_cat, const u64 select_mask, | ||
598 | void *cb_addr) | ||
599 | { | ||
600 | u64 port_info, dummy; | ||
601 | u64 cb_logaddr = virt_to_abs(cb_addr); | ||
602 | u64 arr_index = 0; | ||
603 | |||
604 | port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat) | ||
605 | | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num); | ||
606 | |||
607 | return ehea_hcall_9arg_9ret(H_QUERY_HEA_PORT, | ||
608 | adapter_handle, /* R4 */ | ||
609 | port_info, /* R5 */ | ||
610 | select_mask, /* R6 */ | ||
611 | arr_index, /* R7 */ | ||
612 | cb_logaddr, /* R8 */ | ||
613 | 0, 0, 0, 0, /* R9-R12 */ | ||
614 | &dummy, /* R4 */ | ||
615 | &dummy, /* R5 */ | ||
616 | &dummy, /* R6 */ | ||
617 | &dummy, /* R7 */ | ||
618 | &dummy, /* R8 */ | ||
619 | &dummy, /* R9 */ | ||
620 | &dummy, /* R10 */ | ||
621 | &dummy, /* R11 */ | ||
622 | &dummy); /* R12 */ | ||
623 | } | ||
624 | |||
625 | u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num, | ||
626 | const u8 cb_cat, const u64 select_mask, | ||
627 | void *cb_addr) | ||
628 | { | ||
629 | u64 port_info, dummy, inv_attr_ident, proc_mask; | ||
630 | u64 arr_index = 0; | ||
631 | u64 cb_logaddr = virt_to_abs(cb_addr); | ||
632 | |||
633 | port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat) | ||
634 | | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num); | ||
635 | #ifdef DEBUG | ||
636 | ehea_dump(cb_addr, sizeof(struct hcp_ehea_port_cb0), "Before HCALL"); | ||
637 | #endif | ||
638 | return ehea_hcall_9arg_9ret(H_MODIFY_HEA_PORT, | ||
639 | adapter_handle, /* R4 */ | ||
640 | port_info, /* R5 */ | ||
641 | select_mask, /* R6 */ | ||
642 | arr_index, /* R7 */ | ||
643 | cb_logaddr, /* R8 */ | ||
644 | 0, 0, 0, 0, /* R9-R12 */ | ||
645 | &inv_attr_ident, /* R4 */ | ||
646 | &proc_mask, /* R5 */ | ||
647 | &dummy, /* R6 */ | ||
648 | &dummy, /* R7 */ | ||
649 | &dummy, /* R8 */ | ||
650 | &dummy, /* R9 */ | ||
651 | &dummy, /* R10 */ | ||
652 | &dummy, /* R11 */ | ||
653 | &dummy); /* R12 */ | ||
654 | } | ||
655 | |||
656 | u64 ehea_h_reg_dereg_bcmc(const u64 adapter_handle, const u16 port_num, | ||
657 | const u8 reg_type, const u64 mc_mac_addr, | ||
658 | const u16 vlan_id, const u32 hcall_id) | ||
659 | { | ||
660 | u64 r5_port_num, r6_reg_type, r7_mc_mac_addr, r8_vlan_id, dummy; | ||
661 | u64 mac_addr = mc_mac_addr >> 16; | ||
662 | |||
663 | r5_port_num = EHEA_BMASK_SET(H_REGBCMC_PN, port_num); | ||
664 | r6_reg_type = EHEA_BMASK_SET(H_REGBCMC_REGTYPE, reg_type); | ||
665 | r7_mc_mac_addr = EHEA_BMASK_SET(H_REGBCMC_MACADDR, mac_addr); | ||
666 | r8_vlan_id = EHEA_BMASK_SET(H_REGBCMC_VLANID, vlan_id); | ||
667 | |||
668 | return ehea_hcall_9arg_9ret(hcall_id, | ||
669 | adapter_handle, /* R4 */ | ||
670 | r5_port_num, /* R5 */ | ||
671 | r6_reg_type, /* R6 */ | ||
672 | r7_mc_mac_addr, /* R7 */ | ||
673 | r8_vlan_id, /* R8 */ | ||
674 | 0, 0, 0, 0, /* R9-R12 */ | ||
675 | &dummy, /* R4 */ | ||
676 | &dummy, /* R5 */ | ||
677 | &dummy, /* R6 */ | ||
678 | &dummy, /* R7 */ | ||
679 | &dummy, /* R8 */ | ||
680 | &dummy, /* R9 */ | ||
681 | &dummy, /* R10 */ | ||
682 | &dummy, /* R11 */ | ||
683 | &dummy); /* R12 */ | ||
684 | } | ||
685 | |||
686 | u64 ehea_h_reset_events(const u64 adapter_handle, const u64 neq_handle, | ||
687 | const u64 event_mask) | ||
688 | { | ||
689 | u64 dummy; | ||
690 | |||
691 | return ehea_hcall_9arg_9ret(H_RESET_EVENTS, | ||
692 | adapter_handle, /* R4 */ | ||
693 | neq_handle, /* R5 */ | ||
694 | event_mask, /* R6 */ | ||
695 | 0, 0, 0, 0, 0, 0, /* R7-R12 */ | ||
696 | &dummy, /* R4 */ | ||
697 | &dummy, /* R5 */ | ||
698 | &dummy, /* R6 */ | ||
699 | &dummy, /* R7 */ | ||
700 | &dummy, /* R8 */ | ||
701 | &dummy, /* R9 */ | ||
702 | &dummy, /* R10 */ | ||
703 | &dummy, /* R11 */ | ||
704 | &dummy); /* R12 */ | ||
705 | } | ||
diff --git a/drivers/net/ehea/ehea_phyp.h b/drivers/net/ehea/ehea_phyp.h new file mode 100644 index 000000000000..fa51e3b5bb05 --- /dev/null +++ b/drivers/net/ehea/ehea_phyp.h | |||
@@ -0,0 +1,455 @@ | |||
1 | /* | ||
2 | * linux/drivers/net/ehea/ehea_phyp.h | ||
3 | * | ||
4 | * eHEA ethernet device driver for IBM eServer System p | ||
5 | * | ||
6 | * (C) Copyright IBM Corp. 2006 | ||
7 | * | ||
8 | * Authors: | ||
9 | * Christoph Raisch <raisch@de.ibm.com> | ||
10 | * Jan-Bernd Themann <themann@de.ibm.com> | ||
11 | * Thomas Klein <tklein@de.ibm.com> | ||
12 | * | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify | ||
15 | * it under the terms of the GNU General Public License as published by | ||
16 | * the Free Software Foundation; either version 2, or (at your option) | ||
17 | * any later version. | ||
18 | * | ||
19 | * This program is distributed in the hope that it will be useful, | ||
20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
22 | * GNU General Public License for more details. | ||
23 | * | ||
24 | * You should have received a copy of the GNU General Public License | ||
25 | * along with this program; if not, write to the Free Software | ||
26 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
27 | */ | ||
28 | |||
29 | #ifndef __EHEA_PHYP_H__ | ||
30 | #define __EHEA_PHYP_H__ | ||
31 | |||
32 | #include <linux/delay.h> | ||
33 | #include <asm/hvcall.h> | ||
34 | #include "ehea.h" | ||
35 | #include "ehea_hw.h" | ||
36 | #include "ehea_hcall.h" | ||
37 | |||
38 | /* Some abbreviations used here: | ||
39 | * | ||
40 | * hcp_* - structures, variables and functions related to Hypervisor Calls | ||
41 | */ | ||
42 | |||
43 | static inline u32 get_longbusy_msecs(int long_busy_ret_code) | ||
44 | { | ||
45 | switch (long_busy_ret_code) { | ||
46 | case H_LONG_BUSY_ORDER_1_MSEC: | ||
47 | return 1; | ||
48 | case H_LONG_BUSY_ORDER_10_MSEC: | ||
49 | return 10; | ||
50 | case H_LONG_BUSY_ORDER_100_MSEC: | ||
51 | return 100; | ||
52 | case H_LONG_BUSY_ORDER_1_SEC: | ||
53 | return 1000; | ||
54 | case H_LONG_BUSY_ORDER_10_SEC: | ||
55 | return 10000; | ||
56 | case H_LONG_BUSY_ORDER_100_SEC: | ||
57 | return 100000; | ||
58 | default: | ||
59 | return 1; | ||
60 | } | ||
61 | } | ||
62 | |||
63 | /* Notification Event Queue (NEQ) Entry bit masks */ | ||
64 | #define NEQE_EVENT_CODE EHEA_BMASK_IBM(2, 7) | ||
65 | #define NEQE_PORTNUM EHEA_BMASK_IBM(32, 47) | ||
66 | #define NEQE_PORT_UP EHEA_BMASK_IBM(16, 16) | ||
67 | #define NEQE_EXTSWITCH_PORT_UP EHEA_BMASK_IBM(17, 17) | ||
68 | #define NEQE_EXTSWITCH_PRIMARY EHEA_BMASK_IBM(18, 18) | ||
69 | #define NEQE_PLID EHEA_BMASK_IBM(16, 47) | ||
70 | |||
71 | /* Notification Event Codes */ | ||
72 | #define EHEA_EC_PORTSTATE_CHG 0x30 | ||
73 | #define EHEA_EC_ADAPTER_MALFUNC 0x32 | ||
74 | #define EHEA_EC_PORT_MALFUNC 0x33 | ||
75 | |||
76 | /* Notification Event Log Register (NELR) bit masks */ | ||
77 | #define NELR_PORT_MALFUNC EHEA_BMASK_IBM(61, 61) | ||
78 | #define NELR_ADAPTER_MALFUNC EHEA_BMASK_IBM(62, 62) | ||
79 | #define NELR_PORTSTATE_CHG EHEA_BMASK_IBM(63, 63) | ||
80 | |||
81 | static inline void hcp_epas_ctor(struct h_epas *epas, u64 paddr_kernel, | ||
82 | u64 paddr_user) | ||
83 | { | ||
84 | epas->kernel.addr = ioremap(paddr_kernel, PAGE_SIZE); | ||
85 | epas->user.addr = paddr_user; | ||
86 | } | ||
87 | |||
88 | static inline void hcp_epas_dtor(struct h_epas *epas) | ||
89 | { | ||
90 | if (epas->kernel.addr) | ||
91 | iounmap(epas->kernel.addr); | ||
92 | |||
93 | epas->user.addr = 0; | ||
94 | epas->kernel.addr = 0; | ||
95 | } | ||
96 | |||
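hcp_epas_ctor() treats the two effective-page addresses differently: the kernel EPA is a physical address that is mapped into kernel virtual space with ioremap() (note that a NULL return is not checked here), while the user EPA is stored raw for a later userspace mapping; hcp_epas_dtor() tears down only the kernel mapping. The ioremap()/iounmap() pairing in isolation, as a sketch:

#include <linux/io.h>
#include <linux/types.h>

/* map one page of MMIO at physical address paddr; NULL on failure */
static void __iomem *map_epa_page(u64 paddr)
{
	return ioremap(paddr, PAGE_SIZE);
}

static void unmap_epa_page(void __iomem *addr)
{
	if (addr)		/* tolerate a mapping that never succeeded */
		iounmap(addr);
}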
97 | struct hcp_modify_qp_cb0 { | ||
98 | u64 qp_ctl_reg; /* 00 */ | ||
99 | u32 max_swqe; /* 02 */ | ||
100 | u32 max_rwqe; /* 03 */ | ||
101 | u32 port_nb; /* 04 */ | ||
102 | u32 reserved0; /* 05 */ | ||
103 | u64 qp_aer; /* 06 */ | ||
104 | u64 qp_tenure; /* 08 */ | ||
105 | }; | ||
106 | |||
107 | /* Hcall Query/Modify Queue Pair Control Block 0 Selection Mask Bits */ | ||
108 | #define H_QPCB0_ALL EHEA_BMASK_IBM(0, 5) | ||
109 | #define H_QPCB0_QP_CTL_REG EHEA_BMASK_IBM(0, 0) | ||
110 | #define H_QPCB0_MAX_SWQE EHEA_BMASK_IBM(1, 1) | ||
111 | #define H_QPCB0_MAX_RWQE EHEA_BMASK_IBM(2, 2) | ||
112 | #define H_QPCB0_PORT_NB EHEA_BMASK_IBM(3, 3) | ||
113 | #define H_QPCB0_QP_AER EHEA_BMASK_IBM(4, 4) | ||
114 | #define H_QPCB0_QP_TENURE EHEA_BMASK_IBM(5, 5) | ||
115 | |||
116 | /* Queue Pair Control Register Status Bits */ | ||
117 | #define H_QP_CR_ENABLED 0x8000000000000000ULL /* QP enabled */ | ||
118 | /* QP States: */ | ||
119 | #define H_QP_CR_STATE_RESET 0x0000010000000000ULL /* Reset */ | ||
120 | #define H_QP_CR_STATE_INITIALIZED 0x0000020000000000ULL /* Initialized */ | ||
121 | #define H_QP_CR_STATE_RDY2RCV 0x0000030000000000ULL /* Ready to recv */ | ||
122 | #define H_QP_CR_STATE_RDY2SND 0x0000050000000000ULL /* Ready to send */ | ||
123 | #define H_QP_CR_STATE_ERROR 0x0000800000000000ULL /* Error */ | ||
124 | |||
125 | struct hcp_modify_qp_cb1 { | ||
126 | u32 qpn; /* 00 */ | ||
127 | u32 qp_asyn_ev_eq_nb; /* 01 */ | ||
128 | u64 sq_cq_handle; /* 02 */ | ||
129 | u64 rq_cq_handle; /* 04 */ | ||
130 | /* sgel = scatter gather element */ | ||
131 | u32 sgel_nb_sq; /* 06 */ | ||
132 | u32 sgel_nb_rq1; /* 07 */ | ||
133 | u32 sgel_nb_rq2; /* 08 */ | ||
134 | u32 sgel_nb_rq3; /* 09 */ | ||
135 | }; | ||
136 | |||
137 | /* Hcall Query/Modify Queue Pair Control Block 1 Selection Mask Bits */ | ||
138 | #define H_QPCB1_ALL EHEA_BMASK_IBM(0, 7) | ||
139 | #define H_QPCB1_QPN EHEA_BMASK_IBM(0, 0) | ||
140 | #define H_QPCB1_ASYN_EV_EQ_NB EHEA_BMASK_IBM(1, 1) | ||
141 | #define H_QPCB1_SQ_CQ_HANDLE EHEA_BMASK_IBM(2, 2) | ||
142 | #define H_QPCB1_RQ_CQ_HANDLE EHEA_BMASK_IBM(3, 3) | ||
143 | #define H_QPCB1_SGEL_NB_SQ EHEA_BMASK_IBM(4, 4) | ||
144 | #define H_QPCB1_SGEL_NB_RQ1 EHEA_BMASK_IBM(5, 5) | ||
145 | #define H_QPCB1_SGEL_NB_RQ2 EHEA_BMASK_IBM(6, 6) | ||
146 | #define H_QPCB1_SGEL_NB_RQ3 EHEA_BMASK_IBM(7, 7) | ||
147 | |||
148 | struct hcp_query_ehea { | ||
149 | u32 cur_num_qps; /* 00 */ | ||
150 | u32 cur_num_cqs; /* 01 */ | ||
151 | u32 cur_num_eqs; /* 02 */ | ||
152 | u32 cur_num_mrs; /* 03 */ | ||
153 | u32 auth_level; /* 04 */ | ||
154 | u32 max_num_qps; /* 05 */ | ||
155 | u32 max_num_cqs; /* 06 */ | ||
156 | u32 max_num_eqs; /* 07 */ | ||
157 | u32 max_num_mrs; /* 08 */ | ||
158 | u32 reserved0; /* 09 */ | ||
159 | u32 int_clock_freq; /* 10 */ | ||
160 | u32 max_num_pds; /* 11 */ | ||
161 | u32 max_num_addr_handles; /* 12 */ | ||
162 | u32 max_num_cqes; /* 13 */ | ||
163 | u32 max_num_wqes; /* 14 */ | ||
164 | u32 max_num_sgel_rq1wqe; /* 15 */ | ||
165 | u32 max_num_sgel_rq2wqe; /* 16 */ | ||
166 | u32 max_num_sgel_rq3wqe; /* 17 */ | ||
167 | u32 mr_page_size; /* 18 */ | ||
168 | u32 reserved1; /* 19 */ | ||
169 | u64 max_mr_size; /* 20 */ | ||
170 | u64 reserved2; /* 22 */ | ||
171 | u32 num_ports; /* 24 */ | ||
172 | u32 reserved3; /* 25 */ | ||
173 | u32 reserved4; /* 26 */ | ||
174 | u32 reserved5; /* 27 */ | ||
175 | u64 max_mc_mac; /* 28 */ | ||
176 | u64 ehea_cap; /* 30 */ | ||
177 | u32 max_isn_per_eq; /* 32 */ | ||
178 | u32 max_num_neq; /* 33 */ | ||
179 | u64 max_num_vlan_ids; /* 34 */ | ||
180 | u32 max_num_port_group; /* 36 */ | ||
181 | u32 max_num_phys_port; /* 37 */ | ||
182 | |||
183 | }; | ||
184 | |||
185 | /* Hcall Query/Modify Port Control Block defines */ | ||
186 | #define H_PORT_CB0 0 | ||
187 | #define H_PORT_CB1 1 | ||
188 | #define H_PORT_CB2 2 | ||
189 | #define H_PORT_CB3 3 | ||
190 | #define H_PORT_CB4 4 | ||
191 | #define H_PORT_CB5 5 | ||
192 | #define H_PORT_CB6 6 | ||
193 | #define H_PORT_CB7 7 | ||
194 | |||
195 | struct hcp_ehea_port_cb0 { | ||
196 | u64 port_mac_addr; | ||
197 | u64 port_rc; | ||
198 | u64 reserved0; | ||
199 | u32 port_op_state; | ||
200 | u32 port_speed; | ||
201 | u32 ext_swport_op_state; | ||
202 | u32 neg_tpf_prpf; | ||
203 | u32 num_default_qps; | ||
204 | u32 reserved1; | ||
205 | u64 default_qpn_arr[16]; | ||
206 | }; | ||
207 | |||
208 | /* Hcall Query/Modify Port Control Block 0 Selection Mask Bits */ | ||
209 | #define H_PORT_CB0_ALL EHEA_BMASK_IBM(0, 7) /* Set all bits */ | ||
210 | #define H_PORT_CB0_MAC EHEA_BMASK_IBM(0, 0) /* MAC address */ | ||
211 | #define H_PORT_CB0_PRC EHEA_BMASK_IBM(1, 1) /* Port Recv Control */ | ||
212 | #define H_PORT_CB0_DEFQPNARRAY EHEA_BMASK_IBM(7, 7) /* Default QPN Array */ | ||
213 | |||
214 | /* Hcall Query Port: Returned port speed values */ | ||
215 | #define H_SPEED_10M_H 1 /* 10 Mbps, Half Duplex */ | ||
216 | #define H_SPEED_10M_F 2 /* 10 Mbps, Full Duplex */ | ||
217 | #define H_SPEED_100M_H 3 /* 100 Mbps, Half Duplex */ | ||
218 | #define H_SPEED_100M_F 4 /* 100 Mbps, Full Duplex */ | ||
219 | #define H_SPEED_1G_F 6 /* 1 Gbps, Full Duplex */ | ||
220 | #define H_SPEED_10G_F 8 /* 10 Gbps, Full Duplex */ | ||
221 | |||
222 | /* Port Receive Control Status Bits */ | ||
223 | #define PXLY_RC_VALID EHEA_BMASK_IBM(49, 49) | ||
224 | #define PXLY_RC_VLAN_XTRACT EHEA_BMASK_IBM(50, 50) | ||
225 | #define PXLY_RC_TCP_6_TUPLE EHEA_BMASK_IBM(51, 51) | ||
226 | #define PXLY_RC_UDP_6_TUPLE EHEA_BMASK_IBM(52, 52) | ||
227 | #define PXLY_RC_TCP_3_TUPLE EHEA_BMASK_IBM(53, 53) | ||
228 | #define PXLY_RC_TCP_2_TUPLE EHEA_BMASK_IBM(54, 54) | ||
229 | #define PXLY_RC_LLC_SNAP EHEA_BMASK_IBM(55, 55) | ||
230 | #define PXLY_RC_JUMBO_FRAME EHEA_BMASK_IBM(56, 56) | ||
231 | #define PXLY_RC_FRAG_IP_PKT EHEA_BMASK_IBM(57, 57) | ||
232 | #define PXLY_RC_TCP_UDP_CHKSUM EHEA_BMASK_IBM(58, 58) | ||
233 | #define PXLY_RC_IP_CHKSUM EHEA_BMASK_IBM(59, 59) | ||
234 | #define PXLY_RC_MAC_FILTER EHEA_BMASK_IBM(60, 60) | ||
235 | #define PXLY_RC_UNTAG_FILTER EHEA_BMASK_IBM(61, 61) | ||
236 | #define PXLY_RC_VLAN_TAG_FILTER EHEA_BMASK_IBM(62, 63) | ||
237 | |||
238 | #define PXLY_RC_VLAN_FILTER 2 | ||
239 | #define PXLY_RC_VLAN_PERM 0 | ||
240 | |||
241 | |||
242 | #define H_PORT_CB1_ALL 0x8000000000000000ULL | ||
243 | |||
244 | struct hcp_ehea_port_cb1 { | ||
245 | u64 vlan_filter[64]; | ||
246 | }; | ||
247 | |||
248 | #define H_PORT_CB2_ALL 0xFFE0000000000000ULL | ||
249 | |||
250 | struct hcp_ehea_port_cb2 { | ||
251 | u64 rxo; | ||
252 | u64 rxucp; | ||
253 | u64 rxufd; | ||
254 | u64 rxuerr; | ||
255 | u64 rxftl; | ||
256 | u64 rxmcp; | ||
257 | u64 rxbcp; | ||
258 | u64 txo; | ||
259 | u64 txucp; | ||
260 | u64 txmcp; | ||
261 | u64 txbcp; | ||
262 | }; | ||
263 | |||
264 | struct hcp_ehea_port_cb3 { | ||
265 | u64 vlan_bc_filter[64]; | ||
266 | u64 vlan_mc_filter[64]; | ||
267 | u64 vlan_un_filter[64]; | ||
268 | u64 port_mac_hash_array[64]; | ||
269 | }; | ||
270 | |||
271 | #define H_PORT_CB4_ALL 0xF000000000000000ULL | ||
272 | #define H_PORT_CB4_JUMBO 0x1000000000000000ULL | ||
273 | #define H_PORT_CB4_SPEED 0x8000000000000000ULL | ||
274 | |||
275 | struct hcp_ehea_port_cb4 { | ||
276 | u32 port_speed; | ||
277 | u32 pause_frame; | ||
278 | u32 ens_port_op_state; | ||
279 | u32 jumbo_frame; | ||
280 | u32 ens_port_wrap; | ||
281 | }; | ||
282 | |||
283 | /* Hcall Query/Modify Port Control Block 5 Selection Mask Bits */ | ||
284 | #define H_PORT_CB5_RCU 0x0001000000000000ULL | ||
285 | #define PXS_RCU EHEA_BMASK_IBM(61, 63) | ||
286 | |||
287 | struct hcp_ehea_port_cb5 { | ||
288 | u64 prc; /* 00 */ | ||
289 | u64 uaa; /* 01 */ | ||
290 | u64 macvc; /* 02 */ | ||
291 | u64 xpcsc; /* 03 */ | ||
292 | u64 xpcsp; /* 04 */ | ||
293 | u64 pcsid; /* 05 */ | ||
294 | u64 xpcsst; /* 06 */ | ||
295 | u64 pthlb; /* 07 */ | ||
296 | u64 pthrb; /* 08 */ | ||
297 | u64 pqu; /* 09 */ | ||
298 | u64 pqd; /* 10 */ | ||
299 | u64 prt; /* 11 */ | ||
300 | u64 wsth; /* 12 */ | ||
301 | u64 rcb; /* 13 */ | ||
302 | u64 rcm; /* 14 */ | ||
303 | u64 rcu; /* 15 */ | ||
304 | u64 macc; /* 16 */ | ||
305 | u64 pc; /* 17 */ | ||
306 | u64 pst; /* 18 */ | ||
307 | u64 ducqpn; /* 19 */ | ||
308 | u64 mcqpn; /* 20 */ | ||
309 | u64 mma; /* 21 */ | ||
310 | u64 pmc0h; /* 22 */ | ||
311 | u64 pmc0l; /* 23 */ | ||
312 | u64 lbc; /* 24 */ | ||
313 | }; | ||
314 | |||
315 | #define H_PORT_CB6_ALL 0xFFFFFE7FFFFF8000ULL | ||
316 | |||
317 | struct hcp_ehea_port_cb6 { | ||
318 | u64 rxo; /* 00 */ | ||
319 | u64 rx64; /* 01 */ | ||
320 | u64 rx65; /* 02 */ | ||
321 | u64 rx128; /* 03 */ | ||
322 | u64 rx256; /* 04 */ | ||
323 | u64 rx512; /* 05 */ | ||
324 | u64 rx1024; /* 06 */ | ||
325 | u64 rxbfcs; /* 07 */ | ||
326 | u64 rxime; /* 08 */ | ||
327 | u64 rxrle; /* 09 */ | ||
328 | u64 rxorle; /* 10 */ | ||
329 | u64 rxftl; /* 11 */ | ||
330 | u64 rxjab; /* 12 */ | ||
331 | u64 rxse; /* 13 */ | ||
332 | u64 rxce; /* 14 */ | ||
333 | u64 rxrf; /* 15 */ | ||
334 | u64 rxfrag; /* 16 */ | ||
335 | u64 rxuoc; /* 17 */ | ||
336 | u64 rxcpf; /* 18 */ | ||
337 | u64 rxsb; /* 19 */ | ||
338 | u64 rxfd; /* 20 */ | ||
339 | u64 rxoerr; /* 21 */ | ||
340 | u64 rxaln; /* 22 */ | ||
341 | u64 ducqpn; /* 23 */ | ||
342 | u64 reserved0; /* 24 */ | ||
343 | u64 rxmcp; /* 25 */ | ||
344 | u64 rxbcp; /* 26 */ | ||
345 | u64 txmcp; /* 27 */ | ||
346 | u64 txbcp; /* 28 */ | ||
347 | u64 txo; /* 29 */ | ||
348 | u64 tx64; /* 30 */ | ||
349 | u64 tx65; /* 31 */ | ||
350 | u64 tx128; /* 32 */ | ||
351 | u64 tx256; /* 33 */ | ||
352 | u64 tx512; /* 34 */ | ||
353 | u64 tx1024; /* 35 */ | ||
354 | u64 txbfcs; /* 36 */ | ||
355 | u64 txcpf; /* 37 */ | ||
356 | u64 txlf; /* 38 */ | ||
357 | u64 txrf; /* 39 */ | ||
358 | u64 txime; /* 40 */ | ||
359 | u64 txsc; /* 41 */ | ||
360 | u64 txmc; /* 42 */ | ||
361 | u64 txsqe; /* 43 */ | ||
362 | u64 txdef; /* 44 */ | ||
363 | u64 txlcol; /* 45 */ | ||
364 | u64 txexcol; /* 46 */ | ||
365 | u64 txcse; /* 47 */ | ||
366 | u64 txbor; /* 48 */ | ||
367 | }; | ||
368 | |||
369 | #define H_PORT_CB7_DUCQPN 0x8000000000000000ULL | ||
370 | |||
371 | struct hcp_ehea_port_cb7 { | ||
372 | u64 def_uc_qpn; | ||
373 | }; | ||
374 | |||
375 | u64 ehea_h_query_ehea_qp(const u64 adapter_handle, | ||
376 | const u8 qp_category, | ||
377 | const u64 qp_handle, const u64 sel_mask, | ||
378 | void *cb_addr); | ||
379 | |||
380 | u64 ehea_h_modify_ehea_qp(const u64 adapter_handle, | ||
381 | const u8 cat, | ||
382 | const u64 qp_handle, | ||
383 | const u64 sel_mask, | ||
384 | void *cb_addr, | ||
385 | u64 *inv_attr_id, | ||
386 | u64 *proc_mask, u16 *out_swr, u16 *out_rwr); | ||
387 | |||
388 | u64 ehea_h_alloc_resource_eq(const u64 adapter_handle, | ||
389 | struct ehea_eq_attr *eq_attr, u64 *eq_handle); | ||
390 | |||
391 | u64 ehea_h_alloc_resource_cq(const u64 adapter_handle, | ||
392 | struct ehea_cq_attr *cq_attr, | ||
393 | u64 *cq_handle, struct h_epas *epas); | ||
394 | |||
395 | u64 ehea_h_alloc_resource_qp(const u64 adapter_handle, | ||
396 | struct ehea_qp_init_attr *init_attr, | ||
397 | const u32 pd, | ||
398 | u64 *qp_handle, struct h_epas *h_epas); | ||
399 | |||
400 | #define H_REG_RPAGE_PAGE_SIZE EHEA_BMASK_IBM(48, 55) | ||
401 | #define H_REG_RPAGE_QT EHEA_BMASK_IBM(62, 63) | ||
402 | |||
403 | u64 ehea_h_register_rpage(const u64 adapter_handle, | ||
404 | const u8 pagesize, | ||
405 | const u8 queue_type, | ||
406 | const u64 resource_handle, | ||
407 | const u64 log_pageaddr, u64 count); | ||
408 | |||
409 | #define H_DISABLE_GET_EHEA_WQE_P 1 | ||
410 | #define H_DISABLE_GET_SQ_WQE_P 2 | ||
411 | #define H_DISABLE_GET_RQC 3 | ||
412 | |||
413 | u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle); | ||
414 | |||
415 | u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle); | ||
416 | |||
417 | u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr, | ||
418 | const u64 length, const u32 access_ctrl, | ||
419 | const u32 pd, u64 *mr_handle, u32 *lkey); | ||
420 | |||
421 | u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle, | ||
422 | const u8 pagesize, const u8 queue_type, | ||
423 | const u64 log_pageaddr, const u64 count); | ||
424 | |||
425 | u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle, | ||
426 | const u64 vaddr_in, const u32 access_ctrl, const u32 pd, | ||
427 | struct ehea_mr *mr); | ||
428 | |||
429 | u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr); | ||
430 | |||
431 | /* output param R5 */ | ||
432 | #define H_MEHEAPORT_CAT EHEA_BMASK_IBM(40, 47) | ||
433 | #define H_MEHEAPORT_PN EHEA_BMASK_IBM(48, 63) | ||
434 | |||
435 | u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num, | ||
436 | const u8 cb_cat, const u64 select_mask, | ||
437 | void *cb_addr); | ||
438 | |||
439 | u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num, | ||
440 | const u8 cb_cat, const u64 select_mask, | ||
441 | void *cb_addr); | ||
442 | |||
443 | #define H_REGBCMC_PN EHEA_BMASK_IBM(48, 63) | ||
444 | #define H_REGBCMC_REGTYPE EHEA_BMASK_IBM(61, 63) | ||
445 | #define H_REGBCMC_MACADDR EHEA_BMASK_IBM(16, 63) | ||
446 | #define H_REGBCMC_VLANID EHEA_BMASK_IBM(52, 63) | ||
447 | |||
448 | u64 ehea_h_reg_dereg_bcmc(const u64 adapter_handle, const u16 port_num, | ||
449 | const u8 reg_type, const u64 mc_mac_addr, | ||
450 | const u16 vlan_id, const u32 hcall_id); | ||
451 | |||
452 | u64 ehea_h_reset_events(const u64 adapter_handle, const u64 neq_handle, | ||
453 | const u64 event_mask); | ||
454 | |||
455 | #endif /* __EHEA_PHYP_H__ */ | ||
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c new file mode 100644 index 000000000000..3e1862326c88 --- /dev/null +++ b/drivers/net/ehea/ehea_qmr.c | |||
@@ -0,0 +1,582 @@ | |||
1 | /* | ||
2 | * linux/drivers/net/ehea/ehea_qmr.c | ||
3 | * | ||
4 | * eHEA ethernet device driver for IBM eServer System p | ||
5 | * | ||
6 | * (C) Copyright IBM Corp. 2006 | ||
7 | * | ||
8 | * Authors: | ||
9 | * Christoph Raisch <raisch@de.ibm.com> | ||
10 | * Jan-Bernd Themann <themann@de.ibm.com> | ||
11 | * Thomas Klein <tklein@de.ibm.com> | ||
12 | * | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify | ||
15 | * it under the terms of the GNU General Public License as published by | ||
16 | * the Free Software Foundation; either version 2, or (at your option) | ||
17 | * any later version. | ||
18 | * | ||
19 | * This program is distributed in the hope that it will be useful, | ||
20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
22 | * GNU General Public License for more details. | ||
23 | * | ||
24 | * You should have received a copy of the GNU General Public License | ||
25 | * along with this program; if not, write to the Free Software | ||
26 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
27 | */ | ||
28 | |||
29 | #include "ehea.h" | ||
30 | #include "ehea_phyp.h" | ||
31 | #include "ehea_qmr.h" | ||
32 | /* return the current queue page and advance one page; NULL when exhausted */ | ||
33 | static void *hw_qpageit_get_inc(struct hw_queue *queue) | ||
34 | { | ||
35 | void *retvalue = hw_qeit_get(queue); | ||
36 | |||
37 | queue->current_q_offset += queue->pagesize; | ||
38 | if (queue->current_q_offset > queue->queue_length) { | ||
39 | queue->current_q_offset -= queue->pagesize; | ||
40 | retvalue = NULL; | ||
41 | } else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) { | ||
42 | ehea_error("not on pageboundary"); | ||
43 | retvalue = NULL; | ||
44 | } | ||
45 | return retvalue; | ||
46 | } | ||
47 | |||
48 | static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages, | ||
49 | const u32 pagesize, const u32 qe_size) | ||
50 | { | ||
51 | int pages_per_kpage = PAGE_SIZE / pagesize; | ||
52 | int i, k; | ||
53 | |||
54 | if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) { | ||
55 | ehea_error("pagesize conflict! kernel pagesize=%d, " | ||
56 | "ehea pagesize=%d", (int)PAGE_SIZE, (int)pagesize); | ||
57 | return -EINVAL; | ||
58 | } | ||
59 | |||
60 | queue->queue_length = nr_of_pages * pagesize; | ||
61 | queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *), GFP_KERNEL); /* zeroed: error path tests for NULL slots */ | ||
62 | if (!queue->queue_pages) { | ||
63 | ehea_error("no mem for queue_pages"); | ||
64 | return -ENOMEM; | ||
65 | } | ||
66 | |||
67 | /* | ||
68 | * allocate pages for queue: | ||
69 | * outer loop allocates whole kernel pages (page aligned) and | ||
70 | * inner loop divides a kernel page into smaller hea queue pages | ||
71 | */ | ||
72 | i = 0; | ||
73 | while (i < nr_of_pages) { | ||
74 | u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL); | ||
75 | if (!kpage) | ||
76 | goto out_nomem; | ||
77 | for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) { | ||
78 | (queue->queue_pages)[i] = (struct ehea_page *)kpage; | ||
79 | kpage += pagesize; | ||
80 | i++; | ||
81 | } | ||
82 | } | ||
83 | |||
84 | queue->current_q_offset = 0; | ||
85 | queue->qe_size = qe_size; | ||
86 | queue->pagesize = pagesize; | ||
87 | queue->toggle_state = 1; | ||
88 | |||
89 | return 0; | ||
90 | out_nomem: | ||
91 | for (i = 0; i < nr_of_pages; i += pages_per_kpage) { | ||
92 | if (!(queue->queue_pages)[i]) | ||
93 | break; | ||
94 | free_page((unsigned long)(queue->queue_pages)[i]); | ||
95 | } | ||
96 | return -ENOMEM; | ||
97 | } | ||
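On pSeries kernels with 64 KiB pages, pages_per_kpage = 65536 / 4096 = 16, so a three-page queue consumes one zeroed kernel page and hands out three of its sixteen 4 KiB slices. A minimal usage sketch:

struct hw_queue q;

/* ring of two EHEA pages holding CQE-sized entries */
if (!hw_queue_ctor(&q, 2, EHEA_PAGESIZE, sizeof(struct ehea_cqe)))
	hw_queue_dtor(&q);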
98 | |||
99 | static void hw_queue_dtor(struct hw_queue *queue) | ||
100 | { | ||
101 | int pages_per_kpage, i, nr_pages; | ||
102 | |||
103 | if (!queue || !queue->queue_pages) | ||
104 | return; | ||
105 | |||
106 | pages_per_kpage = PAGE_SIZE / queue->pagesize; | ||
107 | nr_pages = queue->queue_length / queue->pagesize; | ||
108 | |||
109 | for (i = 0; i < nr_pages; i += pages_per_kpage) | ||
110 | free_page((unsigned long)(queue->queue_pages)[i]); | ||
111 | |||
112 | kfree(queue->queue_pages); | ||
113 | } | ||
114 | |||
115 | struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, | ||
116 | int nr_of_cqe, u64 eq_handle, u32 cq_token) | ||
117 | { | ||
118 | struct ehea_cq *cq; | ||
119 | struct h_epa epa; | ||
120 | u64 *cq_handle_ref, hret, rpage; | ||
121 | u32 act_nr_of_entries, act_pages, counter; | ||
122 | int ret; | ||
123 | void *vpage; | ||
124 | |||
125 | cq = kzalloc(sizeof(*cq), GFP_KERNEL); | ||
126 | if (!cq) { | ||
127 | ehea_error("no mem for cq"); | ||
128 | goto out_nomem; | ||
129 | } | ||
130 | |||
131 | cq->attr.max_nr_of_cqes = nr_of_cqe; | ||
132 | cq->attr.cq_token = cq_token; | ||
133 | cq->attr.eq_handle = eq_handle; | ||
134 | |||
135 | cq->adapter = adapter; | ||
136 | |||
137 | cq_handle_ref = &cq->fw_handle; | ||
138 | act_nr_of_entries = 0; | ||
139 | act_pages = 0; | ||
140 | |||
141 | hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr, | ||
142 | &cq->fw_handle, &cq->epas); | ||
143 | if (hret != H_SUCCESS) { | ||
144 | ehea_error("alloc_resource_cq failed"); | ||
145 | goto out_freemem; | ||
146 | } | ||
147 | |||
148 | ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages, | ||
149 | EHEA_PAGESIZE, sizeof(struct ehea_cqe)); | ||
150 | if (ret) | ||
151 | goto out_freeres; | ||
152 | |||
153 | for (counter = 0; counter < cq->attr.nr_pages; counter++) { | ||
154 | vpage = hw_qpageit_get_inc(&cq->hw_queue); | ||
155 | if (!vpage) { | ||
156 | ehea_error("hw_qpageit_get_inc failed"); | ||
157 | goto out_kill_hwq; | ||
158 | } | ||
159 | |||
160 | rpage = virt_to_abs(vpage); | ||
161 | hret = ehea_h_register_rpage(adapter->handle, | ||
162 | 0, EHEA_CQ_REGISTER_ORIG, | ||
163 | cq->fw_handle, rpage, 1); | ||
164 | if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) { | ||
165 | ehea_error("register_rpage_cq failed ehea_cq=%p " | ||
166 | "hret=%lx counter=%i nr_pages=%i", | ||
167 | cq, hret, counter, cq->attr.nr_pages); | ||
168 | goto out_kill_hwq; | ||
169 | } | ||
170 | |||
171 | if (counter == (cq->attr.nr_pages - 1)) { | ||
172 | vpage = hw_qpageit_get_inc(&cq->hw_queue); | ||
173 | |||
174 | if ((hret != H_SUCCESS) || (vpage)) { | ||
175 | ehea_error("registration of pages not " | ||
176 | "complete hret=%lx\n", hret); | ||
177 | goto out_kill_hwq; | ||
178 | } | ||
179 | } else { | ||
180 | if ((hret != H_PAGE_REGISTERED) || (!vpage)) { | ||
181 | ehea_error("CQ: registration of page failed " | ||
182 | "hret=%lx\n", hret); | ||
183 | goto out_kill_hwq; | ||
184 | } | ||
185 | } | ||
186 | } | ||
187 | |||
188 | hw_qeit_reset(&cq->hw_queue); | ||
189 | epa = cq->epas.kernel; | ||
190 | ehea_reset_cq_ep(cq); | ||
191 | ehea_reset_cq_n1(cq); | ||
192 | |||
193 | return cq; | ||
194 | |||
195 | out_kill_hwq: | ||
196 | hw_queue_dtor(&cq->hw_queue); | ||
197 | |||
198 | out_freeres: | ||
199 | ehea_h_free_resource(adapter->handle, cq->fw_handle); | ||
200 | |||
201 | out_freemem: | ||
202 | kfree(cq); | ||
203 | |||
204 | out_nomem: | ||
205 | return NULL; | ||
206 | } | ||
207 | |||
208 | int ehea_destroy_cq(struct ehea_cq *cq) | ||
209 | { | ||
210 | u64 adapter_handle, hret; | ||
211 | |||
212 | if (!cq) | ||
213 | return 0; | ||
214 | |||
215 | adapter_handle = cq->adapter->handle; | ||
216 | |||
217 | /* freeing the resource also deregisters all previously registered pages */ | ||
218 | hret = ehea_h_free_resource(adapter_handle, cq->fw_handle); | ||
219 | if (hret != H_SUCCESS) { | ||
220 | ehea_error("destroy CQ failed"); | ||
221 | return -EIO; | ||
222 | } | ||
223 | |||
224 | hw_queue_dtor(&cq->hw_queue); | ||
225 | kfree(cq); | ||
226 | |||
227 | return 0; | ||
228 | } | ||
229 | |||
230 | struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter, | ||
231 | const enum ehea_eq_type type, | ||
232 | const u32 max_nr_of_eqes, const u8 eqe_gen) | ||
233 | { | ||
234 | int ret, i; | ||
235 | u64 hret, rpage; | ||
236 | void *vpage; | ||
237 | struct ehea_eq *eq; | ||
238 | |||
239 | eq = kzalloc(sizeof(*eq), GFP_KERNEL); | ||
240 | if (!eq) { | ||
241 | ehea_error("no mem for eq"); | ||
242 | return NULL; | ||
243 | } | ||
244 | |||
245 | eq->adapter = adapter; | ||
246 | eq->attr.type = type; | ||
247 | eq->attr.max_nr_of_eqes = max_nr_of_eqes; | ||
248 | eq->attr.eqe_gen = eqe_gen; | ||
249 | spin_lock_init(&eq->spinlock); | ||
250 | |||
251 | hret = ehea_h_alloc_resource_eq(adapter->handle, | ||
252 | &eq->attr, &eq->fw_handle); | ||
253 | if (hret != H_SUCCESS) { | ||
254 | ehea_error("alloc_resource_eq failed"); | ||
255 | goto out_freemem; | ||
256 | } | ||
257 | |||
258 | ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages, | ||
259 | EHEA_PAGESIZE, sizeof(struct ehea_eqe)); | ||
260 | if (ret) { | ||
261 | ehea_error("can't allocate eq pages"); | ||
262 | goto out_freeres; | ||
263 | } | ||
264 | |||
265 | for (i = 0; i < eq->attr.nr_pages; i++) { | ||
266 | vpage = hw_qpageit_get_inc(&eq->hw_queue); | ||
267 | if (!vpage) { | ||
268 | ehea_error("hw_qpageit_get_inc failed"); | ||
269 | hret = H_RESOURCE; | ||
270 | goto out_kill_hwq; | ||
271 | } | ||
272 | |||
273 | rpage = virt_to_abs(vpage); | ||
274 | |||
275 | hret = ehea_h_register_rpage(adapter->handle, 0, | ||
276 | EHEA_EQ_REGISTER_ORIG, | ||
277 | eq->fw_handle, rpage, 1); | ||
278 | |||
279 | if (i == (eq->attr.nr_pages - 1)) { | ||
280 | /* last page */ | ||
281 | vpage = hw_qpageit_get_inc(&eq->hw_queue); | ||
282 | if ((hret != H_SUCCESS) || (vpage)) { | ||
283 | goto out_kill_hwq; | ||
284 | } | ||
285 | } else { | ||
286 | if ((hret != H_PAGE_REGISTERED) || (!vpage)) { | ||
287 | goto out_kill_hwq; | ||
288 | } | ||
289 | } | ||
290 | } | ||
291 | |||
292 | hw_qeit_reset(&eq->hw_queue); | ||
293 | return eq; | ||
294 | |||
295 | out_kill_hwq: | ||
296 | hw_queue_dtor(&eq->hw_queue); | ||
297 | |||
298 | out_freeres: | ||
299 | ehea_h_free_resource(adapter->handle, eq->fw_handle); | ||
300 | |||
301 | out_freemem: | ||
302 | kfree(eq); | ||
303 | return NULL; | ||
304 | } | ||
305 | |||
306 | struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq) | ||
307 | { | ||
308 | struct ehea_eqe *eqe; | ||
309 | unsigned long flags; | ||
310 | |||
311 | spin_lock_irqsave(&eq->spinlock, flags); | ||
312 | eqe = (struct ehea_eqe *)hw_eqit_eq_get_inc_valid(&eq->hw_queue); | ||
313 | spin_unlock_irqrestore(&eq->spinlock, flags); | ||
314 | |||
315 | return eqe; | ||
316 | } | ||
317 | |||
318 | int ehea_destroy_eq(struct ehea_eq *eq) | ||
319 | { | ||
320 | u64 hret; | ||
321 | unsigned long flags; | ||
322 | |||
323 | if (!eq) | ||
324 | return 0; | ||
325 | |||
326 | spin_lock_irqsave(&eq->spinlock, flags); | ||
327 | |||
328 | hret = ehea_h_free_resource(eq->adapter->handle, eq->fw_handle); | ||
329 | spin_unlock_irqrestore(&eq->spinlock, flags); | ||
330 | |||
331 | if (hret != H_SUCCESS) { | ||
332 | ehea_error("destroy_eq failed"); | ||
333 | return -EIO; | ||
334 | } | ||
335 | |||
336 | hw_queue_dtor(&eq->hw_queue); | ||
337 | kfree(eq); | ||
338 | |||
339 | return 0; | ||
340 | } | ||
341 | |||
342 | /* | ||
343 | * allocates memory for a queue and registers its pages with pHyp | ||
344 | */ | ||
345 | int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue, | ||
346 | int nr_pages, int wqe_size, int act_nr_sges, | ||
347 | struct ehea_adapter *adapter, int h_call_q_selector) | ||
348 | { | ||
349 | u64 hret, rpage; | ||
350 | int ret, cnt; | ||
351 | void *vpage; | ||
352 | |||
353 | ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size); | ||
354 | if (ret) | ||
355 | return ret; | ||
356 | |||
357 | for (cnt = 0; cnt < nr_pages; cnt++) { | ||
358 | vpage = hw_qpageit_get_inc(hw_queue); | ||
359 | if (!vpage) { | ||
360 | ehea_error("hw_qpageit_get_inc failed"); | ||
361 | goto out_kill_hwq; | ||
362 | } | ||
363 | rpage = virt_to_abs(vpage); | ||
364 | hret = ehea_h_register_rpage(adapter->handle, | ||
365 | 0, h_call_q_selector, | ||
366 | qp->fw_handle, rpage, 1); | ||
367 | if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) { | ||
368 | ehea_error("register_rpage_qp failed"); | ||
369 | goto out_kill_hwq; | ||
370 | } | ||
371 | } | ||
372 | hw_qeit_reset(hw_queue); | ||
373 | return 0; | ||
374 | |||
375 | out_kill_hwq: | ||
376 | hw_queue_dtor(hw_queue); | ||
377 | return -EIO; | ||
378 | } | ||
379 | |||
380 | static inline u32 map_wqe_size(u8 wqe_enc_size) | ||
381 | { | ||
382 | return 128 << wqe_enc_size; /* firmware encodes WQE sizes as 128 << enc */ | ||
383 | } | ||
384 | |||
385 | struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, | ||
386 | u32 pd, struct ehea_qp_init_attr *init_attr) | ||
387 | { | ||
388 | int ret; | ||
389 | u64 hret; | ||
390 | struct ehea_qp *qp; | ||
391 | u32 wqe_size_in_bytes_sq, wqe_size_in_bytes_rq1; | ||
392 | u32 wqe_size_in_bytes_rq2, wqe_size_in_bytes_rq3; | ||
393 | |||
394 | |||
395 | qp = kzalloc(sizeof(*qp), GFP_KERNEL); | ||
396 | if (!qp) { | ||
397 | ehea_error("no mem for qp"); | ||
398 | return NULL; | ||
399 | } | ||
400 | |||
401 | qp->adapter = adapter; | ||
402 | |||
403 | hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd, | ||
404 | &qp->fw_handle, &qp->epas); | ||
405 | if (hret != H_SUCCESS) { | ||
406 | ehea_error("ehea_h_alloc_resource_qp failed"); | ||
407 | goto out_freemem; | ||
408 | } | ||
409 | |||
410 | wqe_size_in_bytes_sq = map_wqe_size(init_attr->act_wqe_size_enc_sq); | ||
411 | wqe_size_in_bytes_rq1 = map_wqe_size(init_attr->act_wqe_size_enc_rq1); | ||
412 | wqe_size_in_bytes_rq2 = map_wqe_size(init_attr->act_wqe_size_enc_rq2); | ||
413 | wqe_size_in_bytes_rq3 = map_wqe_size(init_attr->act_wqe_size_enc_rq3); | ||
414 | |||
415 | ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages, | ||
416 | wqe_size_in_bytes_sq, | ||
417 | init_attr->act_wqe_size_enc_sq, adapter, | ||
418 | 0); | ||
419 | if (ret) { | ||
420 | ehea_error("can't register for sq ret=%x", ret); | ||
421 | goto out_freeres; | ||
422 | } | ||
423 | |||
424 | ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1, | ||
425 | init_attr->nr_rq1_pages, | ||
426 | wqe_size_in_bytes_rq1, | ||
427 | init_attr->act_wqe_size_enc_rq1, | ||
428 | adapter, 1); | ||
429 | if (ret) { | ||
430 | ehea_error("can't register for rq1 ret=%x", ret); | ||
431 | goto out_kill_hwsq; | ||
432 | } | ||
433 | |||
434 | if (init_attr->rq_count > 1) { | ||
435 | ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2, | ||
436 | init_attr->nr_rq2_pages, | ||
437 | wqe_size_in_bytes_rq2, | ||
438 | init_attr->act_wqe_size_enc_rq2, | ||
439 | adapter, 2); | ||
440 | if (ret) { | ||
441 | ehea_error("can't register for rq2 ret=%x", ret); | ||
442 | goto out_kill_hwr1q; | ||
443 | } | ||
444 | } | ||
445 | |||
446 | if (init_attr->rq_count > 2) { | ||
447 | ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3, | ||
448 | init_attr->nr_rq3_pages, | ||
449 | wqe_size_in_bytes_rq3, | ||
450 | init_attr->act_wqe_size_enc_rq3, | ||
451 | adapter, 3); | ||
452 | if (ret) { | ||
453 | ehea_error("can't register for rq3 ret=%x", ret); | ||
454 | goto out_kill_hwr2q; | ||
455 | } | ||
456 | } | ||
457 | |||
458 | qp->init_attr = *init_attr; | ||
459 | |||
460 | return qp; | ||
461 | |||
462 | out_kill_hwr2q: | ||
463 | hw_queue_dtor(&qp->hw_rqueue2); | ||
464 | |||
465 | out_kill_hwr1q: | ||
466 | hw_queue_dtor(&qp->hw_rqueue1); | ||
467 | |||
468 | out_kill_hwsq: | ||
469 | hw_queue_dtor(&qp->hw_squeue); | ||
470 | |||
471 | out_freeres: | ||
472 | ehea_h_disable_and_get_hea(adapter->handle, qp->fw_handle); | ||
473 | ehea_h_free_resource(adapter->handle, qp->fw_handle); | ||
474 | |||
475 | out_freemem: | ||
476 | kfree(qp); | ||
477 | return NULL; | ||
478 | } | ||
479 | |||
480 | int ehea_destroy_qp(struct ehea_qp *qp) | ||
481 | { | ||
482 | u64 hret; | ||
483 | struct ehea_qp_init_attr *qp_attr; | ||
484 | |||
485 | if (!qp) | ||
486 | return 0; | ||
487 | qp_attr = &qp->init_attr; | ||
488 | hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle); | ||
489 | if (hret != H_SUCCESS) { | ||
490 | ehea_error("destroy_qp failed"); | ||
491 | return -EIO; | ||
492 | } | ||
493 | |||
494 | hw_queue_dtor(&qp->hw_squeue); | ||
495 | hw_queue_dtor(&qp->hw_rqueue1); | ||
496 | |||
497 | if (qp_attr->rq_count > 1) | ||
498 | hw_queue_dtor(&qp->hw_rqueue2); | ||
499 | if (qp_attr->rq_count > 2) | ||
500 | hw_queue_dtor(&qp->hw_rqueue3); | ||
501 | kfree(qp); | ||
502 | |||
503 | return 0; | ||
504 | } | ||
505 | |||
506 | int ehea_reg_mr_adapter(struct ehea_adapter *adapter) | ||
507 | { | ||
508 | int i, k, ret; | ||
509 | u64 hret, pt_abs, start, end, nr_pages; | ||
510 | u32 acc_ctrl = EHEA_MR_ACC_CTRL; | ||
511 | u64 *pt; | ||
512 | |||
513 | start = KERNELBASE; | ||
514 | end = (u64)high_memory; | ||
515 | nr_pages = (end - start) / PAGE_SIZE; | ||
516 | |||
517 | pt = kzalloc(PAGE_SIZE, GFP_KERNEL); | ||
518 | if (!pt) { | ||
519 | ehea_error("no mem"); | ||
520 | ret = -ENOMEM; | ||
521 | goto out; | ||
522 | } | ||
523 | pt_abs = virt_to_abs(pt); | ||
524 | |||
525 | hret = ehea_h_alloc_resource_mr(adapter->handle, start, end - start, | ||
526 | acc_ctrl, adapter->pd, | ||
527 | &adapter->mr.handle, &adapter->mr.lkey); | ||
528 | if (hret != H_SUCCESS) { | ||
529 | ehea_error("alloc_resource_mr failed"); | ||
530 | ret = -EIO; | ||
531 | goto out; | ||
532 | } | ||
533 | |||
534 | adapter->mr.vaddr = KERNELBASE; | ||
535 | k = 0; | ||
536 | |||
537 | while (nr_pages > 0) { | ||
538 | if (nr_pages > 1) { | ||
539 | u64 num_pages = min(nr_pages, (u64)512); /* 512 addresses (4kB of u64s) per hcall */ | ||
540 | for (i = 0; i < num_pages; i++) | ||
541 | pt[i] = virt_to_abs((void *)(((u64)start) | ||
542 | + ((k++) * | ||
543 | PAGE_SIZE))); | ||
544 | |||
545 | hret = ehea_h_register_rpage_mr(adapter->handle, | ||
546 | adapter->mr.handle, 0, | ||
547 | 0, (u64)pt_abs, | ||
548 | num_pages); | ||
549 | nr_pages -= num_pages; | ||
550 | } else { | ||
551 | u64 abs_adr = virt_to_abs((void *)(((u64)start) | ||
552 | + (k * PAGE_SIZE))); | ||
553 | hret = ehea_h_register_rpage_mr(adapter->handle, | ||
554 | adapter->mr.handle, 0, | ||
555 | 0, abs_adr, 1); | ||
556 | nr_pages--; | ||
557 | } | ||
558 | |||
559 | if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) { | ||
560 | ehea_h_free_resource(adapter->handle, | ||
561 | adapter->mr.handle); | ||
562 | ehea_error("register_rpage_mr failed: hret = %lX", | ||
563 | hret); | ||
564 | ret = -EIO; | ||
565 | goto out; | ||
566 | } | ||
567 | } | ||
568 | |||
569 | if (hret != H_SUCCESS) { | ||
570 | ehea_h_free_resource(adapter->handle, adapter->mr.handle); | ||
571 | ehea_error("register_rpage failed for last page: hret = %lX", | ||
572 | hret); | ||
573 | ret = -EIO; | ||
574 | goto out; | ||
575 | } | ||
576 | ret = 0; | ||
577 | out: | ||
578 | kfree(pt); | ||
579 | return ret; | ||
580 | } | ||
581 | |||
582 | |||
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h new file mode 100644 index 000000000000..7efdc96919ca --- /dev/null +++ b/drivers/net/ehea/ehea_qmr.h | |||
@@ -0,0 +1,358 @@ | |||
1 | /* | ||
2 | * linux/drivers/net/ehea/ehea_qmr.h | ||
3 | * | ||
4 | * eHEA ethernet device driver for IBM eServer System p | ||
5 | * | ||
6 | * (C) Copyright IBM Corp. 2006 | ||
7 | * | ||
8 | * Authors: | ||
9 | * Christoph Raisch <raisch@de.ibm.com> | ||
10 | * Jan-Bernd Themann <themann@de.ibm.com> | ||
11 | * Thomas Klein <tklein@de.ibm.com> | ||
12 | * | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify | ||
15 | * it under the terms of the GNU General Public License as published by | ||
16 | * the Free Software Foundation; either version 2, or (at your option) | ||
17 | * any later version. | ||
18 | * | ||
19 | * This program is distributed in the hope that it will be useful, | ||
20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
22 | * GNU General Public License for more details. | ||
23 | * | ||
24 | * You should have received a copy of the GNU General Public License | ||
25 | * along with this program; if not, write to the Free Software | ||
26 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
27 | */ | ||
28 | |||
29 | #ifndef __EHEA_QMR_H__ | ||
30 | #define __EHEA_QMR_H__ | ||
31 | |||
32 | #include "ehea.h" | ||
33 | #include "ehea_hw.h" | ||
34 | |||
35 | /* | ||
36 | * page size of ehea hardware queues | ||
37 | */ | ||
38 | |||
39 | #define EHEA_PAGESHIFT 12 | ||
40 | #define EHEA_PAGESIZE 4096UL | ||
41 | |||
42 | /* Some abbreviations used here: | ||
43 | * | ||
44 | * WQE - Work Queue Entry | ||
45 | * SWQE - Send Work Queue Entry | ||
46 | * RWQE - Receive Work Queue Entry | ||
47 | * CQE - Completion Queue Entry | ||
48 | * EQE - Event Queue Entry | ||
49 | * MR - Memory Region | ||
50 | */ | ||
51 | |||
52 | /* Use of WR_ID field for EHEA */ | ||
53 | #define EHEA_WR_ID_COUNT EHEA_BMASK_IBM(0, 19) | ||
54 | #define EHEA_WR_ID_TYPE EHEA_BMASK_IBM(20, 23) | ||
55 | #define EHEA_SWQE2_TYPE 0x1 | ||
56 | #define EHEA_SWQE3_TYPE 0x2 | ||
57 | #define EHEA_RWQE2_TYPE 0x3 | ||
58 | #define EHEA_RWQE3_TYPE 0x4 | ||
59 | #define EHEA_WR_ID_INDEX EHEA_BMASK_IBM(24, 47) | ||
60 | #define EHEA_WR_ID_REFILL EHEA_BMASK_IBM(48, 63) | ||
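The WR_ID is an opaque cookie the driver packs so that a completion can be matched back to the ring and slot it came from. A sketch of tagging a format-2 send WQE, assuming the EHEA_BMASK_SET helper from ehea.h and caller-maintained swqe_counter/wqe_index values:

swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
	    | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_counter)
	    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, wqe_index);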
61 | |||
62 | struct ehea_vsgentry { | ||
63 | u64 vaddr; | ||
64 | u32 l_key; | ||
65 | u32 len; | ||
66 | }; | ||
67 | |||
68 | /* maximum number of sg entries allowed in a WQE */ | ||
69 | #define EHEA_MAX_WQE_SG_ENTRIES 252 | ||
70 | #define SWQE2_MAX_IMM (0xD0 - 0x30) /* 160 bytes: imm data occupies WQE offsets 0x30..0xd0 */ | ||
71 | #define SWQE3_MAX_IMM 224 | ||
72 | |||
73 | /* tx control flags for swqe */ | ||
74 | #define EHEA_SWQE_CRC 0x8000 | ||
75 | #define EHEA_SWQE_IP_CHECKSUM 0x4000 | ||
76 | #define EHEA_SWQE_TCP_CHECKSUM 0x2000 | ||
77 | #define EHEA_SWQE_TSO 0x1000 | ||
78 | #define EHEA_SWQE_SIGNALLED_COMPLETION 0x0800 | ||
79 | #define EHEA_SWQE_VLAN_INSERT 0x0400 | ||
80 | #define EHEA_SWQE_IMM_DATA_PRESENT 0x0200 | ||
81 | #define EHEA_SWQE_DESCRIPTORS_PRESENT 0x0100 | ||
82 | #define EHEA_SWQE_WRAP_CTL_REC 0x0080 | ||
83 | #define EHEA_SWQE_WRAP_CTL_FORCE 0x0040 | ||
84 | #define EHEA_SWQE_BIND 0x0020 | ||
85 | #define EHEA_SWQE_PURGE 0x0010 | ||
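The transmit path ORs a subset of these into tx_control; a sketch for a checksum-offloaded send that wants a completion signalled (the flag choice is illustrative, not prescriptive):

swqe->tx_control |= EHEA_SWQE_CRC
		 | EHEA_SWQE_IP_CHECKSUM
		 | EHEA_SWQE_TCP_CHECKSUM
		 | EHEA_SWQE_SIGNALLED_COMPLETION;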
86 | |||
87 | /* sizeof(struct ehea_swqe) less the union */ | ||
88 | #define SWQE_HEADER_SIZE 32 | ||
89 | |||
90 | struct ehea_swqe { | ||
91 | u64 wr_id; | ||
92 | u16 tx_control; | ||
93 | u16 vlan_tag; | ||
94 | u8 reserved1; | ||
95 | u8 ip_start; | ||
96 | u8 ip_end; | ||
97 | u8 immediate_data_length; | ||
98 | u8 tcp_offset; | ||
99 | u8 reserved2; | ||
100 | u16 tcp_end; | ||
101 | u8 wrap_tag; | ||
102 | u8 descriptors; /* number of valid descriptors in WQE */ | ||
103 | u16 reserved3; | ||
104 | u16 reserved4; | ||
105 | u16 mss; | ||
106 | u32 reserved5; | ||
107 | union { | ||
108 | /* Send WQE Format 1 */ | ||
109 | struct { | ||
110 | struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES]; | ||
111 | } no_immediate_data; | ||
112 | |||
113 | /* Send WQE Format 2 */ | ||
114 | struct { | ||
115 | struct ehea_vsgentry sg_entry; | ||
116 | /* 0x30 */ | ||
117 | u8 immediate_data[SWQE2_MAX_IMM]; | ||
118 | /* 0xd0 */ | ||
119 | struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1]; | ||
120 | } immdata_desc __attribute__ ((packed)); | ||
121 | |||
122 | /* Send WQE Format 3 */ | ||
123 | struct { | ||
124 | u8 immediate_data[SWQE3_MAX_IMM]; | ||
125 | } immdata_nodesc; | ||
126 | } u; | ||
127 | }; | ||
128 | |||
129 | struct ehea_rwqe { | ||
130 | u64 wr_id; /* work request ID */ | ||
131 | u8 reserved1[5]; | ||
132 | u8 data_segments; | ||
133 | u16 reserved2; | ||
134 | u64 reserved3; | ||
135 | u64 reserved4; | ||
136 | struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES]; | ||
137 | }; | ||
138 | |||
139 | #define EHEA_CQE_VLAN_TAG_XTRACT 0x0400 | ||
140 | |||
141 | #define EHEA_CQE_TYPE_RQ 0x60 | ||
142 | #define EHEA_CQE_STAT_ERR_MASK 0x721F | ||
143 | #define EHEA_CQE_STAT_FAT_ERR_MASK 0x1F | ||
144 | #define EHEA_CQE_STAT_ERR_TCP 0x4000 | ||
145 | |||
146 | struct ehea_cqe { | ||
147 | u64 wr_id; /* work request ID from WQE */ | ||
148 | u8 type; | ||
149 | u8 valid; | ||
150 | u16 status; | ||
151 | u16 reserved1; | ||
152 | u16 num_bytes_transfered; | ||
153 | u16 vlan_tag; | ||
154 | u16 inet_checksum_value; | ||
155 | u8 reserved2; | ||
156 | u8 header_length; | ||
157 | u16 reserved3; | ||
158 | u16 page_offset; | ||
159 | u16 wqe_count; | ||
160 | u32 qp_token; | ||
161 | u32 timestamp; | ||
162 | u32 reserved4; | ||
163 | u64 reserved5[3]; | ||
164 | }; | ||
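EHEA_CQE_STAT_ERR_MASK selects the status bits that mark a completion as bad; a receive-path sanity check might read (helper name hypothetical):

static inline int ehea_cqe_has_error(struct ehea_cqe *cqe)
{
	return (cqe->status & EHEA_CQE_STAT_ERR_MASK) != 0;
}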
165 | |||
166 | #define EHEA_EQE_VALID EHEA_BMASK_IBM(0, 0) | ||
167 | #define EHEA_EQE_IS_CQE EHEA_BMASK_IBM(1, 1) | ||
168 | #define EHEA_EQE_IDENTIFIER EHEA_BMASK_IBM(2, 7) | ||
169 | #define EHEA_EQE_QP_CQ_NUMBER EHEA_BMASK_IBM(8, 31) | ||
170 | #define EHEA_EQE_QP_TOKEN EHEA_BMASK_IBM(32, 63) | ||
171 | #define EHEA_EQE_CQ_TOKEN EHEA_BMASK_IBM(32, 63) | ||
172 | #define EHEA_EQE_KEY EHEA_BMASK_IBM(32, 63) | ||
173 | #define EHEA_EQE_PORT_NUMBER EHEA_BMASK_IBM(56, 63) | ||
174 | #define EHEA_EQE_EQ_NUMBER EHEA_BMASK_IBM(48, 63) | ||
175 | #define EHEA_EQE_SM_ID EHEA_BMASK_IBM(48, 63) | ||
176 | #define EHEA_EQE_SM_MECH_NUMBER EHEA_BMASK_IBM(48, 55) | ||
177 | #define EHEA_EQE_SM_PORT_NUMBER EHEA_BMASK_IBM(56, 63) | ||
178 | |||
179 | struct ehea_eqe { | ||
180 | u64 entry; | ||
181 | }; | ||
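Each event is a single 64-bit word picked apart with the masks above. A decode sketch, assuming the EHEA_BMASK_GET helper from ehea.h:

u64 entry = eqe->entry;

if (EHEA_BMASK_GET(EHEA_EQE_VALID, entry) &&
    !EHEA_BMASK_GET(EHEA_EQE_IS_CQE, entry)) {
	u8 identifier = EHEA_BMASK_GET(EHEA_EQE_IDENTIFIER, entry);
	u8 portnum = EHEA_BMASK_GET(EHEA_EQE_PORT_NUMBER, entry);
	/* dispatch on identifier/portnum */
}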
182 | |||
183 | static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset) | ||
184 | { | ||
185 | struct ehea_page *current_page; | ||
186 | |||
187 | if (q_offset >= queue->queue_length) | ||
188 | q_offset -= queue->queue_length; | ||
189 | current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT]; | ||
190 | return ¤t_page->entries[q_offset & (EHEA_PAGESIZE - 1)]; | ||
191 | } | ||
192 | |||
193 | static inline void *hw_qeit_get(struct hw_queue *queue) | ||
194 | { | ||
195 | return hw_qeit_calc(queue, queue->current_q_offset); | ||
196 | } | ||
197 | |||
198 | static inline void hw_qeit_inc(struct hw_queue *queue) | ||
199 | { | ||
200 | queue->current_q_offset += queue->qe_size; | ||
201 | if (queue->current_q_offset >= queue->queue_length) { | ||
202 | queue->current_q_offset = 0; | ||
203 | /* toggle the valid flag */ | ||
204 | queue->toggle_state = (~queue->toggle_state) & 1; | ||
205 | } | ||
206 | } | ||
207 | |||
208 | static inline void *hw_qeit_get_inc(struct hw_queue *queue) | ||
209 | { | ||
210 | void *retvalue = hw_qeit_get(queue); | ||
211 | hw_qeit_inc(queue); | ||
212 | return retvalue; | ||
213 | } | ||
214 | |||
215 | static inline void *hw_qeit_get_inc_valid(struct hw_queue *queue) | ||
216 | { | ||
217 | struct ehea_cqe *retvalue = hw_qeit_get(queue); | ||
218 | u8 valid = retvalue->valid; | ||
219 | void *pref; | ||
220 | |||
221 | if ((valid >> 7) == (queue->toggle_state & 1)) { | ||
222 | /* valid bit matches the current lap: this CQE is new */ | ||
223 | hw_qeit_inc(queue); | ||
224 | pref = hw_qeit_calc(queue, queue->current_q_offset); | ||
225 | prefetch(pref); | ||
226 | prefetch(pref + 128); | ||
227 | } else | ||
228 | retvalue = NULL; | ||
229 | return retvalue; | ||
230 | } | ||
231 | |||
232 | static inline void *hw_qeit_get_valid(struct hw_queue *queue) | ||
233 | { | ||
234 | struct ehea_cqe *retvalue = hw_qeit_get(queue); | ||
235 | void *pref; | ||
236 | u8 valid; | ||
237 | |||
238 | pref = hw_qeit_calc(queue, queue->current_q_offset); | ||
239 | prefetch(pref); | ||
240 | prefetch(pref + 128); | ||
241 | prefetch(pref + 256); | ||
242 | valid = retvalue->valid; | ||
243 | if ((valid >> 7) != (queue->toggle_state & 1)) | ||
244 | retvalue = NULL; | ||
245 | return retvalue; | ||
246 | } | ||
247 | |||
248 | static inline void *hw_qeit_reset(struct hw_queue *queue) | ||
249 | { | ||
250 | queue->current_q_offset = 0; | ||
251 | return hw_qeit_get(queue); | ||
252 | } | ||
253 | |||
254 | static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue) | ||
255 | { | ||
256 | u64 last_entry_in_q = queue->queue_length - queue->qe_size; | ||
257 | void *retvalue; | ||
258 | |||
259 | retvalue = hw_qeit_get(queue); | ||
260 | queue->current_q_offset += queue->qe_size; | ||
261 | if (queue->current_q_offset > last_entry_in_q) { | ||
262 | queue->current_q_offset = 0; | ||
263 | queue->toggle_state = (~queue->toggle_state) & 1; | ||
264 | } | ||
265 | return retvalue; | ||
266 | } | ||
267 | |||
268 | static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue) | ||
269 | { | ||
270 | void *retvalue = hw_qeit_get(queue); | ||
271 | u32 qe = *(u8 *)retvalue; | ||
272 | if ((qe >> 7) == (queue->toggle_state & 1)) | ||
273 | hw_qeit_eq_get_inc(queue); | ||
274 | else | ||
275 | retvalue = NULL; | ||
276 | return retvalue; | ||
277 | } | ||
278 | |||
279 | static inline struct ehea_rwqe *ehea_get_next_rwqe(struct ehea_qp *qp, | ||
280 | int rq_nr) | ||
281 | { | ||
282 | struct hw_queue *queue; | ||
283 | |||
284 | if (rq_nr == 1) | ||
285 | queue = &qp->hw_rqueue1; | ||
286 | else if (rq_nr == 2) | ||
287 | queue = &qp->hw_rqueue2; | ||
288 | else | ||
289 | queue = &qp->hw_rqueue3; | ||
290 | |||
291 | return hw_qeit_get_inc(queue); | ||
292 | } | ||
293 | |||
294 | static inline struct ehea_swqe *ehea_get_swqe(struct ehea_qp *my_qp, | ||
295 | int *wqe_index) | ||
296 | { | ||
297 | struct hw_queue *queue = &my_qp->hw_squeue; | ||
298 | struct ehea_swqe *wqe_p; | ||
299 | |||
300 | *wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ); /* offset / wqe size */ | ||
301 | wqe_p = hw_qeit_get_inc(&my_qp->hw_squeue); | ||
302 | |||
303 | return wqe_p; | ||
304 | } | ||
305 | |||
306 | static inline void ehea_post_swqe(struct ehea_qp *my_qp, struct ehea_swqe *swqe) | ||
307 | { | ||
308 | iosync(); /* WQE must be globally visible before the doorbell fires */ | ||
309 | ehea_update_sqa(my_qp, 1); | ||
310 | } | ||
311 | |||
312 | static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index) | ||
313 | { | ||
314 | struct hw_queue *queue = &qp->hw_rqueue1; | ||
315 | |||
316 | *wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1); | ||
317 | return hw_qeit_get_valid(queue); | ||
318 | } | ||
319 | |||
320 | static inline void ehea_inc_rq1(struct ehea_qp *qp) | ||
321 | { | ||
322 | hw_qeit_inc(&qp->hw_rqueue1); | ||
323 | } | ||
324 | |||
325 | static inline struct ehea_cqe *ehea_poll_cq(struct ehea_cq *my_cq) | ||
326 | { | ||
327 | return hw_qeit_get_inc_valid(&my_cq->hw_queue); | ||
328 | } | ||
329 | |||
330 | #define EHEA_CQ_REGISTER_ORIG 0 | ||
331 | #define EHEA_EQ_REGISTER_ORIG 0 | ||
332 | |||
333 | enum ehea_eq_type { | ||
334 | EHEA_EQ = 0, /* event queue */ | ||
335 | EHEA_NEQ /* notification event queue */ | ||
336 | }; | ||
337 | |||
338 | struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter, | ||
339 | enum ehea_eq_type type, | ||
340 | const u32 length, const u8 eqe_gen); | ||
341 | |||
342 | int ehea_destroy_eq(struct ehea_eq *eq); | ||
343 | |||
344 | struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq); | ||
345 | |||
346 | struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe, | ||
347 | u64 eq_handle, u32 cq_token); | ||
348 | |||
349 | int ehea_destroy_cq(struct ehea_cq *cq); | ||
350 | |||
351 | struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, u32 pd, | ||
352 | struct ehea_qp_init_attr *init_attr); | ||
353 | |||
354 | int ehea_destroy_qp(struct ehea_qp *qp); | ||
355 | |||
356 | int ehea_reg_mr_adapter(struct ehea_adapter *adapter); | ||
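Taken together, these entry points imply a bring-up order of MR first, then EQ, CQ and QP, torn down in reverse. A sketch with attribute setup and error handling elided (adapter, cq_token and init_attr assumed from ehea.h; the queue lengths are arbitrary):

struct ehea_eq *eq;
struct ehea_cq *cq;
struct ehea_qp *qp;

ehea_reg_mr_adapter(adapter);			/* one MR spanning kernel memory */
eq = ehea_create_eq(adapter, EHEA_EQ, 512, 1);
cq = ehea_create_cq(adapter, 256, eq->fw_handle, cq_token);
qp = ehea_create_qp(adapter, adapter->pd, &init_attr);

ehea_destroy_qp(qp);
ehea_destroy_cq(cq);
ehea_destroy_eq(eq);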
357 | |||
358 | #endif /* __EHEA_QMR_H__ */ | ||